Dataset schema:

| column | dtype | range / distinct values |
| --- | --- | --- |
| repo_name | stringlengths | 5 – 114 |
| repo_url | stringlengths | 24 – 133 |
| snapshot_id | stringlengths | 40 – 40 |
| revision_id | stringlengths | 40 – 40 |
| directory_id | stringlengths | 40 – 40 |
| branch_name | stringclasses | 209 values |
| visit_date | timestamp[ns] | |
| revision_date | timestamp[ns] | |
| committer_date | timestamp[ns] | |
| github_id | int64 | 9.83k – 683M |
| star_events_count | int64 | 0 – 22.6k |
| fork_events_count | int64 | 0 – 4.15k |
| gha_license_id | stringclasses | 17 values |
| gha_created_at | timestamp[ns] | |
| gha_updated_at | timestamp[ns] | |
| gha_pushed_at | timestamp[ns] | |
| gha_language | stringclasses | 115 values |
| files | listlengths | 1 – 13.2k |
| num_files | int64 | 1 – 13.2k |
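The records below follow this schema, one value per column. As a quick orientation to the data structure, here is a minimal sketch of iterating over such records; it assumes the dump is stored as newline-delimited JSON in a hypothetical file named `records.jsonl` (the actual storage format and filename are not given above).

```python
import json

# Minimal sketch: summarize each record of the dump.
# Assumes a hypothetical newline-delimited JSON file "records.jsonl";
# field names are taken from the schema table above.
with open("records.jsonl", encoding="utf-8") as fh:
    for line in fh:
        record = json.loads(line)
        # "files" is a list of per-file dicts carrying metadata
        # (path, language, length_bytes, ...) plus the raw "text".
        languages = sorted({f.get("language", "unknown") for f in record["files"]})
        print(f'{record["repo_name"]}: {record["num_files"]} file(s), '
              f'languages: {", ".join(languages)}')
```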
repo_name: Marianasls/python3-exercises
repo_url: https://github.com/Marianasls/python3-exercises
snapshot_id: 769fbfa2126cf100d77a57296cda7b7c1df2f4c8
revision_id: e1eaf591cb883cd7b3bad67fb2df57dd12963219
directory_id: 07f134426ae2f4cffb324afa44fdbef51dd6d8f2
branch_name: refs/heads/master
visit_date: 2020-03-26T15:16:55.502291
revision_date: 2019-10-03T23:58:37
committer_date: 2019-10-03T23:58:37
github_id: 145,033,719
star_events_count: 0
fork_events_count: 0
gha_license_id: Apache-2.0
gha_created_at: 2018-08-16T20:01:31
gha_updated_at: 2018-08-06T20:52:13
gha_pushed_at: 2018-08-06T20:52:11
gha_language: null
files:
[ { "alpha_fraction": 0.5267983078956604, "alphanum_fraction": 0.5387870073318481, "avg_line_length": 25.269229888916016, "blob_id": "a63e975bd4c97599ab77a41ebaf23c6b52079a28", "content_id": "0234305728aea3c4fd21cdaa25f1f51ad971b922", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1418, "license_type": "permissive", "max_line_length": 72, "num_lines": 52, "path": "/basic/string2.py", "repo_name": "Marianasls/python3-exercises", "src_encoding": "UTF-8", "text": "def verbing(s):\r\n t = len(s)\r\n if(t < 3): return s\r\n nova = s + 'ly' if(s[t-3:t] == 'ing') else s + 'ing'\r\n\r\n return nova\r\n\r\ndef not_bad(s):\r\n a = s.find('not')\r\n b = s.find('bad')\r\n return s.replace(s[a:], 'good') + s[b+3:] if a < b else s \r\n\r\ndef front_back(aa, bb):\r\n a = len(aa)\r\n b = len(bb)\r\n frontA = int(a/2) if a%2 == 0 else int(a/2 +1)\r\n backA = int(a/2)\r\n\r\n frontB = int(b/2) if b%2 == 0 else int(b/2 +1)\r\n backB = int(b/2)\r\n return aa[0:frontA] + bb[0:frontB] + aa[a-backA:] + bb[b-backB:]\r\n#a-front + b-front + a-back + b-back\r\n\r\ndef test(got, expected):\r\n if got == expected:\r\n prefix = ' OK '\r\n else:\r\n prefix = ' X '\r\n print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))\r\n\r\ndef main():\r\n print('verbing')\r\n test(verbing('hail'), 'hailing')\r\n test(verbing('swiming'), 'swimingly')\r\n test(verbing('do'), 'do')\r\n\r\n print()\r\n print('not_bad')\r\n test(not_bad('This movie is not so bad'), 'This movie is good')\r\n test(not_bad('This dinner is not that bad!'), 'This dinner is good!')\r\n test(not_bad('This tea is not hot'), 'This tea is not hot')\r\n test(not_bad(\"It's bad yet not\"), \"It's bad yet not\")\r\n\r\n print()\r\n print('front_back')\r\n test(front_back('abcd', 'xy'), 'abxcdy')\r\n test(front_back('abcde', 'xyz'), 'abcxydez')\r\n test(front_back('Kitten', 'Donut'), 'KitDontenut')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.8125, "alphanum_fraction": 0.84375, "avg_line_length": 31, "blob_id": "5df47937e77c9836f4b6eb400b4ea6a674bfebc6", "content_id": "e7bd1b965df2fffaa64d3812b0268e381e105a75", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 66, "license_type": "permissive", "max_line_length": 43, "num_lines": 2, "path": "/README.md", "repo_name": "Marianasls/python3-exercises", "src_encoding": "UTF-8", "text": "# python3-exercises\nExercícios usados no Codelab Python3 básico\n" } ]
num_files: 2
repo_name: PaulJulitz/Ommax-Case-Study
repo_url: https://github.com/PaulJulitz/Ommax-Case-Study
snapshot_id: dfa2943324a9ea9300dad788ecd2b77b782796df
revision_id: 3d756e9baedfb8e12039b7c561671274a5f97d4d
directory_id: 3a431aea2873a69605990e4bfdc272297eff1ba7
branch_name: refs/heads/master
visit_date: 2022-02-16T01:39:05.648378
revision_date: 2019-09-12T16:15:08
committer_date: 2019-09-12T16:15:08
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.7777777910232544, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 18, "blob_id": "d25ed05a4b0cbdb5ffb1bf70bff6a3564b1b4a97", "content_id": "1d1cdb5d9c38ef2c7dedc1dfee49a7c92835b7bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 18, "license_type": "no_license", "max_line_length": 18, "num_lines": 1, "path": "/README.md", "repo_name": "PaulJulitz/Ommax-Case-Study", "src_encoding": "UTF-8", "text": "# Ommax-Case-Study" }, { "alpha_fraction": 0.7033708095550537, "alphanum_fraction": 0.7101123332977295, "avg_line_length": 38.54545593261719, "blob_id": "dffb09922ad66a5881f76d1888340d0ca84e85f8", "content_id": "42000ba77c5e17842ab6aaf42213f7c06d41be64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 445, "license_type": "no_license", "max_line_length": 121, "num_lines": 11, "path": "/library.py", "repo_name": "PaulJulitz/Ommax-Case-Study", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\nimport requests # Library api request\r\nimport json # Library data standardization\r\nimport sqlite3 # Library SQLite\r\nimport random # Library random numbers\r\n\r\n### Connect to SQlite database ###\r\nconn = sqlite3.connect('Currency.db') # Connection to data base\r\nc = conn.cursor() # Save pointer \r\nc.execute('CREATE TABLE if not exists CURRENCY_Table ([date] text PRIMARY KEY, [GBP] float, [USD] float)') # create table " }, { "alpha_fraction": 0.5044247508049011, "alphanum_fraction": 0.5191740393638611, "avg_line_length": 26.41666603088379, "blob_id": "bd3da0b3c08f5d55b208a54da910af8179cce824", "content_id": "2b7469f42ff5435c9a1afe522944dcb5cb3f422a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 339, "license_type": "no_license", "max_line_length": 88, "num_lines": 12, "path": "/Ommax_function_display.py", "repo_name": "PaulJulitz/Ommax-Case-Study", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\nimport Ommax_function_load as ofl\r\n \r\ndef display():\r\n data = ofl.load()\r\n \r\n for h in data: # Check if data is the same\r\n sql_date = h[0]\r\n sql_gbp_rate = h[1]\r\n sql_usd_rate = h[2]\r\n print(f'On {sql_date} 1 EUR equals {sql_gbp_rate} GBP, and {sql_usd_rate} USD.')" }, { "alpha_fraction": 0.5439503788948059, "alphanum_fraction": 0.5698035359382629, "avg_line_length": 29.25806427001953, "blob_id": "8c160ec6c843045b2782cacf2761a22fe50cbd8b", "content_id": "843fde5c24ca57add877ef5976fb628438d61a1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 967, "license_type": "no_license", "max_line_length": 86, "num_lines": 31, "path": "/Python_application_ommax.py", "repo_name": "PaulJulitz/Ommax-Case-Study", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\n#############################################\r\n# Python 3.6 Application: OMMAX Case Study\r\n#############################################\r\n\r\nfrom library import conn\r\n\r\n# Load only what you need: conn, c, requests, json, sqlite3, random\r\n# Load everything: *\r\n\r\nimport Ommax_function_save as ofs\r\nimport Ommax_function_check as ofc\r\nimport Ommax_function_display as ofd\r\n\r\n### Execute Application ###\r\nprint('#############################################################################')\r\nprint('Python 3.6 Application: Fixer Currency Rates from 01.09.2018 until 30.09.2018')\r\nprint('')\r\nprint('1. 
Fetch Data form Fixer API and save Data to SQLite Database.')\r\nprint(' ... ')\r\nofs.save()\r\nprint('')\r\nprint('2. SQLite Database holds the following values:')\r\nofd.display()\r\nprint('')\r\nprint('3. Check if five random entries in SQLite database are correct.')\r\nofc.check()\r\n\r\nconn.commit() # Save changes \r\nconn.close() # Close the connection" }, { "alpha_fraction": 0.5754716992378235, "alphanum_fraction": 0.6166380643844604, "avg_line_length": 42.769229888916016, "blob_id": "1c20efbe0f2224c3372ebd8d77cf04a08c9f7137", "content_id": "687e83fcf0520aff68ab0b4c3dc00b091dd1e8bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1166, "license_type": "no_license", "max_line_length": 123, "num_lines": 26, "path": "/Ommax_function_extractor.py", "repo_name": "PaulJulitz/Ommax-Case-Study", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n# ACCESS Key: 8bffd25ad11e370e74c1386a39d8192d\r\n# Username: Augsburg\r\n\r\nfrom library import json, requests\r\n\r\ndef extractor(day, year= '2018', month='09'):\r\n url = \"http://data.fixer.io/api/\" + year + \"-\" + month + \"-\" + day # Only variable day\r\n ACCESS_KEY = \"8bffd25ad11e370e74c1386a39d8192d\"\r\n access_key = \"?access_key=\" + ACCESS_KEY\r\n currency = \"GBP,USD\" # More currency conversion rate can be added; Check: Show\r\n symbols = \"&symbols=\" + currency\r\n url += access_key + symbols # Url form\r\n\r\n ### load and save ###\r\n response = requests.get(url) # Request: fetch data\r\n data = response.text # Convert data to string \r\n obj = json.loads(data) # Convert data to json data type: python dict type\r\n# print(\"Out: \", obj) # Control message\r\n \r\n ### Extract API Values ###\r\n date = obj[\"date\"] # Extract date \r\n gbp_rate = obj[\"rates\"][\"GBP\"] # Read Value for gbp rate\r\n usd_rate = obj[\"rates\"][\"USD\"] # Read Value for usd rate\r\n# print(\"On \" + date + \" 1 EUR equals \" + str(gbp_rate)+ \" GBP, and \" + str(usd_rate)+ \" USD.\") # Display fetched values\r\n return (date,gbp_rate, usd_rate)\r\n\r\n" }, { "alpha_fraction": 0.5934579372406006, "alphanum_fraction": 0.5957943797111511, "avg_line_length": 30.923076629638672, "blob_id": "89232fd1f44542734d01a5ed4a3fdd24dbd93cfe", "content_id": "a7b0d57c5d3e9c754fcaf50c562c65808f825f42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 428, "license_type": "no_license", "max_line_length": 85, "num_lines": 13, "path": "/Ommax_function_load.py", "repo_name": "PaulJulitz/Ommax-Case-Study", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\nfrom library import c\r\n \r\ndef load():\r\n data = [] # List\r\n b = 'select * from CURRENCY_Table'\r\n# b = 'select date, GBP, USD from CURRENCY_Table' # Single entries\r\n d = c.execute(b) # Pointer (sql curser)\r\n for i in d: # iterate over object (curser object has the property of an iterator)\r\n # print(i) Control message \r\n data.append(i) # Addend to list \r\n return data\r\n" }, { "alpha_fraction": 0.5313315987586975, "alphanum_fraction": 0.5417754650115967, "avg_line_length": 32.772727966308594, "blob_id": "8f622fa4287d84d6c3186b85a718d1b9ec584957", "content_id": "08440749e7f373df79fe812286db051d8d590d66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 766, "license_type": "no_license", "max_line_length": 97, "num_lines": 22, "path": "/Ommax_function_save.py", "repo_name": "PaulJulitz/Ommax-Case-Study", 
"src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\nfrom library import c\r\n\r\nimport Ommax_function_extractor as ofe\r\n\r\ndef save():\r\n for i in range(1,31):\r\n if i < 10: # Data format: 0x or yx \r\n i = \"0\"+str(i)\r\n else:\r\n i = str(i)\r\n data = ofe.extractor(i) # Push data\r\n\r\n try: # Ignore Error Message; Alternative clear database: DROP TABLE Statement\r\n a = 'insert into CURRENCY_Table values (\"{}\",{},{})'.format(*data) # Pointer to data\r\n# a = 'insert into CURRENCY_Table values (\"{}\",{},{})'.format(date, gbp_rate, usd_rate)\r\n c.execute(a) # Save in data base\r\n \r\n except Exception as e: # Exception writes error message into e\r\n# print(e) # Check of error message \r\n pass \r\n" }, { "alpha_fraction": 0.5476190447807312, "alphanum_fraction": 0.5520833134651184, "avg_line_length": 27.30434799194336, "blob_id": "abd92276c9d5106bbfde60f280ec8c52a37a474f", "content_id": "3d28cb10eb4b89991ca1f0a35e5af2c87ac1ae7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 672, "license_type": "no_license", "max_line_length": 61, "num_lines": 23, "path": "/Ommax_function_check.py", "repo_name": "PaulJulitz/Ommax-Case-Study", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\r\n\r\nfrom library import random\r\n\r\nimport Ommax_function_load as ofl\r\nimport Ommax_function_extractor as ofe\r\n\r\ndef check():\r\n data = ofl.load() # List\r\n choice = [] # \r\n for x in range(5):\r\n c = random.choice(data)\r\n data.remove(c) # Extract one element of the set\r\n choice.append(c) # Saves the extracted element\r\n \r\n for h in choice: # Check if data is the same\r\n datum = h[0]\r\n y,m,d = datum.split('-')\r\n aus = ofe.extractor(d, y, m)\r\n if aus != h:\r\n print('SQlite data for '+ datum +' is INCORRECT.')\r\n else:\r\n print('SQlite data for '+ datum +' is correct.')" } ]
num_files: 8
repo_name: koebbe/homeworks-mockup
repo_url: https://github.com/koebbe/homeworks-mockup
snapshot_id: dd51d1d0deed316f1426125d84b2ac59887642c7
revision_id: 0791248a9c44f2f05082822b450f5df8d0d368cf
directory_id: 79d21896ff330f9bf68fdee5c16ab4da4b7e97f6
branch_name: refs/heads/master
visit_date: 2016-09-10T01:23:19.471252
revision_date: 2015-04-17T01:04:16
committer_date: 2015-04-17T01:04:16
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5236111283302307, "alphanum_fraction": 0.5263888835906982, "avg_line_length": 29.63829803466797, "blob_id": "7c3219a3d68c7d3ecc717534d419d3804bebd293", "content_id": "9665793e9f33b49f87cfb22775f6bbf8fe6924e9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1440, "license_type": "permissive", "max_line_length": 134, "num_lines": 47, "path": "/mockup/create-data.py", "repo_name": "koebbe/homeworks-mockup", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport csv\nimport json\n\ndef student():\n d = open('data-student.csv')\n d.readline()\n \n l = list(csv.DictReader(d, ['district', 'school', 'teacher', 'id', 'first_name', 'last_name', 'gender', 'ethnicity']))\n \n schools = []\n \n for x in l:\n x['first_name'] = x['first_name'].title()\n x['last_name'] = x['last_name'].title()\n x['name'] = \"%s %s\" % (x['first_name'], x['last_name'])\n if x['school'] not in [s['id'] for s in schools]:\n schools.append({ 'id': x['school'], 'name': x['school'] })\n \n f = open('data-student.json', 'w')\n f.write(json.dumps(l, sort_keys=True, indent=4, separators=(',', ': ')))\n f.close()\n \n f = open('data-school.json', 'w')\n f.write(json.dumps(schools, sort_keys=True, indent=4, separators=(',', ': ')))\n f.close()\n\ndef staff():\n d = open('test-data-teacher.csv')\n d.readline()\n \n keys = [\"district\",\"school\",\"last_name\",\"first_name\",\"Position Title (c)\",\"Grade \",\"Class Size (e)\",\"E-Mail Contact (f)\",\"status\"]\n l = list(csv.DictReader(d, keys))\n for x in l:\n x['name'] = '%s %s' % (x['first_name'], x['last_name'])\n x['school'] = x['school'].split()[0]\n x['status'] = x['status'].lower()\n \n f = open('data-teacher.json', 'w')\n f.write(json.dumps(l, sort_keys=True, indent=4, separators=(',', ': ')))\n f.close()\n \n\nif __name__ == '__main__':\n student()\n staff()\n" }, { "alpha_fraction": 0.4845397174358368, "alphanum_fraction": 0.49156710505485535, "avg_line_length": 29.913043975830078, "blob_id": "36009d8e6b8713fb20d14b751f1df593ede6394c", "content_id": "712abe3b53d44f60ec47911098ba59604c78cd90", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2846, "license_type": "permissive", "max_line_length": 115, "num_lines": 92, "path": "/mockup/hw.js", "repo_name": "koebbe/homeworks-mockup", "src_encoding": "UTF-8", "text": " var myapp = angular.module(\"myApp\", []);\n myapp.controller(\"visitCtrl\", [\"$scope\", \"$http\", \"$log\", \"$location\", function($scope, $http, $log, $location) {\n\n $scope.STUDENTS = $http.get('data-student.json')\n .then(function(res){\n return res.data; //$scope.STUDENTS = res.data; \n });\n $scope.SCHOOLS = $http.get('data-school.json')\n .then(function(res){\n return res.data; //$scope.SCHOOLS = res.data; \n });\n\n $scope.STAFF = $http.get('data-teacher.json')\n .then(function(res){\n return res.data; //$scope.SCHOOLS = res.data; \n });\n\n $scope.staffroles = [\n { id: 1, name: 'Classroom Teacher' },\n { id: 1, name: 'Specialist' },\n { id: 1, name: 'Counselor' },\n { id: 1, name: 'Administrator' },\n { id: 1, name: 'Special Education Staff' },\n { id: 1, name: 'Interpreter' }\n ];\n\n var params = $location.search();\n\n $scope.CAREGIVERS = [\n { id: 1, name: 'CG A', students: [ 1 ] },\n { id: 2, name: 'CG B', students: [ 1 ] },\n { id: 3, name: 'CG C', students: [ 2 ] },\n { id: 4, name: 'CG D', students: [ 2 ] },\n { id: 5, name: 'CG E', students: [ 3 ] }\n ];\n\n function 
make_data() {\n var data = []\n $scope.SCHOOLS.then(function(schools) {\n angular.forEach(schools, function(school) {\n school.students = [];\n school.staff = [];\n $scope.STAFF.then(function(teachers) {\n angular.forEach(teachers, function(staff) {\n if (staff.school == school.id) {\n school.staff.push(staff);\n }\n });\n });\n $scope.STUDENTS.then(function(students) {\n angular.forEach(students, function(student) {\n if (student.school == school.id) {\n school.students.push(student);\n student.caregivers = [{ id: 0, name: \"New\" }];\n angular.forEach($scope.CAREGIVERS, function(cg) {\n angular.forEach(cg.students, function(cgst) {\n if (cgst == student.id) {\n student.caregivers.push(cg);\n }\n });\n });\n }\n });\n });\n data.push(school);\n });\n });\n return data;\n }\n $scope.resetForm = function() {\n $scope.visit = { \n school: null, \n student: null, \n caregiver: 0 \n };\n $scope.data = make_data();\n if (angular.isDefined(params.id)) {\n $scope.STAFF.then(function(teachers) {\n $scope.staff1 = teachers[parseInt(params.id)];\n $scope.SCHOOLS.then(function(schools) {\n angular.forEach(schools, function(s) {\n if (s.name == $scope.staff1.school) {\n $scope.visit.school = s;\n }\n });\n });\n });\n }\n };\n $scope.resetForm();\n\n }]);\n" } ]
num_files: 2
repo_name: MultipleCrashes/network_requests
repo_url: https://github.com/MultipleCrashes/network_requests
snapshot_id: 44c2fa03199bd72f55398b9ba3b28be92b481f8e
revision_id: 9b11abe6d80a3584cd5d245f3a4900ed3594ea34
directory_id: 5ef45167db2b218d7187e49e31bcfde196798b4a
branch_name: refs/heads/master
visit_date: 2021-04-15T15:14:18.251416
revision_date: 2018-08-28T13:33:27
committer_date: 2018-08-28T13:33:27
github_id: 126,318,564
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.678947389125824, "alphanum_fraction": 0.6894736886024475, "avg_line_length": 30.66666603088379, "blob_id": "9dad3b44b0b7745fa3b17ef5a4ce2b09966d2cf3", "content_id": "645223728764b59502addbb29d9ce3af01557299", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 49, "num_lines": 6, "path": "/run_parallel.py", "repo_name": "MultipleCrashes/network_requests", "src_encoding": "UTF-8", "text": "import subprocess\n# Create ports to run in threads\nthreads = 5\nfor x in range(1, threads):\n print('No of create port script launched', x)\n p = subprocess.Popen([ './create_ports.sh'])\n" }, { "alpha_fraction": 0.4753488302230835, "alphanum_fraction": 0.59937983751297, "avg_line_length": 63.47999954223633, "blob_id": "a3be46181611af741c54ab8840531a66b1d72f92", "content_id": "e56360ef06bc70d502823ebeaf46438bc0f2ee05", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 3225, "license_type": "no_license", "max_line_length": 335, "num_lines": 50, "path": "/neutron_query.sh", "repo_name": "MultipleCrashes/network_requests", "src_encoding": "UTF-8", "text": "host=\"localhost\"\nport=7000\necho $host\n\n#Delete if network is existing\ncurl -X DELETE http://$host:$port/v2.0/networks/8be447f3-2656-4c13-b1ed-e67d84ec91b3 \n\n\n# Create 0000 network \necho \"####################### Create 0000 Network ####################### \"\ncurl -X POST -H \"Content-Type: application/json\" -d '{\"network\": {\"id\":\"00000000-0000-0000-0000-000000000000\", \"tenant_id\": \"provider\", \"name\":\"public\"}}' http://$host:$port/v2.0/networks | jq \".\"\n# Create a network\necho \"####################### Creating a Network ####################### \"\ncurl -X POST -H \"Content-Type: application/json\" -d '{\"network\": {\"id\":\"8be447f3-2656-4c13-b1ed-e67d84ec91b3\", \"tenant_id\": \"provider\", \"name\":\"public\"}}' http://$host:$port/v2.0/networks | jq \".\"\n\n# Create subnet in that network\necho \"####################### Creating a subnet , within the network #######################\"\ncurl -X POST -H \"Content-Type: application/json\" -d '{\"subnet\":{\"network_id\":\"00000000-0000-0000-0000-000000000000\",\"cidr\":\"10.4.0.0/16\", \"segment_id\":\"blah\", \"tenant_id\": \"harishupadhyay\", \"ip_version\": \"4\"}}' http://$host:$port/v2.0/subnets | jq \".\"\n\n# Create a mac address ranage \n\necho \"####################### Creating mac address range , inside the network #######################\"\ncurl -X POST -H \"Content-Type: application/json\" -d '{\"mac_address_range\": {\"cidr\" : \"AA:BB:CC\", \"tenant_id\": \"provider\"}}' http://localhost:9696/v2.0/mac_address_ranges.json | jq \".\"\n\n# Create a port\necho \"####################### Creating a port in the subnet, inside the network #######################\"\ncurl -X POST -H \"Content-Type:application/json\" -d '{\"port\":{\"admin_state_up\":true,\"name\":\"port0\",\"tenant_id\":\"harishupadhyay\",\"network_id\":\"00000000-0000-0000-0000-000000000000\",\"segment_id\":\"blah\"}}' http://$host:$port/v2.0/ports | jq \".\"\n\n# Get all ports\necho \"####################### List all ports #######################\"\ncurl -X GET http://localhost:$port/v2.0/ports | jq \".\"\n\n\n# Create Fixed IP address \necho \"####################### Create Fixed IP Address #######################\"\ncurl -X POST -H \"Content-Type:application/json\" -d '{\"port\": { 
\"segment_id\":\"blah\",\"tenant_id\": \"provider\",\"admin_state_up\": true,\"name\": \"port1\",\"fixed_ips\": [{\"ip_address\": \"10.3.0.13\",\"subnet_id\": \"7aebc226-73d7-4293-99cb-0b04fd6287a0\"}],\"network_id\": \"00000000-0000-0000-0000-000000000000\"}}' http://$host:$port/v2.0/ports | jq \".\"\n\n#Shared IP address\n#curl -X POST -H \"Content-Type:application/json\" -d @shared_ip.json http://$host:$port/v2.0/ip_addresses | jq \".\"\necho \"####################### Creating a shared IP within the network #######################\"\ncurl -X POST -H \"Content-Type:application/json\" -d '{\"ip_address\":{\"network_id\": \"00000000-0000-0000-0000-000000000000\",\"port_ids\":[\"f035a962-5849-490e-9fae-147337bcb277\",\"e6aba027-2913-4b92-906c-cd6883977f72\"],\"version\": 4,\"tenant_id\": \"harishupadhyay\"}}' http://$host:$port/v2.0/ip_addresses | jq \".\"\n\n# Delete 000 port \ncurl -X DELETE http://$host:$port/v2.0/ports/00000000-0000-0000-0000-000000000000 | jq \".\"\n\n\n#Delete if network 00000\n\necho \"####################### Delete port 0000 #######################\"\ncurl -X DELETE http://$host:$port/v2.0/networks/00000000-0000-0000-0000-000000000000 \n" }, { "alpha_fraction": 0.6587957739830017, "alphanum_fraction": 0.6646989583969116, "avg_line_length": 26.354839324951172, "blob_id": "9085e497b97c2b4c9298ccfb06343b60f1eb8d8c", "content_id": "5670f4e9d1402296b27a3cd80748e08820025d57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 847, "license_type": "no_license", "max_line_length": 67, "num_lines": 31, "path": "/loop.py", "repo_name": "MultipleCrashes/network_requests", "src_encoding": "UTF-8", "text": "from oslo_service import loopingcall \nfrom keystoneauth1 import exceptions as keystone_exception\nfrom oslo_utils import excutils\n\n\ndef deallocate_fn():\n\tprint('called deallocate')\n\txyz()\n\t\ndef xyz():\n\traise keystone_exception.connection.ConnectFailure\n\ndef test_fn():\n @loopingcall.RetryDecorator(\n max_retry_count=3, inc_sleep_time=2, max_sleep_time=12,\n exceptions=(keystone_exception.connection.ConnectFailure,))\n def _deallocate_network_with_retries():\n try:\n deallocate_fn()\n except keystone_exception.connection.ConnectFailure as e:\n # Provide a warning that something is amiss.\n with excutils.save_and_reraise_exception():\n \tprint('xyz')\n\n try:\n _deallocate_network_with_retries()\n except Exception as e:\n print(e)\n \n\ntest_fn()" }, { "alpha_fraction": 0.4730434715747833, "alphanum_fraction": 0.4852173924446106, "avg_line_length": 22.875, "blob_id": "462cdfa5b84321b2f3b18f9c34d55072639e85c0", "content_id": "a965bba39424a093ea4e97b18064ef07703bb684", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 575, "license_type": "no_license", "max_line_length": 55, "num_lines": 24, "path": "/retry_logic.py", "repo_name": "MultipleCrashes/network_requests", "src_encoding": "UTF-8", "text": "\n\nMAX_RETRIES = 3\nimport time \n\ndef some_fnc():\n delay = 30\n retry_count = 1\n while True:\n if retry_count == MAX_RETRIES:\n break \n try:\n retry_count = retry_count + 1\n # Code logic goes here \n x = 1/0\n except Exception as e:\n print('Retrying number ' +str(retry_count))\n # Delay before retry \n delay = delay + delay\n print('Delay', delay)\n delay = delay * retry_count\n time.sleep(delay)\n continue \n break\n\nsome_fnc()\n" }, { "alpha_fraction": 0.6336842179298401, "alphanum_fraction": 0.648421049118042, "avg_line_length": 19.34782600402832, 
"blob_id": "10ab5296139fcc07a8a27b70e1eabe97b327b138", "content_id": "517bfff4ef92b6e76e7d4b54124923d6a1d62d48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 58, "num_lines": 23, "path": "/log_processor.py", "repo_name": "MultipleCrashes/network_requests", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\n\n\nprint('-'*100)\n\nerror_log = sys.argv[1:]\n\n\nignore_list = ['wifi error', 'preferred network']\nfatal_error = ['quark-agent','redis connectivity', 'wifi']\n\nfor log_line in error_log:\n\tfor major_error in fatal_error:\n\t\tif major_error in log_line:\n\t\t\tprint('\\n')\n\t\t\tprint('Fatal error found ->', log_line)\n\t\t\tprint('.'*100)\n\tfor minor_error in ignore_list:\n\t\tif minor_error in log_line:\n\t\t\tprint('\\n')\n\t\t\tprint('Minor error found skipping')\n\t\t\t\n\n\n\n" }, { "alpha_fraction": 0.5421686768531799, "alphanum_fraction": 0.6837349534034729, "avg_line_length": 35.77777862548828, "blob_id": "a8c130d5dfbe8e7b12cae9009810109c4679e82e", "content_id": "15afc682c69801be6f0060a7665084cd249d98bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 332, "license_type": "no_license", "max_line_length": 240, "num_lines": 9, "path": "/create_ports.sh", "repo_name": "MultipleCrashes/network_requests", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash \nhost=\"localhost\"\nport=9696\n\nfor number in {1..100000}\ndo\ncurl -X POST -H \"Content-Type:application/json\" -d '{\"port\":{\"admin_state_up\":true,\"name\":\"port0\",\"tenant_id\":\"harishupadhyay\",\"network_id\":\"00000000-0000-0000-0000-000000000000\",\"segment_id\":\"blah\"}}' http://$host:$port/v2.0/ports | jq \".\"\ndone\nexit 0\n\n" }, { "alpha_fraction": 0.7505470514297485, "alphanum_fraction": 0.7855579853057861, "avg_line_length": 40.54545593261719, "blob_id": "8b30f3b56d452a0c0327a02fb89270db8aee27f8", "content_id": "18a4b9f5e108cb9f8423073df634d90b10baf7a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 459, "license_type": "no_license", "max_line_length": 122, "num_lines": 11, "path": "/quark_db.py", "repo_name": "MultipleCrashes/network_requests", "src_encoding": "UTF-8", "text": "# local system\n\nfrom neutron.db import api as ndbapi\nfrom oslo_utils import timeutils\nfrom quark.db.models import *\nsession = ndbapi.get_session()\nfrom quark.db.models import IPAddress\n# table to query quark_ip_address \nobj =session.query(IPAddress).filter(IPAddress.address_readable=='10.1.24.133')\ncurrent_time = timeutils.utcnow()\nsession.query(IPAddress).filter(IPAddress.address_readable=='10.1.24.133’).update({IPAddress.deallocated_at:current_time})\n" }, { "alpha_fraction": 0.5797570943832397, "alphanum_fraction": 0.5919028520584106, "avg_line_length": 29.875, "blob_id": "1a7251730056b1ae8a2a446e4f1f9b36b9bbde30", "content_id": "fb9413af8321efffbe88df0192e02b07d64e8e59", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1235, "license_type": "no_license", "max_line_length": 93, "num_lines": 40, "path": "/get_port.py", "repo_name": "MultipleCrashes/network_requests", "src_encoding": "UTF-8", "text": "import requests\nimport pprint\nimport json\nimport threading\n\nhost='localhost'\nport=9696\nimport thread\n\ndef get_and_delete_port():\n get_query_string = 'http://' + host + ':' + str(port) + '/v2.0/ports'\n 
print('get query string', get_query_string)\n all_ports = requests.get(get_query_string)\n ports_json = json.loads(all_ports.text)\n port_list = ports_json['ports']\n all_port_ids = []\n for x in port_list:\n # For list of ports , issue a delete request\n all_port_ids.append(x['id'])\n i = 1\n total_port_count = len(all_port_ids)\n while i < total_port_count:\n try:\n t = threading.Thread(target=delete_port, args = (all_port_ids[i:i+200],))\n t.start()\n i= i + 200\n print 'i ->', i\n except Exception as e:\n print('Unable to start thread -> ', str(e))\n\n\ndef delete_port(port_list):\n for ports in port_list:\n print 'Deleting port', ports\n delete_port_query_string = 'http://' + host + ':' + str(port) +'/v2.0/ports/' + ports\n delete_request = requests.delete(delete_port_query_string)\n print 'Delete Response Code -> ', delete_request.status_code\n\nif __name__ == '__main__':\n get_and_delete_port()\n" } ]
num_files: 8
repo_name: rizkymsyahputra/korankemarin
repo_url: https://github.com/rizkymsyahputra/korankemarin
snapshot_id: 4d138a8026d4c7736c11d55f85c37dea77941086
revision_id: 9e7516cde7bb78d6b91d9d18aa028857ea8cbde0
directory_id: f7354eddf9831559ff7096ea072d316a7995b361
branch_name: refs/heads/master
visit_date: 2021-01-10T05:10:13.972873
revision_date: 2016-03-12T02:43:48
committer_date: 2016-03-12T02:43:48
github_id: 53,464,172
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6435160040855408, "alphanum_fraction": 0.6549104452133179, "avg_line_length": 28.253969192504883, "blob_id": "b67151a44cfa70e78ac9bb643b8270f55cc405da", "content_id": "ff2a8a52f24e42fa945365c438844aeda7098822", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1843, "license_type": "no_license", "max_line_length": 122, "num_lines": 63, "path": "/pull.py", "repo_name": "rizkymsyahputra/korankemarin", "src_encoding": "UTF-8", "text": "#!/bin/python\n\n#============================================================\n# Mengimport modul modul yang di perlukan\nimport urllib\nimport urllib.request\nimport os\nimport time\n\n# Ambil jam, lalu di pecah-pecah kan per variable\ngettime=time.strptime(time.ctime())\ntahun= str(gettime[0])\nbulan= str(gettime[1])\ntanggal= str(gettime[2])\njam= str(gettime[3])\nmenit= str(gettime[4])\n\n# Cek jam dan memberi informasi apakah yang akan di download koran kemarin atau hari ini\nif int(jam) > 16:\n print(\"++ FETCHING ALL PAGE OF TODAY SERAMBI EPAPER ++\")\nelse:\n if int(tanggal)>1:\n tanggal= int(tanggal)-1\n tanggal=str(tanggal)\n print(\"++ FETCHING ALL PAGE OF YESTERDAY SERAMBI EPAPER ++\")\n\n# Format nama folder per harian\nfoldername= tahun+\"_\"+bulan+\"_\"+tanggal\n\n# Cek folder untuk mengisi filenya apa sudah ada atau belum\nwhile True:\n try:\n os.chdir(\"Serambi_Epapers\")\n break\n except FileNotFoundError:\n os.system(\"mkdir Serambi_Epapers\")\n os.chdir(\"Serambi_Epapers\")\n break\n\nwhile True:\n try:\n os.chdir(foldername)\n break\n except FileNotFoundError:\n os.mkdir(foldername)\n os.chdir(foldername)\n break\n\n# Eksekusi download file halaman pertama sampai halaman terakhir\nfor page in range(1,25) :\n # Nomer Halaman yang kurang dari 10, di beri digit 0 sebelum nya\n if page<10:\n page=str(page)\n page=\"0\"+page\n # Format Penamaan File\n filename= \"SERAMBI_EPAPER_\"+tahun+\"_\"+bulan+\"_\"+tanggal+\"_PAGE\"+str(page)+\".jpg\"\n # Menampilkan informasi halaman berapa yang sedang di download\n print(\"> DOWNLOADING \"+filename)\n # Eksekusi download per halaman\n urllib.request.urlretrieve(\"https://s3-ap-southeast-1.amazonaws.com/tribun-3/epaper/aceh/\"+str(page)+\".jpg\", filename)\n\n# Selesai\nprint(\"++ DONE!! ENJOY YOUR DAY ++\")\n" } ]
num_files: 1
repo_name: Comput3rZ/GitHub-Pages-Build-Notifier
repo_url: https://github.com/Comput3rZ/GitHub-Pages-Build-Notifier
snapshot_id: 3662685c74b2573fe05c01017b6692ffd1afd316
revision_id: 29f27aefbb9bf9caddce3c88f43bf079d1fda769
directory_id: db1c5dfec80518d099d9c5bf8b2ed1da24d8474e
branch_name: refs/heads/master
visit_date: 2020-03-28T14:54:29.985703
revision_date: 2018-09-12T20:09:12
committer_date: 2018-09-12T20:09:12
github_id: 148,535,112
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5558408498764038, "alphanum_fraction": 0.5571244955062866, "avg_line_length": 19.5, "blob_id": "dd8772d2ee66157d9803e27a6a0840323883062f", "content_id": "ba18a730ee43a4bcfa245d5e33ff4339bd0075e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 779, "license_type": "no_license", "max_line_length": 67, "num_lines": 38, "path": "/app.py", "repo_name": "Comput3rZ/GitHub-Pages-Build-Notifier", "src_encoding": "UTF-8", "text": "import requests\nimport time\nimport os\n\nURL = \"YOUR-URL\"\n\n# The notifier function\ndef notify(title, message):\n t = '-title {!r}'.format(title)\n m = '-message {!r}'.format(message)\n i = '-appIcon {!r}'.format(\"path/to/icon.png\")\n os.system('terminal-notifier {}'.format(' '.join([m, t, i])))\n\ndef Ok():\n # Calling the function\n notify(title = 'Success!',\n message = 'Your GitHub Pages site has been published.')\n\nglobal prev\nprev = \"\"\n\nfirst = True\n\nwhile True:\n url = URL\n response = requests.get(url)\n text = response.text\n\n if (text != prev):\n if (first == True):\n first = False\n else:\n # Change detected\n print(\"Update Detected\")\n Ok()\n prev = text\n\n time.sleep(1)\n" }, { "alpha_fraction": 0.7737705111503601, "alphanum_fraction": 0.7737705111503601, "avg_line_length": 60, "blob_id": "eeb3314e182f7de03e6b741046784cd02646eb06", "content_id": "1827f2e80564d1f4c5315b7bfcadc3a948a2a641", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 610, "license_type": "no_license", "max_line_length": 132, "num_lines": 10, "path": "/README.md", "repo_name": "Comput3rZ/GitHub-Pages-Build-Notifier", "src_encoding": "UTF-8", "text": "# GitHub-Pages-Build-Notifier\nA python script that monitors change to a github pages site (or any site) and sends a custom notification when a change is detected.\n\n# Setup\n* Download and install [julienXX's terminal-notifier](https://github.com/julienXX/terminal-notifier)\n* Download and extract this repository\n* Replace `icon.png` with whatever icon you want to show in the notification (keep the name the same)\n* Change the URL variable in `app.py` to the url of the page to monitor\n* Change `path/to/icon.png` to the complete path to the icon.png file\n* Run the script and wait for a change to be detected\n" } ]
num_files: 2
repo_name: priyanka2109/healthyhacks
repo_url: https://github.com/priyanka2109/healthyhacks
snapshot_id: d17609ca90b1c3756b4a6d564d20a89f0e86bbfc
revision_id: b946b1bf37457434137c7b3b680ad1afcff271b5
directory_id: 240173af2a292931e28601e184542b3a87b52753
branch_name: refs/heads/master
visit_date: 2022-12-19T12:55:43.143742
revision_date: 2020-09-27T14:12:27
committer_date: 2020-09-27T14:12:27
github_id: 299,042,812
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6195005774497986, "alphanum_fraction": 0.6325802803039551, "avg_line_length": 21.41666603088379, "blob_id": "578bd6b9324def5490af923e71cf3eaaa4939f92", "content_id": "0929af59d44096d1c56adf205d9311f18ab4514d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 841, "license_type": "no_license", "max_line_length": 68, "num_lines": 36, "path": "/index.py", "repo_name": "priyanka2109/healthyhacks", "src_encoding": "UTF-8", "text": "import tweepy\r\nimport sys\r\n\r\nclass MyStreamListener(tweepy.StreamListener):\r\n def on_status(self,status):\r\n count=0\r\n while count<4:\r\n print(status.text)\r\n count=count+1\r\n\r\n \r\n def on_error(self,status_code):\r\n cnt=0\r\n while cnt<4:\r\n print(status_code)\r\n cnt=cnt+1\r\n \r\n\r\n\r\nconsumer_key=\"XZlNv3ff7wMPdZQZx3fB1GKVK\"\r\nconsumer_secret=\"zYTqErmiQDHYWH7mZyAFgrUbNYK6xsDrYRBwU3IqN3yRn6AG6o\"\r\naccess_token=\"923811743062663168-4JpwdWOQlSo3hPqS6RtmGqOmuMjuhq6\"\r\naccess_token_secret=\"2utCRjt5FgCGv8jWfs7EzEBsmtOlVtoflPJ5j1PSjeWX1\"\r\n\r\n\r\nauth = tweepy.OAuthHandler(consumer_key,consumer_secret)\r\nauth.set_access_token(access_token, access_token_secret)\r\napi = tweepy.API(auth)\r\n\r\nif (not api):\r\n print(\"Authentication failed!\")\r\n sys.exit(-1)\r\n\r\nmyStreamListener = MyStreamListener()\r\nmyStream = tweepy.Stream(auth = api.auth, listener=myStreamListener)\r\nmyStream.filter(track=[\"news\"])" } ]
num_files: 1
repo_name: acmassac/NTNU
repo_url: https://github.com/acmassac/NTNU
snapshot_id: c017764cbd3ab538417ff397bd313c341f03a2ae
revision_id: caff08e3280813c2e3ee9a36cef655d2fdf04531
directory_id: 6f44eeeb0818970068cfc50c93a758db1b125ed0
branch_name: refs/heads/master
visit_date: 2023-03-16T07:43:20.970480
revision_date: 2019-07-03T08:32:23
committer_date: 2019-07-03T08:32:23
github_id: null
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6001704931259155, "alphanum_fraction": 0.6329923272132874, "avg_line_length": 38.6779670715332, "blob_id": "6d0e03c33aead14756da556e235128c702800b85", "content_id": "ca70d3ff6a700d1d4700d1ec44e40c4426eb7030", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2361, "license_type": "no_license", "max_line_length": 103, "num_lines": 59, "path": "/TFY4115 Fysikk/Øv8/tfy4115_Ov8.py", "repo_name": "acmassac/NTNU", "src_encoding": "UTF-8", "text": "# TFY4115 Øving 8, oppgave 1d.\n# Verlet-integrasjon av svingeligningen med dempning.\n\n# Definerer parametrene (kan endres før hver kjøring av programmet):\n#-------------------------------------------------------------------\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport time\n# Pakkene numpy og matplotlib må installeres manuelt i f.eks IDLE eller pyCharm før scriptet kan kjøre.\n\nomega0 = 1 \ngamma = 0.05 # Typen dempning avgjøres om gamma < = > omega0\ndt=0.1 # Tidsinkrementet\nt = np.arange(0,10,dt) # Tidsintervallet vi integrerer over t=0:dt:10.0 \nx=0*t # Initialiserer posisjonsvektoren til dimensjon lik antall t.\nx[0]=1.0 # Kulas startposisjon.\nx[1]=1.0 # Neste posisjon, hvis ulik x(0) er starthastigheten ulik 0 \n\n# Numerisk løsning:\n#------------------\n\naa=(2-omega0**2*dt**2)/(1+gamma*dt) # Definerer forenklende parametre.\nbb=(1-gamma*dt)/(1+gamma*dt)\n\nfor n in range(1,len(t)-1): #for n=2:(length(t)-1)\n x[n+1]=aa*x[n]-bb*x[n-1] # Verlet-integrasjonen\n\nplt.plot(t,x,'b', label='numerisk') # Numerisk løsning blå strek \nplt.title(r'Dempa pendel')\nplt.ylabel(r'$x$')\nplt.xlabel(r'$t/$s')\nplt.legend(loc='best')\n\n# Vi sammenligner nå med de analytiske løsninger\n# OBS: Uttrykkene er riktige bare når starthastighet = 0 , dvs. x(0)=x(1)\n#------------------\nif (gamma < omega0): # Underkritisk demping \n omegad=math.sqrt(omega0**2-gamma**2)\n phi=math.atan(-gamma/omegad) # Fasevinkel, antar starthastighet = 0\n aa=x[1]/math.cos(phi) # Amplitude A\n for n in range(0,len(t)-1): \n x[n]=aa*math.exp(-gamma*t[n])*math.cos(omegad*t[n]+phi)\n\nif (gamma > omega0): # Overkritisk demping gamma > omega0 \n omegad=math.sqrt(gamma**2-omega0**2)\n aa=x[1]*0.5*(1+gamma/omegad) # Amplitudeverdi A, antar starthastighet = 0\n bb=x[1]*0.5*(1-gamma/omegad)# Amplitudeverdi B, antar starthastighet = 0\n for n in range(0,len(t)-1):\n x[n]=math.exp(-gamma*t[n])*(aa*math.exp(omegad*t[n])+bb*math.exp(-omegad*t[n]))\nif (gamma == omega0): # Kritisk demping\n aa=x[1] # Amplitudeverdi A\n bb=x[1]*gamma # Amplitudeverdi B, antar starthastighet = 0\n for n in range(0,len(t)-1):\n x[n]=(aa + bb*t[n]) * math.exp(-gamma*t[n])\n \nplt.plot(t,x,'r', label='analytisk') # Analytisk løsning rød strek\nplt.legend(loc='best')\nplt.show() \n\n\n" }, { "alpha_fraction": 0.703125, "alphanum_fraction": 0.7109375, "avg_line_length": 14.875, "blob_id": "6b637319f9ddd1cf9136fc73e639de3d4d4e2f4e", "content_id": "eca0c23ffd0c74ec5db022690a4694e1643b4a76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 130, "license_type": "no_license", "max_line_length": 40, "num_lines": 8, "path": "/README.md", "repo_name": "acmassac/NTNU", "src_encoding": "UTF-8", "text": "# NTNU\nFag ol fra mastern\n\nWikipendum er en fin ting å ha <3\n\nSe her også: \n- https://github.com/RoboKitchen/recipes\n- mttk.no \n" } ]
num_files: 2
repo_name: vjpie2002/ndscheduler
repo_url: https://github.com/vjpie2002/ndscheduler
snapshot_id: 85fb363790a4a0f98dbd0bb986580c2d4f896069
revision_id: 5131d44a07f6dab0aa51decb512485a9fcb5c83f
directory_id: 901428c21ddac7a3fac9f66d7b16bd04b3d3b3f6
branch_name: refs/heads/master
visit_date: 2021-07-14T06:22:08.147588
revision_date: 2017-10-13T22:11:56
committer_date: 2017-10-13T22:11:56
github_id: 106,716,323
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: 2017-10-12T16:12:48
gha_updated_at: 2017-10-03T04:58:02
gha_pushed_at: 2017-09-21T13:21:16
gha_language: null
files:
[ { "alpha_fraction": 0.5927750468254089, "alphanum_fraction": 0.5977011322975159, "avg_line_length": 29.450000762939453, "blob_id": "c5fa104a5d3a90d77e1ec9e9a017a30b9089cc58", "content_id": "1c9ed04e2249af7c4b1dc800af44d2b010021fa9", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 609, "license_type": "permissive", "max_line_length": 77, "num_lines": 20, "path": "/simple_scheduler/scheduler.py", "repo_name": "vjpie2002/ndscheduler", "src_encoding": "UTF-8", "text": "\"\"\"Run the scheduler process.\"\"\"\n\nfrom ndscheduler.server import server\n\n\nclass SimpleServer(server.SchedulerServer):\n\n def post_scheduler_start(self):\n # New user experience! Make sure we have at least 1 job to demo!\n jobs = self.scheduler_manager.get_jobs()\n if len(jobs) == 0:\n self.scheduler_manager.add_job(\n job_class_string='simple_scheduler.jobs.sample_job.ShellJob',\n name='QBO.Restart Job',\n pub_args=[\"/mnt/scheduler/bin/qbo.restart\"],\n minute='*/1')\n\n\nif __name__ == \"__main__\":\n SimpleServer.run()\n" }, { "alpha_fraction": 0.5555555820465088, "alphanum_fraction": 0.5728394985198975, "avg_line_length": 14.576923370361328, "blob_id": "06b01d51746ea6d203439f3398122eb35c0e6578", "content_id": "ea1ef167d8d9bcda55989a759e58b004bcf42299", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 405, "license_type": "permissive", "max_line_length": 41, "num_lines": 26, "path": "/simple_scheduler/docker/crons/qbo.restart", "repo_name": "vjpie2002/ndscheduler", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nSTOP_OPTION=none\n\nif [ \"x$1\" != \"x\" ]; then\n STOP_OPTION=$1\nfi\n\ncd `dirname $0`/.. || exit 1\nhier=`pwd`\n\n#\n# Restart qbo client on current server\n#\n\necho \"`date +%Y%m%d.%H%M%S` $0: Started\"\n\nif [ \"$STOP_OPTION\" = \"tdump\" ]; then\n $SUDO \"$hier/bin/qbo.stop tdump\"\nelse\n $SUDO $hier/bin/qbo.stop\nfi\nsleep 1\n$SUDO $hier/bin/qbo.start\n\necho \"`date +%Y%m%d.%H%M%S` $0: Finished\"\n" }, { "alpha_fraction": 0.6073697805404663, "alphanum_fraction": 0.6149936318397522, "avg_line_length": 23.984127044677734, "blob_id": "ef1a711d28724a87b4636ec70269157c13128f61", "content_id": "6b16bfdb50bbc650cee58a6aa9e3845cff62f2eb", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1574, "license_type": "permissive", "max_line_length": 102, "num_lines": 63, "path": "/simple_scheduler/docker/crons/qbo.start", "repo_name": "vjpie2002/ndscheduler", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\n\necho \"`date +%Y%m%d.%H%M%S` $0: Started\"\n\nhost=`hostname | sed -e's/\\..*//'`\n\nbindir=`dirname $0`\nhier=`cd $bindir/.. && pwd`\nproduct=`echo $hier | sed -e's/.*\\///'`\ncluster=`echo $product | sed -e's/qboc/c/' -e's/qbo/c1/'`\n\n\[email protected]\nexport JSERV_LOOP_NOTIFY\n\nif test -d $hier/tomcat ; then\n QBNSERVER=tomcat\nfi\n\nJAVA14=1\nexport JAVA14\n\njavacmd=\nif [ -h /usr/local/java-$product ]; then\n javacmd=\"-java /usr/local/java-$product/bin/java\"\nfi\n\n# Watch the startup, and block/unblock the tcp port until it's online\nif [ -x /ops/bin/qbo_guard.pl ]; then \n # If in DR mode, do not run qbo_guard.pl\n if [ ! 
-f /etc/qbo/qbo.this-is-dr-site.${cluster} ]; then\n nohup sudo /ops/bin/qbo_guard.pl $product start &\n fi\nelse\n echo \"ERROR: /ops/bin/qbo_guard.pl is missing!\"\nfi\n\nif [ -n \"$SUDO\" ]; then\n cd /tmp # for AD file perms issues\n $SUDO \"$bindir/tomcat.start -salsa $javacmd\"\nelse\n $bindir/tomcat.start -salsa $javacmd\nfi\n\nt=`grep -v '^\\s*#' $hier/conf/hosts.conf | awk '$2==\"app\" { print $1}' | cut -d\\. -f1| grep \"^$host$\"`\nif [ -z \"$t\" ]; then\n echo\n echo\n echo \"ERROR: $hier/conf/hosts.conf missing $host app configuration\"\n echo\n echo\n if [ -n \"$SUDO\" ]; then\n cd /tmp # for AD permissions issues\n echo \"$hier/conf/hosts.conf missing $host app configuration\" | \\\n /bin/mail -s \"Bad hosts.conf\" [email protected]\n else\n echo \"$hier/conf/hosts.conf missing $host app configuration\" | \\\n $SUDO \"/bin/mail -s \\\"Bad hosts.conf\\\" [email protected]\"\n fi\nfi\n\necho \"`date +%Y%m%d.%H%M%S` $0: Finished\"\n" }, { "alpha_fraction": 0.5524752736091614, "alphanum_fraction": 0.5613861680030823, "avg_line_length": 26.29729652404785, "blob_id": "0b3aa7de2209d345ece30dc9aa8bf54ee52797cc", "content_id": "7711e663f2d9eea54994203080d477aa63f1677b", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2020, "license_type": "permissive", "max_line_length": 131, "num_lines": 74, "path": "/simple_scheduler/docker/crons/qbo.stop", "repo_name": "vjpie2002/ndscheduler", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nSTOP_OPTION=none\n\nif [ \"x$1\" != \"x\" ]; then\n STOP_OPTION=$1\nfi\n\nPATH=/l/bin:/usr/local/f7/bin:/bin:$PATH\nexport PATH\n\necho \"`date +%Y%m%d.%H%M%S` $0: Started\"\n\nbindir=`dirname $0`\nhier=`cd $bindir/.. && pwd`\nhiername=`basename $hier`\nproduct=`echo $hier | sed -e's/.*\\///'`\n\nif test -d $hier/tomcat ; then\n QBNSERVER=tomcat\nfi\n\njavacmd=\n\nif [ -h /usr/local/java-$product ]; then\n javacmd=\"-java /usr/local/java-$product/bin/java\"\nfi\nserver=tomcat\n\nlogdir=/l/httpd/logs/$hiername/$server\nserver_pidfile=$logdir/$server.pid\nloop_pidfile=$logdir/loop.pid\ncount=3\ndelay=5\n\nif [ \"$STOP_OPTION\" != \"quick\" ]; then\n if [ -x /ops/bin/qbo_guard.pl ]; then\n sudo /ops/bin/qbo_guard.pl $product stop\n else\n echo \"ERROR: /ops/bin/qbo_guard.pl is missing!\"\n fi\n\n if [ \"$STOP_OPTION\" = \"tdump\" ]; then\n pid=`cat $server_pidfile`\n echo \"Taking thread-dumps before stopping the instance. 
pid : $pid \"\n while [ $count -gt 0 ]\n do\n /usr/local/java-$product/bin/jstack -F $pid >> $logdir/jstack.$pid.$(date +%Y%m%d).dmp & dumppid=$!\n ( sleep $delay && kill -HUP $dumppid ) 2>/dev/null\n let count--\n done\n fi\nfi\n\nif [ -n \"$SUDO\" ]; then\n cd /tmp # done because of permission issues on different dir and because of AD\n $SUDO \"$bindir/$server.stop $javacmd\"\nelse\n $bindir/$server.stop $javacmd\nfi\n\nwhile test -s $loop_pidfile && kill -0 \"`cat $loop_pidfile`\" 2>/dev/null; do\n echo \"$0 - waiting for $server.loop [`cat $loop_pidfile`] to terminate\"\n proc -h \"`cat $loop_pidfile`\"\n sleep 2\ndone\n\nwhile test -s $server_pidfile && kill -0 \"`cat $server_pidfile`\" 2>/dev/null; do\n echo \"$0 - waiting for $server [`cat $server_pidfile`] to terminate\"\n proc -h \"`cat $server_pidfile`\"\n sleep 2\ndone\n\necho \"`date +%Y%m%d.%H%M%S` $0: Finished\"\n" }, { "alpha_fraction": 0.6826782822608948, "alphanum_fraction": 0.7030567526817322, "avg_line_length": 30.18181800842285, "blob_id": "ab1792bca62e93d5d2abade53e5d4c7da1426315", "content_id": "f682d4d4191933b8e60ff02568490ae8c54fc954", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 687, "license_type": "permissive", "max_line_length": 88, "num_lines": 22, "path": "/simple_scheduler/docker/Dockerfile", "repo_name": "vjpie2002/ndscheduler", "src_encoding": "UTF-8", "text": "FROM ubuntu:14.04\n\nMAINTAINER Vijay Dubey <[email protected]>\n\nRUN apt-get -qq update && \\\n apt-get -qq install python-virtualenv git && \\\n apt-get clean && \\\n rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\n\nRUN virtualenv /mnt/scheduler && \\\n . /mnt/scheduler/bin/activate && \\\n pip install -e git+https://github.com/vjpie2002/ndscheduler.git#egg=ndscheduler && \\\n pip install -r /mnt/scheduler/src/ndscheduler/simple_scheduler/requirements.txt\n\nADD apns.pem /mnt/scheduler/\nADD crons/* /mnt/scheduler/bin/\nRUN chmod 777 /mnt/scheduler/bin/*\n\nADD run_scheduler /mnt/scheduler/bin/run_scheduler\nRUN chmod 755 /mnt/scheduler/bin/run_scheduler\n\nCMD [\"/mnt/scheduler/bin/run_scheduler\"]\n\n" }, { "alpha_fraction": 0.670412003993988, "alphanum_fraction": 0.6762002110481262, "avg_line_length": 32.75862121582031, "blob_id": "a7bbbc98cc1bfcaffc0331a1a44894783851337b", "content_id": "a1fde0bab68352347b5f76c5108cb59223b381ca", "detected_licenses": [ "BSD-2-Clause" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2937, "license_type": "permissive", "max_line_length": 133, "num_lines": 87, "path": "/simple_scheduler/docker/crons/run_spinnaker.cron", "repo_name": "vjpie2002/ndscheduler", "src_encoding": "UTF-8", "text": "#!/bin/sh\n# Wrapper script to run crons on AWS\n# Usage: /app/qbo-app/bin/run_spinnaker.cron <script> [script-args]\n# E.g. /app/qbo-app/bin/run_spinnaker.cron daily-companyupdates_spinnaker.cron Weekends\n\ncd `dirname $0`/.. 
|| exit 2\nhier=`pwd`\nPATH=/usr/local/bin:$hier/bin:/bin:$PATH\nexport PATH\n\nif [ $# -eq 0 ]; then\n echo \"@@@ No arguments passed ...exiting @@@\"\n exit 2\nfi\n\ncmd=$1\nshift\ntool=`basename $cmd`\ntoday=`date \"+%Y%m%d-%H%M\"`\n\n# Safety net: Do not trigger crons for inactive or passivated stacks in AWS\nPASSIVATE_LOG=$hier/logs/cron_activate_passivate.log\nIS_INACTIVE=\"true\"\n\nif [ -f $hier/conf/dynamic_properties/qbo.dynamic.properties ]; then\n prop_value=`cat $hier/conf/dynamic_properties/qbo.dynamic.properties | grep qbo.this.cluster.is.inactive | tail -1 | cut -d'=' -f2`\n if [[ \"$prop_value\" = \"false\" || -z $prop_value ]]; then\n IS_INACTIVE=\"false\"\n fi\nelse\n echo \"@@@ $hier/conf/dynamic_properties/qbo.dynamic.properties not found ...exiting @@@\"\n exit 2\nfi\n\nif [ -f /etc/intu_metadata/app.ini ]; then\n IS_AWS=\"true\"\n # Wrapper to fetch cluster params\n . $hier/bin/fetchClusterParams.sh\nelse\n echo \"@@@ Metadata file: \\'/etc/intu_metadata/app.ini\\' not found ...exiting @@@\"\n exit 2\nfi\n\nif [ \"$IS_AWS\" == \"true\" -a \"$IS_INACTIVE\" == \"true\" ]; then\n echo \"`date`: ${cluster} AWS stack is \\\"Inactive\\\". Crons are passivated [job-name: $tool]\" >> $PASSIVATE_LOG\n exit 0\nfi\n\n# Make sure cron directory exists else create it & ensure permissions are set\ntest -d $hier/logs/cron || mkdir -p $hier/logs/cron\nchown app:appeng $hier/logs/cron\nlog_file=$hier/logs/cron/cronlog.$tool.$today\n\n# Abort if cron is triggered from other host other than admin-app\nif [ \"${role}-${subrole}\" != \"admin-app\" ]; then\n echo \"@@@ Cronjobs must be triggered only from admin-app ...exiting @@@\"\n exit 2\nfi\n\n# Check for left-over or hung cronjobs\nreal_name=$hier/bin/$tool\nmessage=\"Environment: ${app_env} Cluster: ${cluster} Region: ${region}\"\nprocs=`ps -ef | grep $real_name | egrep -v \"$0|grep\"`\nif test -n \"$procs\"; then\n (echo \"@@@ `hostname`: $0 leftover cronjob \\\"$tool\\\" detected. $message. Please fix and re-trigger cron @@@\"; \\\n echo \"$procs\"; \\\n proc $real_name) | \\\n exit 2\nfi\n\n# As we move towards a cluster agnostic directory structure with Spinnaker,\n# print cluster name in the logs for better clarity\necho \"${cluster} run_spinnaker.cron is running from `pwd` at `date`\" | tee $log_file\necho \"Job-name: $tool\" | tee -a $log_file\necho \"Running $cmd $@\" | tee -a $log_file\n$cmd \"$@\" | tee -a $log_file\n\n# Return proper exit code if cronjob fails\n# $PIPESTATUS is an array that holds the exit status of each command in your last foreground pipeline of commands\nret_code=`echo ${PIPESTATUS[0]}`\nif [ $ret_code != 0 ]; then\n echo \"@@@ ${cluster}: $tool job failed to run @@@\"\n exit 2\nfi\n\n# For successful run of cron\necho \"${cluster} run_spinnaker.cron finished at `date`\" | tee -a $log_file\n" } ]
num_files: 6
repo_name: salimmoulouel/english-dictionary
repo_url: https://github.com/salimmoulouel/english-dictionary
snapshot_id: 65f9c08b7e2a5d2451132ff9e437f590c9383694
revision_id: dddd557d550bb887185ce223d30c6922099a0cf7
directory_id: 66cefc7fa91939bbf76c39f3fe444cad1c0129eb
branch_name: refs/heads/master
visit_date: 2023-02-16T05:26:20.191486
revision_date: 2021-01-15T20:59:55
committer_date: 2021-01-15T20:59:55
github_id: 330,020,757
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5941343307495117, "alphanum_fraction": 0.5969725847244263, "avg_line_length": 26.102563858032227, "blob_id": "85b7c7a915e72a9074509f0205ed3d3e10bfd47f", "content_id": "58c4b7138bd5d2543baf235acb52c569a6d73059", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1057, "license_type": "no_license", "max_line_length": 77, "num_lines": 39, "path": "/dict_source_reader.py", "repo_name": "salimmoulouel/english-dictionary", "src_encoding": "UTF-8", "text": "\"\"\"\nthis application provide the definition of different words in english\nit's a kind of dictionary, this is the first version (terminal mode).\nnext i will try to add graphical interface and database acess\n\"\"\"\nimport json\nfrom difflib import get_close_matches\n\n\ndata = json.load(open(\"data/data.json\"))\n\n\ndef translate(w):\n w = w.lower()\n if w in data:\n return data[w]\n else:\n matches = get_close_matches(w, data.keys())\n if(len(matches) > 0):\n response = input(\"did you mean {} instead y for yes and n for no\"\n .format(matches[0]))\n if(response == \"y\"):\n return data[matches[0]]\n elif(response == \"N\"):\n return \"the word does'nt exist\"\n else:\n return \"we didn't understand the entry\"\n return \"The word doesn't exist. Please check it.\"\n\n\nword = input(\"enter a word: \")\n\noutpout = translate(word)\n\nif type(outpout) == list:\n for translation in outpout:\n print(translation)\nelse:\n print(outpout)\n" } ]
num_files: 1
repo_name: Fanshenium/amvnews-download
repo_url: https://github.com/Fanshenium/amvnews-download
snapshot_id: 88d2d64e18b5309e550e794008cb9c02905dceba
revision_id: 47b5470436acfed3fbeb43c99ededa3e7fef5449
directory_id: b6a2e9db942dafb23717d2c23a75429d78c83db4
branch_name: refs/heads/master
visit_date: 2019-07-11T23:38:24.501232
revision_date: 2015-09-12T14:53:59
committer_date: 2015-09-12T14:53:59
github_id: 42,359,464
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_created_at: 2015-09-12T14:16:03
gha_updated_at: 2013-09-30T13:04:33
gha_pushed_at: 2013-09-06T11:32:08
gha_language: null
files:
[ { "alpha_fraction": 0.6031178832054138, "alphanum_fraction": 0.6073400378227234, "avg_line_length": 27.509260177612305, "blob_id": "e87216fc2d603593e58af49b94635f026837c365", "content_id": "fafcf14dd72a22a7437bd17612dd3cef9bd3ca68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3079, "license_type": "no_license", "max_line_length": 80, "num_lines": 108, "path": "/core/views.py", "repo_name": "Fanshenium/amvnews-download", "src_encoding": "UTF-8", "text": "import requests\n\nfrom django.shortcuts import redirect, render, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse\nfrom django.db.models import Q\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom lxml.html import fromstring\nfrom lxml import etree\n\nfrom .models import AMV, UserToAMV\nfrom .forms import AMVSearchForm\n\n\ndef search_results(request):\n query = request.GET.get('q')\n req = requests.get(\n 'http://amvnews.ru/index.php',\n params={\n 'go': 'Search',\n 'modname': 'Files',\n 'query': query\n }\n )\n doc = fromstring(req.text)\n amv_links = doc.xpath('//td[@valign=\"top\"]/b/a')\n amv_links = [\n {'title': l.text, 'id': l.attrib['href'].split('&')[-1].split('=')[1]}\n for l in amv_links\n ]\n return render(request, 'core/search_results.html', {\n 'object_list': amv_links,\n 'form': AMVSearchForm()\n })\n\n\ndef add_amv(request, amv_id):\n amv, new = AMV.objects.get_or_create(amvnews_id=amv_id)\n if new:\n doc = fromstring(requests.get(amv.get_url()).text, 'http://amvnews.ru/')\n doc.make_links_absolute('http://amvnews.ru/')\n image_url = doc.xpath('//p[@align=\"center\"]/img')[0].attrib['src'][17:]\n print image_url\n info = ''.join(map(\n etree.tostring, doc.xpath('//p[@align=\"justify\"]')\n ))\n amv.image_url = image_url\n amv.title = doc.xpath('//td[@class=\"newstitle\"]')[0].text.strip()\n amv.info = info\n amv.save()\n amvtouser, new = UserToAMV.objects.get_or_create(\n amv=amv,\n user=request.user,\n defaults={\n 'position': UserToAMV.objects.filter(user=request.user).count()\n }\n )\n if not new:\n return HttpResponse('')\n return render(request, 'core/object.html', {\n 'object': amv\n })\n\n\n@login_required\ndef index(request):\n return render(request, 'core/index.html', {\n 'object_list': AMV.objects.filter(\n usertoamv__user=request.user\n ).order_by('-usertoamv__position')\n })\n\n\n@login_required\ndef object_list(request):\n return render(request, 'core/object_list.html', {\n 'object_list': AMV.objects.filter(usertoamv__user=request.user)\n }).order_by('-usertoamv__position')\n\n\n@login_required\ndef delete_amv(request, pk):\n UserToAMV.objects.filter(user=request.user, amv_id=pk).delete()\n return redirect('/')\n\n\n@login_required\ndef play_amv(request, pk):\n obj = get_object_or_404(AMV, pk=pk)\n r = requests.head(obj.get_download_url())\n ext = r.headers['location'].split('.')[-1]\n return render(request, 'core/play_amv.html', {\n 'object': obj,\n 'added': bool(UserToAMV.objects.filter(user=request.user, amv=obj)),\n 'ext': ext\n })\n\n\n@csrf_exempt\n@login_required\ndef sort(request):\n for key, value in request.POST.items():\n UserToAMV.objects.filter(\n user=request.user,\n amv_id=value\n ).update(position=key)\n return HttpResponse('OK')\n" }, { "alpha_fraction": 0.7463884353637695, "alphanum_fraction": 0.7463884353637695, "avg_line_length": 30.149999618530273, "blob_id": "f8c99e8f8ac8addc9b35777e876b44f7dc13d598", "content_id": 
"6327bb3b3d9c4766f7bd567ae632a11c724b5472", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 623, "license_type": "no_license", "max_line_length": 87, "num_lines": 20, "path": "/project/urls.py", "repo_name": "Fanshenium/amvnews-download", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.conf.urls.static import static\nfrom django.conf import settings\n\nfrom registration.forms import RegistrationFormUniqueEmail\n\nfrom auth.views import RegistrationView\n\n\nadmin.autodiscover()\n\nurlpatterns = patterns(\n '',\n url(r'^admin/', include(admin.site.urls)),\n\n url(r'^', include('core.urls')),\n url(r'^auth/register/$', RegistrationView.as_view(), name='registration_register'),\n url(r'^auth/', include('registration.backends.simple.urls')),\n) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n" }, { "alpha_fraction": 0.6428571343421936, "alphanum_fraction": 0.738095223903656, "avg_line_length": 13, "blob_id": "98422a4318a1b565099ac490b4540858198e38a0", "content_id": "eb319d7245bc0ee3b7e8e815ef4aec7cab71c45d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 42, "license_type": "no_license", "max_line_length": 24, "num_lines": 3, "path": "/requirements.txt", "repo_name": "Fanshenium/amvnews-download", "src_encoding": "UTF-8", "text": "--requirement=common.txt\n\npsycopg2==2.4.5\n" }, { "alpha_fraction": 0.7623456716537476, "alphanum_fraction": 0.7623456716537476, "avg_line_length": 28.454545974731445, "blob_id": "dea7c2fdd9b0d3f3a381356919c8dfa045a61c19", "content_id": "48f1acbe66600af806b97bbe9c678494d7715c78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 324, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/auth/views.py", "repo_name": "Fanshenium/amvnews-download", "src_encoding": "UTF-8", "text": "from registration.backends.simple.views import RegistrationView\nfrom registration.forms import RegistrationFormUniqueEmail\n\n\nclass RegistrationView(RegistrationView):\n\n def get_form_class(self, *args, **kwargs):\n return RegistrationFormUniqueEmail\n\n def get_success_url(self, request, user):\n return '/'\n" }, { "alpha_fraction": 0.5958083868026733, "alphanum_fraction": 0.601047933101654, "avg_line_length": 25.19607925415039, "blob_id": "a504fc21874a5bac185bb0c681dc99a65b1558c6", "content_id": "6108607657a6a286aee55cda6d79ff89e8bc65a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1336, "license_type": "no_license", "max_line_length": 73, "num_lines": 51, "path": "/core/models.py", "repo_name": "Fanshenium/amvnews-download", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass AMV(models.Model):\n\n users = models.ManyToManyField(User, through='UserToAMV')\n title = models.CharField(max_length=255)\n amvnews_id = models.IntegerField()\n image_url = models.CharField(max_length=255)\n info = models.TextField()\n\n class Meta:\n verbose_name = 'AMV'\n verbose_name_plural = 'AMV'\n ordering = ['-pk']\n\n def get_download_url(self):\n return 'http://amvnews.ru/index.php?go=Files&file=down&id=%s' % \\\n self.amvnews_id\n\n def get_url(self):\n return 'http://amvnews.ru/index.php?go=Files&in=view&id=%s' % \\\n self.amvnews_id\n\n def get_users_list(self):\n s = '<ul>'\n 
for user in self.users.all():\n            s += '<li>%s</li>' % user.username\n        s += '</ul>'\n        return s\n\n    get_users_list.allow_tags = True\n\n    def __unicode__(self):\n        return self.title\n\n    @models.permalink\n    def get_delete_url(self):\n        return ('delete_amv', (self.pk, ), {})\n\n    @models.permalink\n    def get_play_url(self):\n        return ('core.views.play_amv', (self.pk, ), {})\n\n\nclass UserToAMV(models.Model):\n\n    user = models.ForeignKey(User)\n    amv = models.ForeignKey(AMV)\n    position = models.PositiveIntegerField(default=0, db_index=True)\n" }, { "alpha_fraction": 0.6510263681411743, "alphanum_fraction": 0.6510263681411743, "avg_line_length": 33.099998474121094, "blob_id": "f73bfff0153adcbf613b9a0f7a5ae80cc479c0c2", "content_id": "05bf11a317c2aec69689cf53a817dad5b50daef9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 70, "num_lines": 10, "path": "/auth/urls.py", "repo_name": "Fanshenium/amvnews-download", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, url\n\nurlpatterns = patterns(\n    '',\n\n    url(r'^register$', 'auth.views.register', name='register'),\n    url(r'^login$', 'django.contrib.auth.views.login', name='login'),\n    url(r'^logout$', 'django.contrib.auth.views.logout',\n        {'template_name': 'registration/logout.html'}, name='logout'),\n)\n" }, { "alpha_fraction": 0.7325581312179565, "alphanum_fraction": 0.7325581312179565, "avg_line_length": 17.200000762939453, "blob_id": "df5521825ac301e4bc1acd85bc59f71fc53e1f98", "content_id": "09da77928a02a8acc6fd0856c166191a9a890ce8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 86, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/core/forms.py", "repo_name": "Fanshenium/amvnews-download", "src_encoding": "UTF-8", "text": "from django import forms\n\n\nclass AMVSearchForm(forms.Form):\n    q = forms.CharField()\n" }, { "alpha_fraction": 0.5679012537002563, "alphanum_fraction": 0.5679012537002563, "avg_line_length": 36.30769348144531, "blob_id": "876a4f6d6c95c4e5bdab777c7c92844698382231", "content_id": "1b6f2f1e786354d5d5c97c4a5de7379ff320a28f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 486, "license_type": "no_license", "max_line_length": 66, "num_lines": 13, "path": "/core/urls.py", "repo_name": "Fanshenium/amvnews-download", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, url\n\nurlpatterns = patterns(\n    'core.views',\n\n    url(r'^$', 'index', name='index'),\n    url(r'^add/(?P<amv_id>\\\d+)$', 'add_amv', name='add_amv'),\n    url(r'^delete/(?P<pk>\\\d+)$', 'delete_amv', name='delete_amv'),\n    url(r'^results$', 'search_results', name='search_results'),\n    url(r'^object_list$', 'object_list', name='object_list'),\n    url(r'^play/(?P<pk>\\\d+)$', 'play_amv', name='play_amv'),\n    url(r'^sort$', 'sort', name='sort'),\n)\n\n" }, { "alpha_fraction": 0.6767241358757019, "alphanum_fraction": 0.6767241358757019, "avg_line_length": 18.33333396911621, "blob_id": "8955459c4df8a6deab9ba6982dc71a44a57d31fb", "content_id": "34916070159e60f163693a3be4fa1c12465a76c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 232, "license_type": "no_license", "max_line_length": 58, "num_lines": 12, "path": "/core/admin.py", "repo_name": "Fanshenium/amvnews-download", "src_encoding": "UTF-8", "text": "from django.contrib import 
admin\nfrom .models import AMV, UserToAMV\n\nadmin.site.register(\n AMV,\n list_display=['title', 'amvnews_id', 'get_users_list']\n)\n\nadmin.site.register(\n UserToAMV,\n list_display=['user', 'amv']\n)\n" }, { "alpha_fraction": 0.5738636255264282, "alphanum_fraction": 0.5795454382896423, "avg_line_length": 16.600000381469727, "blob_id": "d25c76ed6f25aa6ba719af9f0da7d85a604f432c", "content_id": "c795ac88dcd89163f6981bc6c5d4dea5f4ae5761", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 176, "license_type": "no_license", "max_line_length": 59, "num_lines": 10, "path": "/project/settings/production.py", "repo_name": "Fanshenium/amvnews-download", "src_encoding": "UTF-8", "text": "from common import *\n\nDEBUG = TEMPLATE_DEBUG = False\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'amv',\n }\n}\n" }, { "alpha_fraction": 0.6045548915863037, "alphanum_fraction": 0.6045548915863037, "avg_line_length": 23.149999618530273, "blob_id": "a79c3bfd45f9964aeeb1b3f9271fe5aef2c52a5c", "content_id": "b9e97e1a2f92c279fe3bd405dbaf44d73f81110f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 483, "license_type": "no_license", "max_line_length": 66, "num_lines": 20, "path": "/core/management/commands/parse_info.py", "repo_name": "Fanshenium/amvnews-download", "src_encoding": "UTF-8", "text": "import requests\n\nfrom django.core.management.base import BaseCommand\n\nfrom lxml.html import fromstring\nfrom lxml import etree\n\nfrom core.models import AMV\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **kwargs):\n qs = AMV.objects.all()\n for obj in qs:\n doc = fromstring(requests.get(obj.get_url()).text)\n obj.info = ''.join(map(\n etree.tostring, doc.xpath('//p[@align=\"justify\"]')\n ))\n obj.save()\n" }, { "alpha_fraction": 0.47145670652389526, "alphanum_fraction": 0.4812992215156555, "avg_line_length": 27.3798885345459, "blob_id": "ade6dda0ca97ec1119e68380dda30d36b408ea00", "content_id": "cd83efa038263f7268a7abe7ccbbef30cfd21d4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5080, "license_type": "no_license", "max_line_length": 92, "num_lines": 179, "path": "/project/static/js/main.js", "repo_name": "Fanshenium/amvnews-download", "src_encoding": "UTF-8", "text": "// Generated by CoffeeScript 1.6.3\njQuery(function() {\n var Hash, hash, search;\n Hash = (function() {\n function Hash() {\n this._data = {};\n this.deserialize();\n }\n\n Hash.prototype.serialize = function() {\n var key, value;\n this.hash = '#' + ((function() {\n var _ref, _results;\n _ref = this._data;\n _results = [];\n for (key in _ref) {\n value = _ref[key];\n if (value.length > 0) {\n _results.push(key + '=' + value);\n }\n }\n return _results;\n }).call(this)).join('&');\n if (this.hash === '#') {\n this.hash = '#!';\n }\n return document.location.hash = this.hash;\n };\n\n Hash.prototype.deserialize = function() {\n var a, data, i, _i, _len, _results;\n this.hash = document.location.hash;\n this._data = {};\n data = this.hash.replace('#', '').split('&');\n _results = [];\n for (_i = 0, _len = data.length; _i < _len; _i++) {\n i = data[_i];\n a = i.split('=');\n if (a.length === 2 && a[0] !== void 0 && a[1] !== void 0) {\n _results.push(this._data[a[0]] = a[1]);\n } else {\n _results.push(void 0);\n }\n }\n return _results;\n };\n\n Hash.prototype.data = function(key, value) {\n if (key === void 0 
&& value === void 0) {\n return this._data;\n } else if (value !== void 0 && key !== void 0) {\n this._data[key] = value.length > 0 ? value : '';\n return this.serialize();\n } else if (key !== void 0) {\n return this._data[key];\n }\n };\n\n return Hash;\n\n })();\n hash = new Hash();\n $('#elements').on('click', '.item2', function(event) {\n hash.data('amv', $(this).data('id').toString());\n return $.get($(this).data('url'), function(data) {\n $('#play_modal .modal-body').html(data);\n $('#play_modal .modal-header h3').html($('#play_modal .modal-body .h3-title').html());\n return $('#play_modal').modal();\n });\n });\n $('#play_modal').on('hidden', function(event) {\n $(this).find('.modal-body').html('');\n return hash.data('amv', '');\n });\n $('#elements').on('click', '.item2 .item2delete', function(event) {\n $.get($(this).find('a').attr('href'));\n $(this).closest('.item2').remove();\n return false;\n });\n $('#add_amv').on('click', function(event) {\n return $('#add_amv_modal').modal();\n });\n $('#search-form').on('submit', function(event) {\n var form;\n form = $(this);\n $.ajax({\n type: form.attr('method'),\n url: form.attr('action'),\n data: form.serialize(),\n success: function(data) {\n return $('#results').html(data);\n }\n });\n return false;\n });\n $('#results').on('click', '.add_amv', function(event) {\n $.get($(this).val(), function(data) {\n return $('#elements').prepend(data);\n });\n $(this).closest('tr').remove();\n return event.preventDefault();\n });\n search = function() {\n var str;\n str = $('#inputSearch').val();\n hash.data('search', str.toString());\n if (str !== '') {\n $('.item2').each(function() {\n if (($(this).attr('data-title').toLowerCase()).indexOf(str.toLowerCase()) === -1) {\n return $(this).removeClass('true-search').addClass('non-search');\n } else {\n return $(this).removeClass('non-search').addClass('true-search');\n }\n });\n $('.non-search').stop().animate({\n 'opacity': 0\n }, 250, function() {\n return $(this).hide();\n });\n } else {\n $('.item2').removeClass('non-search').addClass('true-search');\n }\n return $('.true-search').show().stop().animate({\n 'opacity': 1\n }, 250);\n };\n $('#inputSearch').keyup(function() {\n return search();\n });\n if (hash.data('search')) {\n $('#inputSearch').val(hash.data('search'));\n search();\n }\n if (hash.data('amv')) {\n $.get('/play/' + hash.data('amv'), function(data) {\n $('#play_modal .modal-body').html(data);\n $('#play_modal .modal-header h3').html($('#play_modal .modal-body .h3-title').html());\n $('#play_modal').modal();\n return $('.add-amv').click(function(e) {\n console.log(1);\n $.get($(this).val(), function(data) {\n return $('#elements').prepend(data);\n });\n $(this).remove();\n return false;\n });\n });\n }\n $(\"#elements\").sortable({\n helper: 'clone',\n cursor: 'move',\n distance: 20,\n zIndex: 100,\n reverte: 300,\n update: function(event, ui) {\n var arr, data, i, id, _i, _len;\n arr = $('#elements').sortable('toArray', {\n key: 'amv',\n attribute: 'data-id'\n }).reverse();\n data = {};\n i = 0;\n for (_i = 0, _len = arr.length; _i < _len; _i++) {\n id = arr[_i];\n data[i] = id;\n i += 1;\n }\n $('#elements').sortable('disable');\n return $.ajax({\n type: 'POST',\n url: '/sort',\n data: data,\n success: function(data) {\n return $('#elements').sortable('enable');\n }\n });\n }\n });\n});\n" } ]
12
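The drag-and-drop ordering in the Fanshenium/amvnews-download record above couples the jQuery UI sortable callback in main.js with the `sort` view in core/views.py, which receives {new_position: amv_id} pairs and rewrites each UserToAMV row's position. Below is a minimal, framework-free sketch of that reindexing idea; the `rows` list and `apply_sort` helper are illustrative stand-ins, not code from the repository:
```python
# Hypothetical stand-ins for UserToAMV rows; the real view runs
# UserToAMV.objects.filter(user=..., amv_id=value).update(position=key) per pair.
rows = [
    {'amv_id': 7, 'position': 0},
    {'amv_id': 3, 'position': 1},
    {'amv_id': 9, 'position': 2},
]

def apply_sort(rows, posted):
    # `posted` mirrors request.POST: str(new_position) -> str(amv_id)
    by_id = {row['amv_id']: row for row in rows}
    for position, amv_id in posted.items():
        by_id[int(amv_id)]['position'] = int(position)
    return sorted(rows, key=lambda r: r['position'])

# The client builds this mapping with sortable('toArray', ...) and reverses it,
# so the views' '-position' ordering shows the most recently dragged item first.
print(apply_sort(rows, {'0': '9', '1': '7', '2': '3'}))
```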
dudupagliaroni/simple_clock
https://github.com/dudupagliaroni/simple_clock
9b151c7cd4e9f12ed87be6da599f574ba786c645
8e80f76b897ea5a2c4cf9a972bb879205d601b83
36b462be5c56c3872d5dbbaeb94f9413fe2feec1
refs/heads/master
2023-08-06T02:26:25.721647
2021-10-06T05:04:13
2021-10-06T05:04:13
285,978,008
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5845283269882202, "alphanum_fraction": 0.6867924332618713, "avg_line_length": 42.442623138427734, "blob_id": "3973d3e864a5ce1ddd11b40ced15b823cc1d7b7f", "content_id": "b45c17f5e99bb8c731bc6d797fa6223f999f7a6f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2651, "license_type": "permissive", "max_line_length": 148, "num_lines": 61, "path": "/simple clock.py", "repo_name": "dudupagliaroni/simple_clock", "src_encoding": "UTF-8", "text": "from tkinter import *\nimport datetime\n\nroot = Tk()\nroot.title('Relógio')\nx = 600 \ny = 600\ncanvas = Canvas(root, width=x, height=y, bg='#4682B4')\ncanvas.pack()\n\narc_sec = canvas.create_arc(240, 240, x-240, y-240, start=90, extent=-datetime.datetime.now().second*6, style=ARC, width=30, outline='#F9AE17')\narc_min = canvas.create_arc(200, 200, x-200, y-200, start=90, extent=-datetime.datetime.now().minute*6, style=ARC, width=30, outline='#F9AE17')\narc_hour = canvas.create_arc(160, 160, x-160, y-160, start=90, extent=-datetime.datetime.now().hour*30, style=ARC, width=30, outline='#F9AE17')\narc_day = canvas.create_arc(120, 120, x-120, y-120, start=90, extent=-datetime.datetime.now().day*11.61, style=ARC, width=30, outline='#F9AE17')\narc_month = canvas.create_arc(80, 80, x-80, y-80, start=90, extent=-datetime.datetime.now().month*30, style=ARC, width=30, outline='#F9AE17')\narc_year = canvas.create_arc(40, 40, x-40, y-40, start=90, extent=-datetime.datetime.now().year*3.6, style=ARC, width=30, outline='#F9AE17')\n\n\ndef refresh_sec():\n global arc_sec\n canvas.delete(arc_sec)\n arc_sec = canvas.create_arc(240, 240, x-240, y-240, start=90, extent=-datetime.datetime.now().second*6, style=ARC, width=30, outline='#F9AE17')\n canvas.after(1000, refresh_sec)\n\ndef refresh_min():\n global arc_min\n canvas.delete(arc_min)\n arc_min = canvas.create_arc(200, 200, x-200, y-200, start=90, extent=-datetime.datetime.now().minute*6, style=ARC, width=30, outline='#F9AE17')\n canvas.after(1000, refresh_min)\n\ndef refresh_hour():\n global arc_hour\n canvas.delete(arc_hour)\n arc_hour = canvas.create_arc(160, 160, x-160, y-160, start=90, extent=-datetime.datetime.now().hour*30, style=ARC, width=30, outline='#F9AE17')\n canvas.after(1000, refresh_hour) \n\ndef refresh_day():\n global arc_day\n canvas.delete(arc_day)\n arc_day = canvas.create_arc(120, 120, x-120, y-120, start=90, extent=-datetime.datetime.now().day*11.61, style=ARC, width=30, outline='#F9AE17')\n canvas.after(1000, refresh_day) \n\ndef refresh_month():\n global arc_month\n canvas.delete(arc_month)\n arc_month = canvas.create_arc(80, 80, x-80, y-80, start=90, extent=-datetime.datetime.now().month*27, style=ARC, width=30, outline='#F9AE17')\n canvas.after(1000, refresh_month) \n\ndef refresh_year():\n global arc_year\n canvas.delete(arc_year)\n arc_year = canvas.create_arc(40, 40, x-40, y-40, start=90, extent=-datetime.datetime.now().year*3.6, style=ARC, width=30, outline='#F9AE17')\n canvas.after(1000, refresh_year)\n\nrefresh_sec()\nrefresh_min()\nrefresh_hour()\nrefresh_month()\nrefresh_day()\nrefresh_year()\nroot.mainloop()\n" }, { "alpha_fraction": 0.7719298005104065, "alphanum_fraction": 0.7719298005104065, "avg_line_length": 27.5, "blob_id": "4c600a054fbc775856cc054dae0ff7b90e48d3ad", "content_id": "0cdfb0e5c54bb0807d60fd1ee3db0ecf3d059b62", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 59, "license_type": "permissive", "max_line_length": 
41, "num_lines": 2, "path": "/README.md", "repo_name": "dudupagliaroni/simple_clock", "src_encoding": "UTF-8", "text": "# simple_clock\n Um relógio simples de segundos até anos!\n" } ]
2
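The six refresh_* callbacks in simple clock.py above differ only in the arc inset, the datetime attribute they read, and the degrees-per-unit scale. Here is a sketch of how a single parameterized updater could replace them, assuming the same `canvas`, `x`, `y`, and colors as the script; `make_refresher` is an illustrative helper, not part of the repository:
```python
import datetime
from tkinter import ARC

def make_refresher(canvas, x, y, inset, attr, scale):
    # attr names a datetime attribute ('second', 'minute', 'hour', ...);
    # scale is its degrees-per-unit factor (6 for seconds, 30 for hours, ...).
    arc = None
    def refresh():
        nonlocal arc
        if arc is not None:
            canvas.delete(arc)  # drop the previous arc before redrawing
        value = getattr(datetime.datetime.now(), attr)
        arc = canvas.create_arc(inset, inset, x - inset, y - inset,
                                start=90, extent=-value * scale,
                                style=ARC, width=30, outline='#F9AE17')
        canvas.after(1000, refresh)  # re-schedule itself, as the original does
    return refresh

# e.g. make_refresher(canvas, 600, 600, 240, 'second', 6)() replaces refresh_sec()
```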
danieel-reis/ai_pacmaze
https://github.com/danieel-reis/ai_pacmaze
c708d66082b6a397f7bcc26130bda152ec88853e
ee2f57209db1cd80c8266d99e9cb9900de7c2351
1478b242a486b774e397076a0e8de015f9461037
refs/heads/master
2022-04-13T15:52:07.414074
2020-02-22T17:50:14
2020-02-22T17:50:14
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6803820729255676, "alphanum_fraction": 0.7237325310707092, "avg_line_length": 29.93181800842285, "blob_id": "b1f2f0df7cbb0828836afb16c3db2e840e5d4de7", "content_id": "37974768ef328d30d13cc7adae73ea4d8de4c19a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1368, "license_type": "no_license", "max_line_length": 120, "num_lines": 44, "path": "/README.md", "repo_name": "danieel-reis/ai_pacmaze", "src_encoding": "UTF-8", "text": "# Resolvedor do N-puzzle utilizando Inteligência Artificial\n\n## Execução e testes\n\nSegue um exemplo:\n\nEstado inicial:\n```\n[8 6 7]\n[2 5 4]\n[3 0 1]\n```\n\nEstado objetivo:\n```\n[1 2 3]\n[4 5 6]\n[7 8 0]\n```\n\nObserve abaixo como executar o código:\n```\npython3 tp_ia_daniel_reis.py dim_x dim_y estado_inicial estado_final algoritmo name_save\npython3 tp_ia_daniel_reis.py 3 3 [[8,6,7],[2,5,4],[3,0,1]] [[1,2,3],[4,5,6],[7,8,0]] 0 31\n```\n\nEm que:\n* dim_x e dim_y remetem ao tamanho da matriz;\n* estado_inicial remete aos valores da matriz inicial;\n* estado_final remete aos valores da matriz objetivo;\n* algoritmo se refere ao algoritmo escolhido para executar, como descrito logo abaixo;\n* name_save se refere ao nome do arquivo a ser salvo com a solução do problema obtida pelo(s) algoritmo(s) executado(s).\n\nDescreve-se os parâmetros do algoritmo:\n* 0 - Executar todos os algoritmos;\n* 1 - Executar apenas o Breadth First Search;\n* 2 - Executar apenas o Uniform Cost Search;\n* 3 - Executar apenas o Iterative Deepening Search;\n* 4 - Executar apenas o A Star Search Heuristic 1;\n* 5 - Executar apenas o A Star Search Heuristic 2;\n* 6 - Executar apenas o Greedy Best First Search Heuristic 1;\n* 7 - Executar apenas o Greedy Best First Search Heuristic 2;\n* 8 - Executar apenas o Hill Climbing Search Heuristic 1;\n* 9 - Executar apenas o Hill Climbing Search Heuristic 2.\n" }, { "alpha_fraction": 0.5164281725883484, "alphanum_fraction": 0.5227698087692261, "avg_line_length": 55.66666793823242, "blob_id": "2fd986cca9f3d2cf373e8610ea0d96968d6ed3b5", "content_id": "6c204331b576aa0c90c364a5740f4dd298890e41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56986, "license_type": "no_license", "max_line_length": 214, "num_lines": 999, "path": "/tp_ia_daniel_reis.py", "repo_name": "danieel-reis/ai_pacmaze", "src_encoding": "UTF-8", "text": "#python tp_ia_daniel_reis.py 3 3 [[1,2,3],[4,5,6],[7,8,0]] [[1,2,3],[4,5,6],[7,8,0]] 0 1\n#python tp_ia_daniel_reis.py 4 4 [[1,2,3,4],[5,6,7,8],[9,10,11,12],[15,13,14,0]] [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,0]] 0 1\n\nimport time\nimport random\nfrom copy import copy, deepcopy\nimport sys\n\nsizeRow = int(sys.argv[1])\nsizeColumn = int(sys.argv[2])\n\nfileResult = open('result' + sys.argv[6] + '.txt', 'a')\nfileResultCsv = open('resultCSV' + sys.argv[6] + '.csv', 'a')\nfileResultCsv.write('algorithm,beginTime,endTime,totalTime,pathCost,totalNodes,totalNodesExplored\\n')\n\ndef printTextCSV(algorithm, beginTime, endTime, totalTime, pathCost, totalNodes, totalNodesExplored, totalNodesOpenNotExplored):\n #Insere texto no arquivo\n fileResultCsv.write(str(algorithm) + ',' + str(beginTime) + ',' + str(endTime) + ',' + str(totalTime) + ',' + str(pathCost) + ',' + str(totalNodes) + ',' + str(totalNodesExplored) + '\\n')\n \ndef printText(text):\n print(text) #Imprime texto\n fileResult.write(str(text) + '\\n') #Insere texto no arquivo\n\ndef printTextInline(text):\n print(str(text) + ' 
', end='') #Imprime texto inline\n fileResult.write(str(text) + ' ') #Insere texto no arquivo\n \ndef closeFileTXT():\n #fileResult.write('\\n\\n\\n')\n fileResult.close() #Fecha o arquivo\n\ndef closeFileCSV():\n #fileResultCsv.write(',,,,,,,,\\n' + ',,,,,,,,\\n' + ',,,,,,,,\\n')\n fileResultCsv.close() #Fecha o arquivo\n\nclass LoadMatrix: \n def load(matrixInRow): \n #Cria matriz somente com zeros\n M = [] #Cria matriz\n for i in range(sizeRow): #Para cada linha do arquivo\n li = [] #Cria uma lista que vai armazenar uma linha da matriz\n for j in range(sizeColumn): #Para cada coluna dessa linha\n li.append(0) #Preenche cada elemento com zero\n M.append(li) #Adiciona a linha criada a matriz\n \n d = matrixInRow.split(',') #Separa os dados da matriz lida do arquivo\n for i in range(sizeRow): #Para cada linha da matriz\n for j in range(sizeColumn): #Para cada coluna da matriz\n M[i][j] = int(d[sizeRow*i+j].replace('[','').replace(']','')) #Preenche a matriz com os valores lidos do arquivo\n return M #Retorna a matriz preenchida\n\nclass Node:\n def __init__(self, root, matrix, cost, costH):\n self.root = root\n self.matrix = matrix\n self.cost = cost\n self.costH = costH\n\n def setCost(self, cost):\n self.cost = cost\n \n def setCostH(self, costH):\n self.costH = costH\n \n def setRoot(self, root):\n self.root = root\n \n def getCost(self):\n return self.cost\n \n def getCostH(self):\n return self.costH\n \n def getRoot(self):\n return self.root\n \n def getMatrix(self):\n return self.matrix\n\nclass Result:\n def __init__(self, algorithm, timeBegin, timeEnd, lenFrontier, lenExplored, lenNodes, nodes, index):\n self.algorithm = algorithm\n self.timeBegin = timeBegin\n self.timeEnd = timeEnd\n self.lenFrontier = lenFrontier\n self.lenExplored = lenExplored\n self.lenNodes = lenNodes\n self.nodes = nodes\n self.index = index\n \n #Mostrar os resultados\n def printResultSearch(self):\n printText('Results')\n \n listPath = []\n while (self.index >= 0):\n listPath.append(self.index)\n self.index = self.nodes[self.index].getRoot() \n listPath.reverse()\n \n if (len(listPath) > 0):\n printTextInline('Path:')\n for element in listPath:\n printTextInline(element)\n print('')\n for element in listPath:\n OperationsMatrix.printMatrix(self.nodes[element].getMatrix())\n printText('Path cost: ' + str(len(listPath)-1))\n else:\n printText('Path cost: 0')\n \n #printText('Begin time: ' + str(self.timeBegin) + ' ms')\n #printText('End time: ' + str(self.timeEnd) + ' ms')\n printText('Total time: ' + str(self.timeEnd-self.timeBegin) + ' s')\n printText('Total nodes: ' + str(self.lenNodes))\n printText('Total nodes explored: ' + str(self.lenExplored))\n printText('--------------------------------------------')\n printTextCSV(self.algorithm, self.timeBegin, self.timeEnd, (self.timeEnd-self.timeBegin), (len(listPath)-1), (len(self.nodes)), self.lenExplored, self.lenFrontier)\n \n #Mostrar os resultados\n def printResultSearch_(self, path, lenNodes):\n printText('Results')\n if (len(path) > 0):\n printText('Path:')\n for r in path:\n OperationsMatrix.printMatrix(r.getMatrix())\n printText('Path cost: ' + str(len(path)-1))\n else:\n printText('Path cost: 0')\n printText('Total time: ' + str(self.timeEnd-self.timeBegin) + ' s')\n printText('Total nodes: ' + str(lenNodes))\n printText('Total nodes explored: ' + str(self.lenExplored))\n printText('--------------------------------------------')\n printTextCSV(self.algorithm, self.timeBegin, self.timeEnd, (self.timeEnd-self.timeBegin), (len(path)-1), lenNodes, 
self.lenExplored, self.lenFrontier)\n\nclass OperationsMatrix: \n    #Gera matriz aleatória\n    def generatorRandomMatrix():\n        items = [x for x in range(sizeRow*sizeColumn)] #Gera uma lista com valores de 0 até n-1\n        random.shuffle(items) #Embaralha a lista\n        \n        #Cria matriz somente com zeros\n        M = [] #Cria matriz\n        for i in range(sizeRow): #Para cada linha do arquivo\n            li = [] #Cria uma lista que vai armazenar uma linha da matriz\n            for j in range(sizeColumn): #Para cada coluna dessa linha\n                li.append(0) #Preenche cada elemento com zero\n            M.append(li) #Adiciona a linha criada a matriz\n        \n        for i in range(sizeRow): #Para cada linha da matriz\n            for j in range(sizeColumn): #Para cada coluna da matriz\n                M[i][j] = int(items[sizeRow*i+j]) #Preenche a matriz com os valores embaralhados\n        return M #Retorna a matriz preenchida\n\n    #Imprime a matriz\n    def printMatrix(M):\n        for i in range(sizeRow): #Percorre cada linha da matriz\n            #printText(M[i]) #Imprime o valor da linha\n            printTextInline(M[i]) #Imprime o valor da linha\n        printText('')\n        \n    #Encontra a posição em que se encontra um valor na matriz\n    def findPosition(M, value):\n        posColumn = -1 #Supõe inicialmente que o valor procurado não está na matriz\n        posRow = -1 #Supõe inicialmente que o valor procurado não está na matriz\n        for i in range(sizeRow): #Para cada linha da matriz\n            for j in range(sizeColumn): #Para cada coluna dessa linha\n                if (M[i][j] == value): #Verifica se o elemento da posição atual na matriz é exatamente o elemento procurado\n                    posRow = i #Se o elemento procurado foi encontrado, guarda a linha em que ele se encontra\n                    posColumn = j #Se o elemento procurado foi encontrado, guarda a coluna em que ele se encontra\n        return (posRow, posColumn) #Retorna a posição do elemento procurado na matriz (linha, coluna)\n    \n    #Encontra a posição em que se encontra na hash\n    def getPosition(M, value):\n        pos = 0 #Supõe inicialmente que o espaço em branco está na primeira posição da matriz\n        for i in range(sizeRow): #Para cada linha da matriz\n            for j in range(sizeColumn): #Para cada coluna dessa linha\n                if (M[i][j] == value): #Verifica se o elemento da posição atual na matriz é exatamente o elemento procurado\n                    return pos #Retorna a posição do elemento procurado na matriz\n                pos = pos+1 #Avança o contador\n        return 0 #Retorna a posição do elemento procurado na matriz\n    \n    #Troca elemento de posição na matriz\n    def swapElementMatrix(M, posRow1, posColumn1, posRow2, posColumn2):\n        aux = M[posRow1][posColumn1] #Guarda o valor da matrix[posRow1][posColumn1] em um valor auxiliar\n        M[posRow1][posColumn1] = M[posRow2][posColumn2] #Atualiza a matrix[posRow1][posColumn1] com o valor da matrix[posRow2][posColumn2]\n        M[posRow2][posColumn2] = aux #Atualiza a matrix[posRow2][posColumn2] com o valor guardado em auxiliar\n        return M #Retorna a matriz atualizada\n    \n    #Encontra todos os possíveis movimentos para cada nó => analisa as possibilidades de mover o espaço em branco pra cima, baixo, direita e/ou esquerda\n    #Nota: a ordem explorada interfere diretamente nos resultados finais\n    def possibilityMoves(M, indexRoot, nodes):\n        (posRow, posColumn) = OperationsMatrix.findPosition(M, 0) #Procura o elemento '0' na matriz, ou seja, o espaço em branco\n        possibility = [] #Move elemento, ou seja, cria cada filho\n        if (posColumn-1>=0): #Checa se é possível mover para esquerda => retroceder uma posição na coluna\n            possibility.append(OperationsMatrix.swapElementMatrix(deepcopy(M), posRow, posColumn, posRow, posColumn-1)) #Move em x--\n        if (posColumn+1<sizeColumn): #Checa se é 
possível mover para direita => avançar uma posição na coluna\n possibility.append(OperationsMatrix.swapElementMatrix(deepcopy(M), posRow, posColumn, posRow, posColumn+1)) #Move em x++\n if (posRow-1>=0): #Checa se é possível mover para cima => retroceder uma posição na linha\n possibility.append(OperationsMatrix.swapElementMatrix(deepcopy(M), posRow, posColumn, posRow-1, posColumn)) #Move em y--\n if (posRow+1<sizeRow): #Checa se é possível mover para baixo => avançar uma posição na linha\n possibility.append(OperationsMatrix.swapElementMatrix(deepcopy(M), posRow, posColumn, posRow+1, posColumn)) #Move em y++\n \n #Elimina o que for igual ao avó, para não ter que consultar no vetor\n if (indexRoot > 0):\n for element in possibility:\n if (element == nodes[indexRoot].getMatrix()):\n possibility.remove(element)\n return possibility\n \n #Checa se o estado atual é o estado objetivo\n def verifyGoal(M, goal):\n return (M == goal) #Caso não detectar nenhum elemento diferente, retorna verdadeiro, pois ambos são iguais\n \n #Inicializa a lista de explorados\n def initializeExplored():\n explored = []\n for i in range(sizeRow*sizeColumn): #Para o total de possições possíveis de conter a posição vazia\n listA = [] #Cria uma lista vazia\n for j in range(sizeRow*sizeColumn): #Para o total de possições possíveis de conter a posição vazia\n listB = [] #Cria uma lista vazia\n for k in range(sizeRow*sizeColumn): #Para o total de possições possíveis de conter a posição vazia\n listC = [] #Cria uma lista vazia\n for m in range(sizeRow*sizeColumn): #Para o total de possições possíveis de conter a posição vazia\n listD = [] #Cria uma lista vazia\n for n in range(sizeRow*sizeColumn): #Para o total de possições possíveis de conter a posição vazia\n listD.append([]) #Inclui uma lista vazia\n listC.append(listD) #Inclui uma lista de listas vazia\n listB.append(listC) #Inclui uma lista de lista de listas vazia\n listA.append(listB) #Inclui uma lista de lista de lista de listas vazias\n explored.append(listA) #Inclui uma lista de lista de lista de lista de listas vazias\n return explored #Retorna a lista de listas inicializada\n \n #Calcula o tamanho da lista de explorados com base nas sublistas\n def getSizeExplored(explored):\n lenExplored = 0 #Inicializa o tamanho como zero\n for eListA in explored: #Percorre cada sublista\n for eListB in eListA: #Percorre cada sublista\n for eListC in eListB: #Percorre cada sublista\n for eListD in eListC: #Percorre cada sublista\n for e in eListD: #Percorre cada sublista\n lenExplored = lenExplored+len(e) #Contabiliza o tamanho de cada sublista\n return lenExplored #Retorna o valor total\n \n #Checa se o nó está entre os explorados\n def verifyNodeIsExplored(M, explored, nodes):\n pos1 = OperationsMatrix.getPosition(M, 0) #Pega a posição da sublista em que o elemento está na lista de explorados\n pos2 = OperationsMatrix.getPosition(M, 2) #Pega a posição da sublista em que o elemento está na sublista da lista de explorados\n pos3 = OperationsMatrix.getPosition(M, 4) #Pega a posição da sublista em que o elemento está na sublista da lista de explorados\n pos4 = OperationsMatrix.getPosition(M, 6) #Pega a posição da sublista em que o elemento está na sublista da lista de explorados\n pos5 = OperationsMatrix.getPosition(M, 8) #Pega a posição da sublista em que o elemento está na sublista da lista de explorados\n for index in explored[pos1][pos2][pos3][pos4][pos5]: #Percorre cada elemento dos explorados\n if (M == nodes[index].getMatrix()): #Verifica se o elemento observado no 
momento entre os explorados é igual ao elemento procurado\n                return index #Caso seja, retorna o índice desse elemento na lista de explorados\n        return -1 #Caso não encontre o elemento entre os explorados, retorna -1\n\n    #Checa se o nó está na fronteira\n    def verifyNodeIsFrontier(M, frontier, nodes):\n        pos = 0\n        for index in frontier: #Percorre cada elemento da fronteira\n            if (M == nodes[index].getMatrix()): #Verifica se o elemento observado no momento na fronteira é igual ao elemento procurado\n                return pos #Caso seja, retorna o índice desse elemento na fronteira\n            pos = pos+1 #Avança uma posição\n        return -1 #Caso não encontre o elemento na fronteira, retorna -1\n    \n    #Retorna o índice do nó com menor custo\n    def getIndexNodeMinCost(listIndex, listNode):\n        if (len(listIndex) > 0): #Verifica se a lista tem pelo menos algum elemento\n            cont = 0 #Variável que representa a posição atual em que se está na lista\n            minIndex = 0 #Variável que representa o índice do menor elemento da lista\n            minCost = listNode[listIndex[minIndex]].getCost() + listNode[listIndex[minIndex]].getCostH() #Calcula o custo total (custo real + custo heurístico) do primeiro elemento da lista\n            for element in listIndex: #Percorre cada elemento da lista\n                totalCost = listNode[element].getCost() + listNode[element].getCostH() #Calcula o custo total (custo real + custo heurístico) da posição atual\n                if (totalCost < minCost): #Se o custo total da posição atual for menor que o mínimo conhecido\n                    minIndex = cont #Atualiza o índice do menor valor conhecido\n                    minCost = totalCost #Atualiza o valor do menor valor conhecido\n                cont = cont+1 #Avança uma posição na lista\n            return minIndex #Retorna o índice do elemento com menor custo\n        return None #Lista vazia, ou seja, não existe elemento com menor custo\n\n    #Calcula o custo com base na heurística escolhida\n    def getHeuristCost(M, goal, heurist):\n        if (heurist == 1):\n            return OperationsMatrix.getHeuristCost1(M, goal) #Heuristica 1\n        elif (heurist == 2):\n            return OperationsMatrix.getHeuristCost2(M, goal) #Heuristica 2\n\n    #Heurística 1: Número de quadrados em uma posição errada\n    def getHeuristCost1(M, goal):\n        value = 0 #Número de quadrados em uma posição errada\n        for i in range(sizeRow): #Percorre cada linha da matriz\n            for j in range(sizeColumn): #Percorre cada coluna da matriz\n                element = goal[i][j]\n                if (element > 0 and M[i][j] != element): #Não considera o espaço em branco. 
Se o valor na mesma posição é diferente nas duas matrizes\n                    value = value+1 #Incrementa o contador\n        return value #Retorna o valor\n    \n    #Heurística 2: Manhattan Distance\n    def getHeuristCost2(M, goal):\n        value = 0 #Soma das distâncias que separam os quadrados das posições finais\n        for i in range(sizeRow): #Percorre cada linha da matriz\n            for j in range(sizeColumn): #Percorre cada coluna da matriz\n                element = goal[i][j]\n                if (element > 0): #Não considera o espaço em branco\n                    (posRow, posColumn) = OperationsMatrix.findPosition(M, element) #Procura na matriz M, o valor da posição atual observado no objetivo\n                    distanceX = abs(i-posRow) #Calcula a distância em X usando o módulo (abs)\n                    distanceY = abs(j-posColumn) #Calcula a distância em Y usando o módulo (abs)\n                    value = value + distanceX + distanceY #Soma as distâncias\n                    #printText(str(M) + ' ' + str(goal) + '+' + str(distanceX + distanceY))\n        #printText(str(M) + ' ' + str(value))\n        return value #Retorna o valor total\n    \n    #Define a posição para inserir o elemento na fronteira, assumindo-se que a fronteira é uma lista em que inserimos os elementos de forma ordenada\n    def getPositionInsert(nodes, listElement, cost, costH):\n        ctei = (cost + costH) #Custo total do elemento a ser inserido\n        lenListElement = len(listElement) #Tamanho da lista de elementos\n        \n        if (lenListElement == 0): #Se a fronteira está vazia\n            return 0 #Insere no início\n        \n        node_init = nodes[listElement[0]] #Elemento inicial da fronteira a ser comparado\n        ctef_init = (node_init.getCost() + node_init.getCostH()) #Custo total do elemento da fronteira[0]\n        \n        node_end = nodes[listElement[lenListElement-1]] #Elemento final da fronteira a ser comparado\n        ctef_end = (node_end.getCost() + node_end.getCostH()) #Custo total do elemento da fronteira[lenFrontier-1]\n        \n        if (ctef_init >= ctei): #Se o elemento tem custo total menor que o do primeiro\n            return 0 #Insere no início\n        elif (ctef_end <= ctei): #Se o elemento tem custo total maior que o do último\n            return lenListElement #Insere no fim\n        else:\n            #Busca binária pela primeira posição cujo custo total é maior que o do elemento a inserir\n            l = 1 #Limite esquerdo da busca (o primeiro elemento já tem custo menor)\n            r = lenListElement-1 #Limite direito da busca (o último elemento já tem custo maior)\n\n            while (l < r):\n                m = int((l + r) / 2) #Calcula o meio\n\n                node = nodes[listElement[m]] #Elemento atual da fronteira a ser comparado\n                ctef = (node.getCost() + node.getCostH()) #Custo total do elemento da fronteira[m]\n\n                if (ctef <= ctei): #O elemento a inserir fica à direita de listElement[m]\n                    l = m+1\n                else: #O elemento a inserir fica à esquerda de listElement[m] (ou na posição m)\n                    r = m\n            return l #Insere na primeira posição com custo total maior\n\nclass SearchWithoutInformation: \n    def breadthFirstSearch(M, goal):\n        timeBegin = time.time()\n        \n        #Verifica se o nó atual é igual ao objetivo\n        if (OperationsMatrix.verifyGoal(M, goal)):\n            
timeEnd = time.time()\n result = Result(\"breadthFirstSearch\", timeBegin, timeEnd, 0, 0, 0, [], -1)\n result.printResultSearch()\n return\n \n #Inicializa a lista de nós como vazia\n nodes = []\n index = -1\n nodes.append(Node(index, M, 0, 0))\n \n #Inicializa a fronteira\n frontier = []\n #Insere na fronteira o primeiro nó (Nó inicial)\n frontier.append(0)\n #Inicializa a lista de nós explorados como vazia\n explored = OperationsMatrix.initializeExplored()\n \n #Loop => Enquanto a fronteira não estiver vazia\n while(len(frontier) > 0): \n #Atualiza nó => Seleciona o primeiro elemento da fronteira\n index = frontier.pop(0)\n M = nodes[index].getMatrix()\n \n #Marca o nó como explorado\n explored[OperationsMatrix.getPosition(M, 0)][OperationsMatrix.getPosition(M, 2)][OperationsMatrix.getPosition(M, 4)][OperationsMatrix.getPosition(M, 6)][OperationsMatrix.getPosition(M, 8)].append(index)\n \n #printTextInline(M)\n #printTextInline(' ==> ')\n \n #Explora cada filho gerado\n for children in OperationsMatrix.possibilityMoves(M, nodes[index].getRoot(), nodes):\n #Verifica se o nó não está entre os explorados e não está na fronteira\n if (OperationsMatrix.verifyNodeIsExplored(children, explored, nodes) < 0 and OperationsMatrix.verifyNodeIsFrontier(children, frontier, nodes) < 0):\n #Verifica se o nó é igual a solução objetivo\n if (OperationsMatrix.verifyGoal(children, goal)):\n timeEnd = time.time()\n #Insere o resultado na lista de nós\n nodes.append(Node(index, children, 0, 0))\n #Insere o resultado na fronteira\n frontier.append(len(nodes)-1)\n #Atualiza o índice do último nó\n index = len(nodes)-1\n #Mostra o resultado\n result = Result(\"breadthFirstSearch\", timeBegin, timeEnd, len(frontier), OperationsMatrix.getSizeExplored(explored), len(nodes), nodes, index)\n result.printResultSearch()\n return\n \n #printTextInline(children)\n #printTextInline(' ')\n \n #Adiciona nó na fronteira\n nodes.append(Node(index, children, 0, 0))\n frontier.append(len(nodes)-1)\n #printMatrix(children)\n \n #printText('')\n #printText(str(len(frontier)) + ' === ' + str(OperationsMatrix.getSizeExplored(explored)))\n \n #Imprime a fronteira\n #countFrontier = 1\n #for index in frontier:\n #printText('Element frontier ' + str(countFrontier))\n #printMatrix(nodes[index].getMatrix())\n #countFrontier = countFrontier+1\n\n def uniformCostSearch(M, goal):\n timeBegin = time.time()\n \n #Define o custo real do nó inicial como zero\n pathCost = 0\n \n #Inicializa a lista de nós como vazia\n nodes = []\n index = -1\n nodes.append(Node(index, M, pathCost, 0))\n \n #Inicializa a fronteira\n frontier = []\n #Insere na fronteira o primeiro nó (Nó inicial)\n frontier.append(0)\n #Inicializa a lista de nós explorados como vazia\n explored = OperationsMatrix.initializeExplored()\n \n #Loop => Enquanto a fronteira não estiver vazia\n while(len(frontier) != 0): \n #Atualiza nó => Seleciona o nó de menor custo (Apenas real) da fronteira\n index = frontier.pop(0)\n M = nodes[index].getMatrix()\n \n #Imprime a iteração\n #printText('Iteration => ' + str(pathCost))\n #OperationsMatrix.printMatrix(M)\n \n #Verifica se o nó atual é igual ao objetivo\n if (OperationsMatrix.verifyGoal(M, goal)):\n timeEnd = time.time()\n #Mostra o resultado\n result = Result(\"uniformCostSearch\", timeBegin, timeEnd, len(frontier), OperationsMatrix.getSizeExplored(explored), len(nodes), nodes, index)\n result.printResultSearch()\n return\n \n #Marca o nó como explorado\n explored[OperationsMatrix.getPosition(M, 0)][OperationsMatrix.getPosition(M, 
2)][OperationsMatrix.getPosition(M, 4)][OperationsMatrix.getPosition(M, 6)][OperationsMatrix.getPosition(M, 8)].append(index)\n \n #Explora cada filho gerado\n for children in OperationsMatrix.possibilityMoves(M, nodes[index].getRoot(), nodes):\n indexExplored = OperationsMatrix.verifyNodeIsExplored(children, explored, nodes) #Verifica se o nó está entre os explorados\n indexFrontier = OperationsMatrix.verifyNodeIsFrontier(children, frontier, nodes) #Verifica se o nó está na fronteira\n pos = OperationsMatrix.getPositionInsert(nodes, frontier, pathCost, 0) #Posição em que o nó deve ser inserido para inserir ordenado\n if (indexExplored < 0 and indexFrontier < 0): #Se o nó não foi explorado e nem está na fronteira\n #Adiciona nó na lista de nós\n nodes.append(Node(index, children, pathCost, 0))\n #Adiciona o índice do nó na fronteira\n frontier.insert(pos, (len(nodes)-1))\n #OperationsMatrix.printMatrix(children)\n elif (indexFrontier >= 0): #Se o nó não foi explorado mas já está na fronteira\n #Atualiza aquele nó utilizando o novo pai e o novo custo => Isso porque foi identificado um novo caminho até ele com menor custo\n idx = frontier[indexFrontier]\n nodes[idx].setCost(pathCost) #Atualiza o custo do nó\n nodes[idx].setRoot(index) #Atualiza o pai do nó\n #Remove esse nó da fronteira e insere novamente com o custo atualizado\n frontier.insert(pos, idx)\n del frontier[indexFrontier]\n\n #Imprime a fronteira\n #countFrontier = 1\n #for index in frontier:\n #printText('Element frontier ' + str(countFrontier))\n #OperationsMatrix.printMatrix(nodes[index].getMatrix())\n #countFrontier = countFrontier+1\n \n pathCost = pathCost+1\n \n def depthSearch(M, limit, goal, timeBeginIDS):\n timeBegin = time.time()\n \n #Verifica se o nó atual é igual ao objetivo\n if (OperationsMatrix.verifyGoal(M, goal)):\n timeEnd = time.time()\n result = Result(\"iterativeDeepeningSearch\", timeBegin, timeEnd, 0, 0, 0, [], -1)\n result.printResultSearch()\n return True\n \n #Inicializa a lista de nós como vazia\n nodes = []\n index = -1\n totalNodes = 0\n \n #Inicializa a fronteira\n frontier = []\n \n #Adiciona nó na lista de nós com profundidade zero\n nodes.append(Node(index, M, 0, 0))\n totalNodes = totalNodes+1\n #Insere na fronteira o primeiro nó (Nó inicial)\n frontier.append(0)\n \n #Inicializa a lista de nós explorados como vazia\n explored = OperationsMatrix.initializeExplored()\n \n #print(frontier)\n #countFrontier = 0\n #for index in frontier:\n #print('Element frontier ' + str(countFrontier), end='')\n #OperationsMatrix.printMatrix(nodes[index].getMatrix())\n #countFrontier = countFrontier+1\n \n #Loop => Enquanto a fronteira não estiver vazia\n while(len(frontier) > 0): \n #Atualiza nó => Seleciona o último elemento da fronteira\n index = frontier.pop()\n M = nodes[index].getMatrix()\n depth = nodes[index].getCost()\n \n #Marca o nó como explorado\n explored[OperationsMatrix.getPosition(M, 0)][OperationsMatrix.getPosition(M, 2)][OperationsMatrix.getPosition(M, 4)][OperationsMatrix.getPosition(M, 6)][OperationsMatrix.getPosition(M, 8)].append(index)\n \n #Verifica se o nó é igual a solução objetivo\n if (OperationsMatrix.verifyGoal(M, goal)):\n timeEnd = time.time()\n #Mostra o resultado\n printTextInline('Nodes: ' + str(len(nodes)) + ' - ' + str(totalNodes) + ' - ')\n #result = Result(\"iterativeDeepeningSearch\", timeBegin, timeEnd, len(frontier), len(nodes)-len(frontier), totalNodes, nodes, index)\n result = Result(\"iterativeDeepeningSearch\", timeBeginIDS, timeEnd, len(frontier), 
len(nodes)-len(frontier), totalNodes, nodes, index)\n result.printResultSearch()\n return True\n \n #printTextInline(M)\n #printTextInline(' ==> ')\n \n #Se a profundidade dos filhos for menor que o limite, podemos gerar mais filhos\n if (depth+1 <= limit):\n #Explora cada filho gerado\n for children in OperationsMatrix.possibilityMoves(M, nodes[index].getRoot(), nodes):\n #Verifica se o nó está entre os explorados\n indexExplored = OperationsMatrix.verifyNodeIsExplored(children, explored, nodes)\n #Se já cheguei nesse nó antes e já explorei\n if (indexExplored >= 0):\n elementExplored = nodes[indexExplored]\n #Verifico se já cheguei nesse nó com um custo maior. Se sim, insiro ele na fronteira\n if ((elementExplored.getCost() + elementExplored.getCostH()) > depth+1):\n totalNodes = totalNodes+1\n #Atualizo o custo do nó\n nodes[indexExplored].setCost(depth+1)\n #Atualiza o pai do nó\n nodes[indexExplored].setRoot(index)\n #Adiciona o índice do nó na fronteira\n frontier.append(indexExplored)\n #print(children) \n \n else:\n #Verifica se o nó está na fronteira\n indexFrontier = OperationsMatrix.verifyNodeIsFrontier(children, frontier, nodes)\n #Se já cheguei nesse nó antes mas ele ainda não foi explorado, ou seja, ele está na fronteira\n if (indexFrontier >= 0):\n elementFrontier = nodes[indexFrontier]\n #Verifico se já cheguei nesse nó com um custo maior. Se sim, insiro ele na fronteira\n if ((elementFrontier.getCost() + elementFrontier.getCostH()) > depth+1):\n totalNodes = totalNodes+1\n #Atualizo o custo do nó\n nodes[indexFrontier].setCost(depth+1)\n #Atualiza o pai do nó\n nodes[indexFrontier].setRoot(index)\n #Adiciona o índice do nó na fronteira\n frontier.append(indexFrontier)\n #print(children)\n \n #Se esse nó não foi nem explorado e nem está na fronteira\n else:\n #Adiciona nó na lista de nós com profundidade = depth+1\n nodes.append(Node(index, children, depth+1, 0))\n totalNodes = totalNodes+1\n #Adiciona o índice do nó na fronteira\n frontier.append(len(nodes)-1)\n #print(children)\n \n #printText('')\n #printText(str(len(frontier)) + ' === ' + str(OperationsMatrix.getSizeExplored(explored)))\n \n #Imprime a fronteira\n #print(frontier)\n #countFrontier = 0\n #for index in frontier:\n #print('Element frontier ' + str(countFrontier), end='')\n #OperationsMatrix.printMatrix(nodes[index].getMatrix())\n #countFrontier = countFrontier+1\n \n timeEnd = time.time()\n #Mostra o resultado\n printTextInline('Nodes: ' + str(len(nodes)) + ' - ' + str(totalNodes) + ' - ')\n #result = Result(\"iterativeDeepeningSearch\", timeBegin, timeEnd, len(frontier), len(nodes)-len(frontier), totalNodes, nodes, index)\n #result.printResultSearch()\n \n return False\n\n def iterativeDeepeningSearch(M, maximum, goal):\n timeBegin = time.time() #Começa a contar o tempo total da busca em profundidade\n \n printText('-------------------')\n depth = 1 #Inicializa a profundidade como 1\n while depth <= maximum: #Enquanto a profundidade for menor ou igual a profundidade máxima\n timeI = time.time() #Começa a contar o tempo da busca na profundidade depth\n result = SearchWithoutInformation.depthSearch(M, depth, goal, timeBegin) #Tenta encontrar o nó objetivo numa profundidade máxima 'depth'\n timeE = time.time() #Termina de contar o tempo da busca na profundidade depth \n printText('depth: ' + str(depth) + ' - time: ' + str(timeE - timeI))\n printText('-------------------')\n if (result == True): #Se o resultado for verdadeiro, para\n break\n depth = depth+1 #Avança a profundidade\n \n timeEnd = time.time() 
#Termina de contar o tempo total da busca em profundidade\n printText('Total time: ' + str(timeEnd - timeBegin) + ' s')\n\nclass SearchWithInformation: \n def AStarSearch(M, goal, heurist):\n timeBegin = time.time()\n \n #Seta o custo real como zero\n pathCostG = 0\n #Define o custo heurístico com base na heurística selecionada => Heurística 1 ou 2\n pathCostH = OperationsMatrix.getHeuristCost(M, goal, heurist)\n \n #Inicializa a lista de nós como vazia\n nodes = []\n index = -1\n nodes.append(Node(index, M, pathCostG, pathCostH))\n \n #Inicializa a fronteira\n frontier = []\n #Insere na fronteira o primeiro nó (Nó inicial)\n frontier.append(0)\n #Inicializa a lista de nós explorados como vazia\n explored = OperationsMatrix.initializeExplored()\n \n #Loop => Enquanto a fronteira não estiver vazia\n while(len(frontier) != 0):\n #Atualiza nó => Seleciona o nó de menor custo (real + heuristico) da fronteira \n index = frontier.pop(0)\n M = nodes[index].getMatrix()\n \n #Imprime a iteração\n #printText('G => ' + str(pathCostG) + ' - H => ' + str(pathCostH))\n #OperationsMatrix.printMatrix(M)\n \n #Verifica se o nó é igual a solução objetivo\n if (OperationsMatrix.verifyGoal(M, goal)):\n timeEnd = time.time()\n result = Result(\"AStarSearchH\" + str(heurist), timeBegin, timeEnd, len(frontier), OperationsMatrix.getSizeExplored(explored), len(nodes), nodes, index)\n result.printResultSearch()\n return\n\n #Marca o nó como explorado (fechados)\n explored[OperationsMatrix.getPosition(M, 0)][OperationsMatrix.getPosition(M, 2)][OperationsMatrix.getPosition(M, 4)][OperationsMatrix.getPosition(M, 6)][OperationsMatrix.getPosition(M, 8)].append(index)\n \n #Explora cada filho gerado\n for children in OperationsMatrix.possibilityMoves(M, nodes[index].getRoot(), nodes): \n #Se o nó não está entre os fechados\n if (OperationsMatrix.verifyNodeIsExplored(children, explored, nodes) < 0):\n #Verifica se o nó está entre os abertos (fronteira)\n indexFrontier = OperationsMatrix.verifyNodeIsFrontier(children, frontier, nodes) #Verifica se o nó está na fronteira\n \n #Calcula o custo heurísitico\n pathCostH = OperationsMatrix.getHeuristCost(M, goal, heurist)\n \n #Posição em que o nó deve ser inserido para inserir ordenado\n pos = OperationsMatrix.getPositionInsert(nodes, frontier, pathCostG, pathCostH)\n\n #Se o nó ainda não foi aberto\n if (indexFrontier < 0):\n #Adiciona nó na lista de nós\n nodes.append(Node(index, children, pathCostG, pathCostH))\n #Adiciona o índice do nó na fronteira (abertos)\n frontier.insert(pos, (len(nodes)-1))\n #OperationsMatrix.printMatrix(children)\n \n #Se o nó já foi aberto\n else:\n idx = frontier[indexFrontier]\n #Se o custo real agora é menor que o custo real anterior, encontrei um caminho melhor\n if (pathCostG < nodes[idx].getCost()):\n #Atualiza o custo para se chegar nesse nó através do novo pai\n nodes[idx].setCost(pathCostG)\n #Atualiza o pai do nó\n nodes[idx].setRoot(indexpathCostG)\n #Remove esse nó da fronteira e insere novamente com o custo atualizado\n frontier.insert(pos, idx)\n del frontier[indexFrontier]\n \n #Imprime a fronteira\n #countFrontier = 1\n #for index in frontier:\n #printText('Element frontier ' + str(countFrontier))\n #OperationsMatrix.printMatrix(nodes[index].getMatrix())\n #countFrontier = countFrontier+1\n \n #Toda vez que descer um ramo na árvore, avança o custo real\n pathCostG = pathCostG+1\n \n def greedyBestFirstSearch(M, goal, heurist):\n timeBegin = time.time()\n \n #Calcula o custo heurísitico\n pathCostH = 
OperationsMatrix.getHeuristCost(children, goal, heurist)\n                    \n                    #Posição em que o nó deve ser inserido para inserir ordenado\n                    pos = OperationsMatrix.getPositionInsert(nodes, frontier, pathCostG, pathCostH)\n\n                    #Se o nó ainda não foi aberto\n                    if (indexFrontier < 0):\n                        #Adiciona nó na lista de nós\n                        nodes.append(Node(index, children, pathCostG, pathCostH))\n                        #Adiciona o índice do nó na fronteira (abertos)\n                        frontier.insert(pos, (len(nodes)-1))\n                        #OperationsMatrix.printMatrix(children)\n                    \n                    #Se o nó já foi aberto\n                    else:\n                        idx = frontier[indexFrontier]\n                        #Se o custo real agora é menor que o custo real anterior, encontrei um caminho melhor\n                        if (pathCostG < nodes[idx].getCost()):\n                            #Atualiza o custo para se chegar nesse nó através do novo pai\n                            nodes[idx].setCost(pathCostG)\n                            #Atualiza o pai do nó\n                            nodes[idx].setRoot(index)\n                            #Remove esse nó da fronteira e insere novamente com o custo atualizado\n                            frontier.insert(pos, idx)\n                            del frontier[indexFrontier]\n            \n            #Imprime a fronteira\n            #countFrontier = 1\n            #for index in frontier:\n                #printText('Element frontier ' + str(countFrontier))\n                #OperationsMatrix.printMatrix(nodes[index].getMatrix())\n                #countFrontier = countFrontier+1\n            \n            #Toda vez que descer um ramo na árvore, avança o custo real\n            pathCostG = pathCostG+1\n    \n    def greedyBestFirstSearch(M, goal, heurist):\n        timeBegin = time.time()\n        \n        #Calcula o custo heurístico\n        pathCostH = 
nodes[indexRootNode].getCostH() #Custo do pai do nó\n #Se o filho for pior que o pai, a busca para e vai para o movimento lateral. Caso contrário, continua\n if (costNode <= costRootNode):\n break\n \n #Verifica se o nó é igual a solução objetivo\n if (OperationsMatrix.verifyGoal(M, goal)):\n timeEnd = time.time()\n frontier.append(index)\n result = Result(\"hillclimbingSearchH\" + str(heurist), timeBegin, timeEnd, len(frontier), OperationsMatrix.getSizeExplored(explored), len(nodes), nodes, index)\n result.printResultSearch()\n return\n \n #Marca o nó como explorado\n explored[OperationsMatrix.getPosition(M, 0)][OperationsMatrix.getPosition(M, 2)][OperationsMatrix.getPosition(M, 4)][OperationsMatrix.getPosition(M, 6)][OperationsMatrix.getPosition(M, 8)].append(index)\n \n #Explora cada filho gerado\n for children in OperationsMatrix.possibilityMoves(M, nodes[index].getRoot(), nodes):\n #Calcula o custo heurísitico\n pathCostH = OperationsMatrix.getHeuristCost(children, goal, heurist)\n \n #Se o nó não foi explorado\n if (OperationsMatrix.verifyNodeIsExplored(children, explored, nodes) < 0):\n #Adiciona nó na fronteira\n nodes.append(Node(index, children, 0, pathCostH))\n frontier.append(len(nodes)-1)\n #OperationsMatrix.printMatrix(children)\n\n #Imprime a fronteira\n #printText(frontier)\n #countFrontier = 1\n #for index in frontier:\n #printText('Element frontier ' + str(countFrontier))\n #OperationsMatrix.printMatrix(nodes[index].getMatrix())\n #countFrontier = countFrontier+1\n \n timeEnd = time.time()\n result = Result(\"hillclimbingSearchH\" + str(heurist), timeBegin, timeEnd, len(frontier), OperationsMatrix.getSizeExplored(explored), len(nodes), nodes, index)\n result.printResultSearch()\n \nclass Tests:\n def runAllTests(initial, goal, maximum, algorithm):\n if (algorithm == 0 or algorithm == 1):\n printText('Initial matrix')\n OperationsMatrix.printMatrix(initial)\n printText('--------------------------------------------')\n printText('Goal matrix')\n OperationsMatrix.printMatrix(goal)\n printText('--------------------------------------------')\n printText('******* Search Without Information ********')\n printText('--------------------------------------------')\n printText('Breadth First Search:')\n SearchWithoutInformation.breadthFirstSearch(initial, goal)\n \n if (algorithm == 0 or algorithm == 2):\n printText('\\n\\n\\n')\n printText('Initial matrix')\n OperationsMatrix.printMatrix(initial)\n printText('--------------------------------------------')\n printText('Goal matrix')\n OperationsMatrix.printMatrix(goal)\n printText('--------------------------------------------')\n printText('******* Search Without Information ********')\n printText('--------------------------------------------')\n printText('Uniform Cost Search:')\n SearchWithoutInformation.uniformCostSearch(initial, goal)\n \n if (algorithm == 0 or algorithm == 3):\n printText('\\n\\n\\n')\n printText('Initial matrix')\n OperationsMatrix.printMatrix(initial)\n printText('--------------------------------------------')\n printText('Goal matrix')\n OperationsMatrix.printMatrix(goal)\n printText('--------------------------------------------')\n printText('******* Search Without Information ********')\n printText('--------------------------------------------')\n printText('Iterative Deepening Search:')\n SearchWithoutInformation.iterativeDeepeningSearch(initial, maximum, goal)\n \n if (algorithm == 0 or algorithm == 4):\n printText('\\n\\n\\n')\n printText('Initial matrix')\n OperationsMatrix.printMatrix(initial)\n 
printText('--------------------------------------------')\n printText('Goal matrix')\n OperationsMatrix.printMatrix(goal)\n printText('--------------------------------------------')\n printText('********* Search With Information *********')\n printText('--------------------------------------------')\n printText('A Star Search Heuristic 1:')\n SearchWithInformation.AStarSearch(initial, goal, 1)\n \n if (algorithm == 0 or algorithm == 5):\n printText('\\n\\n\\n')\n printText('Initial matrix')\n OperationsMatrix.printMatrix(initial)\n printText('--------------------------------------------')\n printText('Goal matrix')\n OperationsMatrix.printMatrix(goal)\n printText('--------------------------------------------')\n printText('********* Search With Information *********')\n printText('--------------------------------------------')\n printText('A Star Search Heuristic 2:')\n SearchWithInformation.AStarSearch(initial, goal, 2)\n \n if (algorithm == 0 or algorithm == 6):\n printText('\\n\\n\\n')\n printText('Initial matrix')\n OperationsMatrix.printMatrix(initial)\n printText('--------------------------------------------')\n printText('Goal matrix')\n OperationsMatrix.printMatrix(goal)\n printText('--------------------------------------------')\n printText('********* Search With Information *********')\n printText('--------------------------------------------')\n printText('Greedy Best First Search Heuristic 1:')\n SearchWithInformation.greedyBestFirstSearch(initial, goal, 1)\n \n if (algorithm == 0 or algorithm == 7):\n printText('\\n\\n\\n')\n printText('Initial matrix')\n OperationsMatrix.printMatrix(initial)\n printText('--------------------------------------------')\n printText('Goal matrix')\n OperationsMatrix.printMatrix(goal)\n printText('--------------------------------------------')\n printText('********* Search With Information *********')\n printText('--------------------------------------------')\n printText('Greedy Best First Search Heuristic 2:')\n SearchWithInformation.greedyBestFirstSearch(initial, goal, 2)\n \n if (algorithm == 0 or algorithm == 8):\n printText('\\n\\n\\n')\n printText('Initial matrix')\n OperationsMatrix.printMatrix(initial)\n printText('--------------------------------------------')\n printText('Goal matrix')\n OperationsMatrix.printMatrix(goal)\n printText('--------------------------------------------')\n printText('*************** Local Search ***************')\n printText('--------------------------------------------')\n printText('Hill Climbing Search Heuristic 1:')\n LocalSearch.hillclimbingSearch(initial, goal, 1)\n \n if (algorithm == 0 or algorithm == 9):\n printText('\\n\\n\\n')\n printText('Initial matrix')\n OperationsMatrix.printMatrix(initial)\n printText('--------------------------------------------')\n printText('Goal matrix')\n OperationsMatrix.printMatrix(goal)\n printText('--------------------------------------------')\n printText('*************** Local Search ***************')\n printText('--------------------------------------------')\n printText('Hill Climbing Search Heuristic 2:')\n LocalSearch.hillclimbingSearch(initial, goal, 2)\n \ndef main(): \n initial = LoadMatrix.load(sys.argv[3])\n goal = LoadMatrix.load(sys.argv[4])\n \n Tests.runAllTests(initial, goal, 1000, int(sys.argv[5]))\n\n closeFileTXT()\n closeFileCSV()\n\n \nif __name__ == '__main__':\n main()\n" } ]
2
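The record above embeds a Python solver that runs greedy best-first search and hill climbing over puzzle states, keeping a frontier ordered by heuristic cost and a set of explored nodes. For orientation, the following is a minimal, self-contained sketch of the textbook greedy best-first loop, not a reproduction of the record's variant (which rebuilds the frontier from the current node's children on every iteration); the names graph, heuristic, start, and goal are illustrative assumptions, not taken from the record.

    import heapq

    def greedy_best_first(graph, heuristic, start, goal):
        # Frontier entries are (heuristic cost, node, path so far);
        # heapq always pops the entry with the lowest heuristic cost.
        frontier = [(heuristic(start), start, [start])]
        explored = set()
        while frontier:
            _, node, path = heapq.heappop(frontier)
            if node == goal:
                return path
            if node in explored:
                continue
            explored.add(node)
            for child in graph.get(node, []):
                if child not in explored:
                    heapq.heappush(
                        frontier,
                        (heuristic(child), child, path + [child]))
        return None  # frontier exhausted without reaching the goal

    if __name__ == "__main__":
        graph = {"A": ["B", "C"], "B": ["D"], "C": ["D"], "D": []}
        h = {"A": 3, "B": 2, "C": 1, "D": 0}
        print(greedy_best_first(graph, h.__getitem__, "A", "D"))  # ['A', 'C', 'D']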
dass-cin/pontos-turisticos-recife
https://github.com/dass-cin/pontos-turisticos-recife
bb37c7f9f426e9fc86cd946bd02a994205d38b62
833211a019f13c72278e9bccefa19520fed5ac26
35ef95fa5156b185e785dd7da5b188c6977b8fb1
refs/heads/master
2021-01-10T12:49:38.742220
2015-11-26T05:33:48
2015-11-26T05:33:48
46,828,254
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6157407164573669, "alphanum_fraction": 0.621632993221283, "avg_line_length": 36.125, "blob_id": "4955258ba1ba001cfef0aac9b766c602a6942dc1", "content_id": "aaa3196f1b23879bf7d74931766cd798aba07b6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2376, "license_type": "no_license", "max_line_length": 299, "num_lines": 64, "path": "/main.py", "repo_name": "dass-cin/pontos-turisticos-recife", "src_encoding": "UTF-8", "text": "## Consumo dos dados da API de dados abertos do Recife\n\nfrom flask import Flask, request\nfrom flask_restful import Resource, Api\nfrom sqlalchemy import create_engine\nimport xml.etree.ElementTree as ET\nimport json\n\ne = create_engine('sqlite:///museus.db')\n\napp = Flask(__name__)\napi = Api(app)\n\n\nclass Museus_Meta(Resource):\n def get(self):\n conn = e.connect()\n query = conn.execute('select * from museus')\n result = {'data': [dict(zip(tuple(query.keys()), i)) for i in query.cursor]}\n return result\n\nclass Pontos_Turisticos_Meta(Resource):\n def get(self):\n tree = ET.parse('datasets-pe/EMPETUR_PontosTuristicosPE_dado.xml')\n root = tree.getroot()\n data = []\n for child in root.iter('registros'):\n for registro in child.iter('registro'):\n item = {}\n for attr in registro.attrib:\n schema = ['id', 'nome', 'codigocategoria', 'latitude', 'longitude', 'altitude', 'categoria', 'descricao', 'idioma', 'logradouro', 'municipio']\n item[schema[int(attr.replace(\"campo\",\"\"))-1]] = registro.get(attr)\n data.append(item)\n return { 'data' : data }\n\napi.add_resource(Museus_Meta, '/museus/')\napi.add_resource(Pontos_Turisticos_Meta, '/pontosTuristicos/')\n\nclass Importacao(object):\n def importPontos(self):\n e = create_engine('sqlite:///pontos_turisticos.db')\n conn = e.connect()\n\n JSON_FILE = \"datasets-pe/pontos-turisticos.json\"\n rawdata = json.load(open(JSON_FILE))\n parsed = rawdata['data']\n dataset = json.loads(json.dumps(parsed))\n\n for data in dataset:\n codigocategoria = None\n if data['codigocategoria'] != '': codigocategoria = int(data['codigocategoria'])\n conn.execute('INSERT INTO pontos_turisticos_normal VALUES (:1,:2,:3,:4,:5,:6,:7,:8,:9,:10,:11)', data['id'], data['nome'], codigocategoria, data['altitude'], data['categoria'], data['descricao'], data['idioma'], data['logradouro'], data['municipio'], data['latitude'], data['longitude'])\n\nif (not app.debug):\n import logging\n from logging import FileHandler\n file_handler = FileHandler('app.log')\n file_handler.setLevel(logging.WARNING)\n app.logger.addHandler(file_handler)\n\nif __name__ == '__main__':\n i = Importacao()\n i.importPontos()\n #app.run()\n" } ]
1
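The main.py in the record above flattens positional XML attributes (campo1 through campo11) from the EMPETUR dataset into named fields by indexing a schema list. Below is a hedged, standalone sketch of just that mapping step; the schema list is copied from the record, while the two-attribute sample document stands in for the real dataset file, which is not reproduced here.

    import xml.etree.ElementTree as ET

    SCHEMA = ['id', 'nome', 'codigocategoria', 'latitude', 'longitude',
              'altitude', 'categoria', 'descricao', 'idioma', 'logradouro',
              'municipio']

    SAMPLE = """<registros>
      <registro campo1="1" campo2="Marco Zero" campo11="Recife"/>
    </registros>"""

    def parse_registros(xml_text):
        root = ET.fromstring(xml_text)
        rows = []
        for registro in root.iter('registro'):
            item = {}
            for attr, value in registro.attrib.items():
                # 'campoN' maps to SCHEMA[N-1], exactly as in the record.
                index = int(attr.replace('campo', '')) - 1
                item[SCHEMA[index]] = value
            rows.append(item)
        return rows

    print(parse_registros(SAMPLE))
    # [{'id': '1', 'nome': 'Marco Zero', 'municipio': 'Recife'}]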
SirEdvin/SpideLair
https://github.com/SirEdvin/SpideLair
66aaa25eb4c7f8ccc618f1a7b5ae05b3ceb44a44
3cefc2487d8b4e447267dc1ac70b1a2bc6294464
ae9dcb782f85c559195bb8753bf4e9cd1e648268
refs/heads/master
2016-08-12T12:40:44.604749
2015-11-25T12:15:31
2015-11-25T12:15:31
45,752,030
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5708879232406616, "alphanum_fraction": 0.5729257464408875, "avg_line_length": 33.0098991394043, "blob_id": "7b57e1c0c81130a7f12d57e037cdb26d943f7ed8", "content_id": "26b4771fc6569ba4505bc041723e5bcaada251d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3435, "license_type": "no_license", "max_line_length": 103, "num_lines": 101, "path": "/actors/core.py", "repo_name": "SirEdvin/SpideLair", "src_encoding": "UTF-8", "text": "import logging\nimport pymongo\nfrom pykka import ThreadingActor, ActorRegistry\nfrom pykka.gevent import GeventActor\n\n\n__author__ = 'siredvin'\n\n\nclass SpiderLair(GeventActor):\n def __init__(self):\n super(SpiderLair, self).__init__()\n self._log = logging.getLogger('SpiderLair')\n self._patterns = {}\n self._commands = {\n 'register_actor': self.register_actor,\n 'get_url_list_by_name': self.get_url_list_by_name,\n 'save_metadata': self.save_metadata,\n 'add_url': self.add_url,\n 'get_spider_by_name': self.get_spider_by_name,\n 'get_web_spider_metadata': self.get_web_spider_metadata,\n 'add_notification': self.add_notification,\n 'kick_all': self.kick_all,\n 'get_all_spiders': self.get_all_spiders,\n None: lambda x: self._log.warning('Where is the command?!')\n }\n self._actor_list = []\n self._client = pymongo.MongoClient()\n self._db = self._client['spiders']\n\n def register_actor(self, actor):\n self._log.info('Register actor {0}'.format(actor._actor.name))\n self._actor_list.append(actor)\n self._patterns.update({actor._actor.site_url: actor})\n\n def get_all_spiders(self, message):\n return self._actor_list[:]\n\n def default_command(self, message):\n self._log.warning('Unknown command {0}'.format(message.get('command')))\n\n def get_spider_by_name(self, spider_name):\n return next((spider for spider in self._actor_list if spider._actor.name == spider_name), None)\n\n def on_receive(self, message):\n command = self._commands.get(message.get('command'), self.default_command)\n return command(message.get('data'))\n\n def get_url_list_by_name(self, spider_name):\n return list(self._db.urls.find({'spider_name': spider_name}, {'_id': False}))\n\n def save_metadata(self, data):\n for site_url in data:\n self._db.spider_metadata.update({\n 'spider_name': site_url.get('spider_name'),\n 'identificator': site_url.get('identificator')\n }, site_url, upsert=True)\n\n def add_url(self, url_dict):\n \"\"\"\n If url don't exists, add it to collection\n \"\"\"\n self._db.urls.update(url_dict, url_dict, upsert=True)\n\n def get_web_spider_metadata(self, spider_name):\n return list(self._db.spider_metadata.find({'spider_name': spider_name}))\n\n def add_notification(self, notification):\n self._db.notification.insert(notification)\n\n def kick_all(self, data):\n for actor in self._actor_list:\n if not actor.ask({\n 'command': 'is_work',\n 'data': None\n }):\n self._log.info('Start actor {0}'.format(actor._actor.name))\n actor.tell({\n 'command': 'run',\n 'data': None\n })\n\n @property\n def db(self):\n return self._db\n\n\nclass SleepyElder(ThreadingActor):\n def __init__(self):\n super(SleepyElder, self).__init__()\n self._lair = ActorRegistry.get_by_class_name('SpiderLair')[0]\n self._lair_command = {\n 'command': 'kick_all',\n 'data': None\n }\n self._work = True\n\n # def _start_actor_loop(self):\n # while self._work:\n # self._lair.tell(self._lair_command)\n # time.sleep(300)\n" }, { "alpha_fraction": 0.8518518805503845, "alphanum_fraction": 0.8518518805503845, "avg_line_length": 17, 
"blob_id": "f628b18122b0651563a51e5afc62faf96b3459a2", "content_id": "8dc63a713b5bb993d67addcf6f7f0c8ce6520c7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 54, "license_type": "no_license", "max_line_length": 23, "num_lines": 3, "path": "/flask_config.py", "repo_name": "SirEdvin/SpideLair", "src_encoding": "UTF-8", "text": "import initial\nimport restapi\nfrom initial import app\n" }, { "alpha_fraction": 0.765625, "alphanum_fraction": 0.765625, "avg_line_length": 15.25, "blob_id": "ff512ee97ed6cc902aca4f612f7b51c64681b721", "content_id": "a673062e12c87c5d20620b2fff51d2746188afd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 64, "license_type": "no_license", "max_line_length": 23, "num_lines": 4, "path": "/actors/__init__.py", "repo_name": "SirEdvin/SpideLair", "src_encoding": "UTF-8", "text": "__author__ = 'siredvin'\nimport abstract\nimport core\nimport manga" }, { "alpha_fraction": 0.7077922224998474, "alphanum_fraction": 0.7077922224998474, "avg_line_length": 21, "blob_id": "80719417091b80487a628943e5d986dc165f0bd1", "content_id": "da79c936ac89a96030e8bcb200c525b73d4c5d0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 616, "license_type": "no_license", "max_line_length": 85, "num_lines": 28, "path": "/initial.py", "repo_name": "SirEdvin/SpideLair", "src_encoding": "UTF-8", "text": "from flask import Flask, session, redirect, url_for, escape, request, render_template\nimport pymongo\n\napp = Flask(__name__)\n\ndb = pymongo.MongoClient()['spiders']\n\n\[email protected]('/')\ndef hello():\n return render_template('base.html', categories=get_categories())\n\n\[email protected]('/categories')\ndef categories():\n return render_template('categories.html', categories=get_categories())\n\n\[email protected]('/settings')\ndef settings():\n return render_template('settings.html')\n\n\ndef get_categories():\n return db.notification.distinct('category')\n\n# set the secret key. 
keep this really secret:\napp.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'\n" }, { "alpha_fraction": 0.7209302186965942, "alphanum_fraction": 0.7209302186965942, "avg_line_length": 20.5, "blob_id": "3110d016c6eba8e74bce96cf2fc2011127db26cb", "content_id": "e61543f1c288d6dbabd8f2382bb7fdf678fa8eaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 43, "license_type": "no_license", "max_line_length": 23, "num_lines": 2, "path": "/util/__init__.py", "repo_name": "SirEdvin/SpideLair", "src_encoding": "UTF-8", "text": "__author__ = 'siredvin'\nimport metaclasses\n" }, { "alpha_fraction": 0.7273991703987122, "alphanum_fraction": 0.7273991703987122, "avg_line_length": 24.678571701049805, "blob_id": "a028272ecd43aa80e547f826b166655cacd40291", "content_id": "56ffca64abf93e495c591bccccfc5710e2c3a2eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 719, "license_type": "no_license", "max_line_length": 107, "num_lines": 28, "path": "/docs/conversation.md", "repo_name": "SirEdvin/SpideLair", "src_encoding": "UTF-8", "text": "# Database Structure\n### Spider URL collection\n\n**Database**: urls\n\n+ spider_name - name of spider \n+ search_url - url, where spider search data \n\nEach spider can add his metadata fields in this collection \n\n### URL Notification collection\n\n**Database**: notification\n\n+ notify_title - Notification title\n+ category - Category (Default Basic)\n+ tags - Tags (array) (optional)\n+ url - Notification url\n+ body - (optional) notification body\n+ image - (optional) url to image\n\n### Web Spider metadata\n\n**Database**: spider_metadata\n\n+ spider_name - name of spider. never change\n+ url - data url. Can be changed\n+ identificator - Some text field, that helps spider to identificate data, that was processed. 
Never change\n" }, { "alpha_fraction": 0.5754475593566895, "alphanum_fraction": 0.5775788426399231, "avg_line_length": 36.83871078491211, "blob_id": "18c854a5a80b074d89c67fa62655cb9f4c06fb03", "content_id": "7b84b3b904a5998945761b300881c8c7157195c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2346, "license_type": "no_license", "max_line_length": 117, "num_lines": 62, "path": "/actors/manga.py", "repo_name": "SirEdvin/SpideLair", "src_encoding": "UTF-8", "text": "from flask import url_for\n\n__author__ = 'siredvin'\nfrom actors.abstract import AbstractRSSSpider\nimport re\nfrom lxml import html\n\n\nclass ReadmangaSpider(AbstractRSSSpider):\n def __init__(self):\n self.name = 'Charlatta'\n self.description = 'Cute spider, loves manga'\n self.site_description = 'ReadManga'\n self.site_url = 'readmanga.me'\n super(ReadmangaSpider, self).__init__()\n\n def _notify(self, notification):\n self._log.info('Spider {0} create notification'.format(self.name))\n notification.update({\n 'category': 'Manga',\n 'tags': ['manga', 'readmanga']\n })\n super(ReadmangaSpider, self)._notify(notification)\n\n def _process_url(self, url_dict):\n \"\"\"\n Convert url from\n http://readmanga.me/fairy_tail/\n to\n http://readmanga.me/rss/manga?name=fairy_tail\n\n And add manga_url tag, extract manga name and genres\n Return None if link are incorrect\n \"\"\"\n url = url_dict.get('search_url')\n site = re.search(\"\\w+://.+?/\", url).group()\n url = url.replace(site, \"\")\n if len(url) < 1:\n return None\n manga_name = url.split(\"/\")[0]\n url_dict['search_url'] = site + 'rss/manga?name=' + manga_name\n url_dict['manga_url'] = site + manga_name\n if len(filter(lambda x: x['manga_url'] == url_dict['manga_url'], self._urls)) > 0:\n self._log.info('Manga with url %s already in %s database' % (url_dict['manga_url'], self.name))\n return None\n # Extract genres from manga\n manga_site = html.parse(url_dict['manga_url'])\n genres = map(lambda x: x.text_content().replace(', ', ''), manga_site.getroot().find_class('elem_genre'))\n url_dict['genres'] = genres\n # Extract manga name for name get\n url_dict['name'] = \\\n manga_site.getroot().xpath('//*[@id=\"mangaBox\"]/div[@class=\"leftContent\"]/meta[@itemprop=\"name\"]')[0].attrib[\n 'content']\n self._log.info('Spider %s add manga with name %s' % (self.name, url_dict['name']))\n return url_dict\n\n def get_info(self, message):\n result = super(ReadmangaSpider, self).get_info(message)\n result.update({\n 'image_url': 'images/charlatta-chibi.png'\n })\n return result\n" }, { "alpha_fraction": 0.5366336703300476, "alphanum_fraction": 0.5366336703300476, "avg_line_length": 36.44444274902344, "blob_id": "ef4f15ed3719d20ba9529c2d0a54026ce69f7566", "content_id": "a86be4048b1363276bfa8b61ac93c7b57c1e60af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1010, "license_type": "no_license", "max_line_length": 110, "num_lines": 27, "path": "/static/js/categories.js", "repo_name": "SirEdvin/SpideLair", "src_encoding": "UTF-8", "text": "$(document).ready(function () {\n var set_active = function (link) {\n var closest_ul = link.closest(\"ul\");\n var parallel_active_links = closest_ul.find(\".active-category\");\n parallel_active_links.removeClass(\"active-category\");\n var closest_li = link.closest(\"li\");\n closest_li.addClass('active-category')\n };\n\n slide_link($(\"#categories-link\"));\n $(\"#categories a\").on(\"click\", function () {\n var link = 
$(this);\n var category = link.text();\n var site_root = get_site_root();\n set_active(link);\n $.ajax({\n url: site_root + 'api/alpha/notify/' + category,\n type: 'Get'\n }).done(function (data) {\n var sticker_list = $(\"#category-stickers\");\n sticker_list.empty();\n data.notifications.forEach(function (currentValue) {\n sticker_list.append('<li><a class=\"green folder\" href=\"#\">' + currentValue.text + '</a></li>')\n })\n });\n });\n});" }, { "alpha_fraction": 0.5335237979888916, "alphanum_fraction": 0.5340952277183533, "avg_line_length": 28.335195541381836, "blob_id": "456a72241872adf31d7ca886a62e81e2fb0299ca", "content_id": "515555ef837ba2155d8292ac1c5e8922f5bc195f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5250, "license_type": "no_license", "max_line_length": 110, "num_lines": 179, "path": "/actors/abstract.py", "repo_name": "SirEdvin/SpideLair", "src_encoding": "UTF-8", "text": "import logging\nimport abc\nimport feedparser\nfrom pykka import ActorRegistry\nfrom pykka.gevent import GeventActor\n\n\nclass AbstractSpider(GeventActor):\n __metaclass__ = abc.ABCMeta\n\n def __init__(self):\n super(AbstractSpider, self).__init__()\n self._lair = ActorRegistry.get_by_class_name('SpiderLair')[0]\n self._log = logging.getLogger('Spider')\n self._on_work = False\n if not self.name or not self.description or not self.site_description or not self.site_url:\n raise Exception(\n \"name, description, site_description, site_url must be defined abowe the super __init__ call\")\n self._commands = {\n 'run': self._run,\n 'is_work': self._is_work,\n 'get_info': self.get_info,\n 'add_urls': self.add_urls,\n 'get_settings_and_info': self.get_settings_and_info,\n None: self.default_command\n }\n\n @abc.abstractmethod\n def _run(self, data):\n pass\n\n def default_command(self, message):\n self._log.warning('Unknown command {0}'.format(message.get('command')))\n\n @abc.abstractmethod\n def add_urls(self, message):\n pass\n\n def get_info(self, message):\n return {\n 'name': self.name,\n 'description': self.description\n }\n\n def get_settings_and_info(self, message):\n info = self.get_info(message)\n info.update({\n 'site_description': self.site_description,\n 'site_url': self.site_url\n })\n return info\n\n def on_receive(self, message):\n command = self._commands.get(message.get('command'), self.default_command)\n return command(message.get('data'))\n\n def _is_work(self, data):\n return self._on_work\n\n\nclass AbstractWebSpider(AbstractSpider):\n __metaclass__ = abc.ABCMeta\n\n def __init__(self):\n super(AbstractWebSpider, self).__init__()\n self._reload_from_database()\n def _reload_from_database(self):\n self._load_urls()\n self._load_metadata()\n\n def _load_urls(self):\n self._urls = self._lair.ask({\n 'command': 'get_url_list_by_name',\n 'data': self.name\n })\n\n def _save_metadata(self):\n self._lair.tell({\n 'command': 'save_metadata',\n 'data': self._get_metadata()\n })\n\n def _load_metadata(self):\n metadata = self._lair.ask({\n 'command': 'get_web_spider_metadata',\n 'data': self.name\n })\n self._process_metadata(metadata)\n\n def get_settings_and_info(self, message):\n info = super(AbstractWebSpider, self).get_settings_and_info(message)\n info.update({\n 'urls': self._urls\n })\n return info\n\n def _notify(self, notification):\n self._lair.tell({\n 'command': 'add_notification',\n 'data': notification\n })\n\n def add_urls(self, message):\n urls = message.get('urls')\n for url in urls:\n url_dict = {\n 'spider_name': 
self.name,\n 'search_url': url\n }\n url_dict = self._process_url(url_dict)\n if url_dict:\n self._lair.tell({\n 'command': 'add_url',\n 'data': url_dict\n })\n self._load_urls()\n\n @abc.abstractmethod\n def _process_metadata(self, metadata):\n \"\"\"\n Process list of metadata for spider\n \"\"\"\n pass\n\n def _process_url(self, url_dict):\n return url_dict\n\n @abc.abstractmethod\n def _get_metadata(self):\n pass\n\n\nclass AbstractRSSSpider(AbstractWebSpider):\n __metaclass__ = abc.ABCMeta\n\n def __init__(self):\n super(AbstractRSSSpider, self).__init__()\n\n def _process_metadata(self, metadata):\n \"\"\"\n RSS can find last element, based on link\n \"\"\"\n self._last_link = {md.get('identificator'): md.get('url') for md in metadata}\n\n def get_settings_and_info(self, message):\n info = super(AbstractRSSSpider, self).get_settings_and_info(message)\n info.update({\n '_last_links': self._last_link\n })\n return info\n\n def _get_metadata(self):\n metadata = []\n for identificator, url in self._last_link.iteritems():\n metadata.append({\n 'spider_name': self.name,\n 'url': url,\n 'identificator': identificator\n })\n return metadata\n\n def _run(self, data):\n self._on_work = True\n for url in self._urls:\n result = feedparser.parse(url.get('search_url'))\n last_link = self._last_link.get(url.get('search_url'))\n for item in result.entries:\n if item.link != last_link:\n self._notify({\n 'notify_title': item.title,\n 'category': 'Basic',\n 'url': item.link,\n 'body': item.description,\n })\n else:\n break\n self._last_link.update({url.get('search_url'): result.entries[0].link})\n self._save_metadata()\n self._on_work = False" }, { "alpha_fraction": 0.5934605002403259, "alphanum_fraction": 0.5967302322387695, "avg_line_length": 27.230770111083984, "blob_id": "67bb9bf4dd2ca53d7723acc4c1491361e65f5fab", "content_id": "13fc0c5d6cdb1eed416eebc7c08af336ba6bdb64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1835, "license_type": "no_license", "max_line_length": 92, "num_lines": 65, "path": "/restapi.py", "repo_name": "SirEdvin/SpideLair", "src_encoding": "UTF-8", "text": "import pymongo\nfrom flask import jsonify, url_for\nfrom initial import app\nfrom pykka import ActorRegistry\n\ndb = pymongo.MongoClient()['spiders']\n\n\[email protected]('/api/alpha/all_notifications', methods=['GET'])\ndef get_all_notification_function():\n return jsonify({\n 'notifications': list(db.notification.find({}, {'_id': False}))\n })\n\n\[email protected]('/api/alpha/run_all', methods=['GET'])\ndef run_all():\n ActorRegistry.get_by_class_name('SpiderLair')[0].tell({\n 'command': 'kick_all'\n })\n return jsonify({'result': 'ok'})\n\n\[email protected]('/api/alpha/spider/list', methods=['GET'])\ndef spider_list():\n spiders = ActorRegistry.get_by_class_name('SpiderLair')[0].ask({\n 'command': 'get_all_spiders'\n })\n result = []\n for spider in spiders:\n info = spider.ask({\n 'command': 'get_info'\n })\n if info:\n if info.get('image_url'):\n info['image_url'] = url_for('static', filename=info['image_url'])\n result.append(info)\n return jsonify({\n 'spiders_info': result\n })\n\n\[email protected]('/api/alpha/spider/<spider_name>/info', methods=['GET'])\ndef spider_info(spider_name):\n spider = ActorRegistry.get_by_class_name('SpiderLair')[0].ask({\n 'command': 'get_spider_by_name',\n 'data': spider_name\n })\n if spider:\n spider_info_result = spider.ask({\n 'command': 'get_settings_and_info'\n })\n return jsonify(spider_info_result)\n return 
jsonify({\n 'result': '500',\n 'message': 'No spider with this name'\n })\n\n\[email protected]('/api/alpha/notify')\[email protected]('/api/alpha/notify/<category>', methods=['GET'])\ndef get_category_notify(category='Basic'):\n return jsonify({\n 'notifications': list(db.notification.find({'category': category}, {'_id': False}))}\n )\n" }, { "alpha_fraction": 0.5869565010070801, "alphanum_fraction": 0.5869565010070801, "avg_line_length": 27.384614944458008, "blob_id": "5e2e23c7c77174928f01b0f518ac44ce78d079a4", "content_id": "2e4bc76639eb55e69df83e029424ea15e74e1e2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "HTML", "length_bytes": 368, "license_type": "no_license", "max_line_length": 87, "num_lines": 13, "path": "/templates/settings.html", "repo_name": "SirEdvin/SpideLair", "src_encoding": "UTF-8", "text": "{% extends 'base.html' %}\n{% block headblock %}\n <link href=\"{{ url_for('static', filename='css/settings.css') }}\" rel=\"stylesheet\">\n <script src=\"{{ url_for('static', filename='js/settings.js') }}\"></script>\n{% endblock %}\n{% block bodyblock %}\n <ul class=\"sticker-tiles\" id=\"spiders-list\">\n </ul>\n\n <div id=\"spider-setting\">\n\n </div>\n{% endblock %}" }, { "alpha_fraction": 0.5827814340591431, "alphanum_fraction": 0.5842531323432922, "avg_line_length": 26.755102157592773, "blob_id": "e9c119a09f78db394106cb921d058c1df1ea3ae5", "content_id": "680ac73cdd425b9ef27e843c4606c062b81f2d70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1359, "license_type": "no_license", "max_line_length": 59, "num_lines": 49, "path": "/static/js/main.js", "repo_name": "SirEdvin/SpideLair", "src_encoding": "UTF-8", "text": "$(document).ready(function () {\n /*$(\"#accordian a\").click(function () {\n var link = $(this);\n var closest_ul = link.closest(\"ul\");\n var parallel_active_links = closest_ul.find(\".active\")\n var closest_li = link.closest(\"li\");\n var link_status = closest_li.hasClass(\"active\");\n var count = 0;\n\n closest_ul.find(\"ul\").slideUp(function () {\n if (++count == closest_ul.find(\"ul\").length)\n parallel_active_links.removeClass(\"active\");\n });\n\n if (!link_status) {\n closest_li.children(\"ul\").slideDown();\n closest_li.addClass(\"active\");\n }\n });*/\n});\n\n\nfunction slide_link(link) {\n var closest_ul = link.closest(\"ul\");\n var parallel_active_links = closest_ul.find(\".active\")\n var closest_li = link.closest(\"li\");\n var link_status = closest_li.hasClass(\"active\");\n var count = 0;\n\n closest_ul.find(\"ul\").slideUp(function () {\n if (++count == closest_ul.find(\"ul\").length)\n parallel_active_links.removeClass(\"active\");\n });\n\n if (!link_status) {\n closest_li.children(\"ul\").slideDown();\n closest_li.addClass(\"active\");\n }\n}\n\nfunction get_site_root(){\n var location = window.location;\n result = location.protocol+\"//\"+location.hostname;\n if (location.port){\n result += \":\" + location.port\n }\n result += \"/\";\n return result\n}" }, { "alpha_fraction": 0.5810397267341614, "alphanum_fraction": 0.5856269001960754, "avg_line_length": 20.09677505493164, "blob_id": "80da345195e0583df3b6efe2423667983411e721", "content_id": "2acace5de19280695ab6638c6840803af365969c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "no_license", "max_line_length": 110, "num_lines": 31, "path": "/main.py", "repo_name": "SirEdvin/SpideLair", "src_encoding": 
"UTF-8", "text": "import logging\nfrom gevent import monkey\nimport actors.core as core\nimport actors.manga as manga\nfrom flask_config import app\n\n__author__ = 'siredvin'\n\nlogging.basicConfig(level=logging.INFO)\nmonkey.patch_all()\n\nif __name__ == '__main__':\n lair = core.SpiderLair.start()\n mng = manga.ReadmangaSpider.start()\n lair.ask({\n 'command': 'register_actor',\n 'data': mng\n })\n\n mng.ask({\n 'command': 'add_urls',\n 'data': {\n 'urls': ['http://readmanga.me/lessa_the_crimson_knight/vol1/96', 'http://readmanga.me/fairy_tail']\n }\n })\n\n lair.ask({\n 'command': 'kick_all',\n })\n\n app.run()\n" } ]
13
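The SpiderLair record above routes every actor message through a dict that maps a 'command' key onto a handler method. The sketch below shows that dispatch convention in its smallest form with pykka, which the record itself imports (pykka must be installed for this to run); EchoActor and the 'ping' command are illustrative assumptions, not part of the project.

    import pykka

    class EchoActor(pykka.ThreadingActor):
        def __init__(self):
            super().__init__()
            # Same convention as the record: command name -> handler.
            self._commands = {'ping': lambda data: ('pong', data)}

        def on_receive(self, message):
            handler = self._commands.get(message.get('command'))
            if handler is None:
                return None  # unknown command
            return handler(message.get('data'))

    if __name__ == '__main__':
        ref = EchoActor.start()
        try:
            # ask() blocks and returns the handler's return value.
            print(ref.ask({'command': 'ping', 'data': 42}))  # ('pong', 42)
        finally:
            ref.stop()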
gsrr/Regex
https://github.com/gsrr/Regex
407672513bdbd02d8e56e126bc5c98b10880be11
6ceaa47f9d0d9ef6272a0d44095d5e8c3fb64c36
b6b5d4f73473a7dbc7a9342dac4cd61f82bd4e36
refs/heads/master
2021-01-10T10:57:10.635097
2015-10-05T10:44:34
2015-10-05T10:44:34
43,664,395
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 17, "blob_id": "c35be923268aeac61dde5bde8c2815b03eb73d16", "content_id": "5465e67157f91f82e41f021e8c4386df99b797c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 36, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/README.md", "repo_name": "gsrr/Regex", "src_encoding": "UTF-8", "text": "# Regex\nRegular Expression examples\n" }, { "alpha_fraction": 0.6185566782951355, "alphanum_fraction": 0.6396661996841431, "avg_line_length": 21.384614944458008, "blob_id": "0f8bd173259d69f7aea74a95e3756d061ea9ae43", "content_id": "862e951a9ce3f78d9efa094456b47611ca1a7fb2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2037, "license_type": "no_license", "max_line_length": 106, "num_lines": 91, "path": "/regex.py", "repo_name": "gsrr/Regex", "src_encoding": "UTF-8", "text": "#ref , http://marco79423.twbbs.org/articles/%E6%B7%BA%E8%AB%87-regex-%E5%8F%8A%E5%85%B6%E6%87%89%E7%94%A8/\n\nimport re\nimport sys\n\n\ndef readFile():\n\tdata = []\n\twith open(\"data.ex\", \"r\") as fr:\n\t\tfor line in fr.readlines():\n\t\t\tdata.append(line.strip())\n\treturn data\t\n\n\ndef select(line):\n\tmatchObj = re.match( r'this is a pencil', line, re.M|re.I)\t\n\tif matchObj:\n\t\tprint matchObj.group(0)\n\ndef select_or(line):\n\tmatchObj = re.match( r'this is a (p|b)', line, re.M|re.I)\t\n\tif matchObj:\n\t\tprint line\n\n\tmatchObj = re.match( r'this is a [pb]', line, re.M|re.I)\t\n\tif matchObj:\n\t\tprint line\n\ndef select_un(line):\n\tmatchObj = re.match( r'this is a [^c]', line, re.M|re.I)\t\n\tif matchObj:\n\t\tprint line\n\ndef count_star(line):\n\tmatchObj = re.match( r'this is a (p*)encil', line, re.M|re.I)\t\n\tif matchObj:\n\t\tprint line\n\t\ndef count_plus(line):\n\tmatchObj = re.match( r'this is a (p+)encil', line, re.M|re.I)\t\n\tif matchObj:\n\t\tprint line\n\ndef count_num(line):\n\tmatchObj = re.match( r'this is a (p){2,3}encil', line, re.M|re.I)\t\n\tif matchObj:\n\t\tprint line\n\ndef position_start(line):\n\tmatchObj = re.match( r'^tthis is a (p){2,3}encil', line, re.M|re.I)\t\n\tif matchObj:\n\t\tprint line\n\ndef position_end(line):\n\tmatchObj = re.match( r'this is a (p){2,3}encil$', line, re.M|re.I)\t\n\tif matchObj:\n\t\tprint line\n\ndef extract_dup(line):\n\tmatchObj = re.match( r'(this){2} is a (p){2,3}encil$', line, re.M|re.I)\t\n\tif matchObj:\n\t\tprint line\n\ndef extract_num(line):\n\tmatchObj = re.match( r'this is a (\\d+)cm pencil', line, re.M|re.I)\t\n\tif matchObj:\n\t\tprint line\n\t\tprint matchObj.group(0)\n\t\tprint matchObj.group(1)\n\ndef extract_insert(line):\n\tmatchObj = re.match( r'this is a (\\d+)cm (\\1) type pencil', line, re.M|re.I)\t\n\tif matchObj:\n\t\tprint line\n\t\tprint matchObj.group(0)\n\t\tprint matchObj.group(1)\n\ndef main():\n\tdata = readFile()\n\tfunc = getattr(sys.modules[__name__], sys.argv[1])\n\tfor line in data:\n\t\tfunc(line)\n\t\tprint\n\t\t'''\n\t\tmatchObj = re.search( r'his is a pen.', line, re.M|re.I)\t\n\t\tif matchObj:\n\t\t\tprint matchObj.group(0)\n\t\t'''\n\nif __name__ == \"__main__\":\n\tmain()\n" } ]
2
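regex.py in the record above exercises re.match with quantifiers, groups, and the backreference \1 (its extract_insert wraps the backreference in an extra group, which is legal but redundant). A compact, runnable recap of the two behaviours it leans on, using an invented sample line:

    import re

    line = "this is a 15cm 15 type pencil"

    # match() anchors at the start of the string; \1 must repeat
    # whatever text group 1 captured.
    m = re.match(r"this is a (\d+)cm \1 type pencil", line)
    if m:
        print(m.group(0))   # the whole match
        print(m.group(1))   # '15'

    # search() scans anywhere in the string, unlike match().
    print(bool(re.search(r"\d+cm", "a 15cm pencil")))  # True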
youssefelmasry/lenme_assignment
https://github.com/youssefelmasry/lenme_assignment
470c0c5047378ebe307001254de18f75db081e82
0f0592dc9fe496b48c0740782f13d960baf07f91
59f12674fa92d09bf04a64beb7723f1b1be38cbf
refs/heads/master
2023-03-01T13:48:00.945668
2021-01-30T18:19:00
2021-01-30T18:19:00
334,390,848
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8411552309989929, "alphanum_fraction": 0.8411552309989929, "avg_line_length": 33.625, "blob_id": "03bb83d20b607fde75c1bcf2390246a64a0d71a6", "content_id": "7be1dff023dc23ab12a31dc07e632d2cf3c2bd32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 277, "license_type": "no_license", "max_line_length": 52, "num_lines": 8, "path": "/users/views.py", "repo_name": "youssefelmasry/lenme_assignment", "src_encoding": "UTF-8", "text": "from rest_framework.generics import CreateAPIView\nfrom rest_framework import permissions\n\nfrom users.serializers import UserRegisterSerializer\n\nclass UserRegisterView(CreateAPIView):\n serializer_class = UserRegisterSerializer\n permission_classes = [permissions.AllowAny]\n" }, { "alpha_fraction": 0.5825636982917786, "alphanum_fraction": 0.5888437628746033, "avg_line_length": 46.49122619628906, "blob_id": "9cd600dbd19e5b1ff497ad3487138148ddf1a598", "content_id": "a21de3a998016b0cc33510991c63572bdf896c00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2707, "license_type": "no_license", "max_line_length": 168, "num_lines": 57, "path": "/loans/migrations/0001_initial.py", "repo_name": "youssefelmasry/lenme_assignment", "src_encoding": "UTF-8", "text": "# Generated by Django 3.1.5 on 2021-01-30 14:21\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='LenmeVariables',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('lenmefee', models.FloatField()),\n ],\n ),\n migrations.CreateModel(\n name='Loans',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('loan_amount', models.PositiveIntegerField()),\n ('loan_period_in_month', models.PositiveSmallIntegerField()),\n ('loan_status', models.CharField(choices=[('pending', 'PENDING'), ('funded', 'FUNDED'), ('completed', 'COMPLETED')], default='pending', max_length=50)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('borrower', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='borrower', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='LoanPayments',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('paid_amount', models.FloatField()),\n ('paid_at', models.DateTimeField(auto_now_add=True)),\n ('loan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='loanpayment', to='loans.loans')),\n ],\n ),\n migrations.CreateModel(\n name='LoanOffers',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('annual_interest_rate', models.FloatField()),\n ('accepted', models.BooleanField(default=False)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ('updated_at', models.DateTimeField(auto_now=True)),\n ('investor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='loanoffers', to=settings.AUTH_USER_MODEL)),\n ('loan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, 
related_name='loanoffers', to='loans.loans')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.6209039688110352, "alphanum_fraction": 0.6209039688110352, "avg_line_length": 34.41999816894531, "blob_id": "ba0828a9735efc9e8b3b6ee630790446b2ed2abc", "content_id": "e7fe030e3a861e7fd1b4e4b362b4080ec2e64e8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1770, "license_type": "no_license", "max_line_length": 162, "num_lines": 50, "path": "/users/serializers.py", "repo_name": "youssefelmasry/lenme_assignment", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom rest_framework_simplejwt.tokens import RefreshToken\nfrom rest_framework_simplejwt.serializers import TokenObtainPairSerializer\nfrom django.contrib.auth import get_user_model\n\nclass UserRegisterSerializer(serializers.ModelSerializer):\n token = serializers.SerializerMethodField()\n\n def get_token(self, user):\n refresh = RefreshToken.for_user(user)\n\n return {\n 'refresh': str(refresh),\n 'access': str(refresh.access_token),\n }\n\n def create(self, validated_data):\n user = get_user_model().objects.create_user(\n email=validated_data['email'],\n password=validated_data['password'],\n username=validated_data['username'],\n usertype=validated_data['usertype']\n )\n user.save()\n \n return user\n\n class Meta:\n model = get_user_model()\n fields = ['email', 'password', 'username','usertype', 'token']\n extra_kwargs = {'email': {'required': True},\n 'password': {'write_only': True},\n 'email': {'write_only': True},\n 'username': {'write_only': True}}\n\nclass CustomObtainTokenSerializer(TokenObtainPairSerializer):\n\n \"\"\"Custom Token Obtain to validate with username and email instead of username only\"\"\"\n\n def validate(self, attrs):\n credentials = {\n 'username': '',\n 'password': attrs.get(\"password\")\n }\n\n user_obj = get_user_model().objects.filter(email=attrs.get(\"username\")).first() or get_user_model().objects.filter(username=attrs.get(\"username\")).first()\n if user_obj:\n credentials['username'] = user_obj.username\n\n return super().validate(credentials)" }, { "alpha_fraction": 0.7182203531265259, "alphanum_fraction": 0.7182203531265259, "avg_line_length": 38.22222137451172, "blob_id": "a62d58127c926b36dead42d19e99e8f128521b63", "content_id": "c81728f67c6bb83575c2b39e545b7e33d335ad3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1416, "license_type": "no_license", "max_line_length": 114, "num_lines": 36, "path": "/loans/views.py", "repo_name": "youssefelmasry/lenme_assignment", "src_encoding": "UTF-8", "text": "from functools import partial\nfrom rest_framework.generics import CreateAPIView, UpdateAPIView\nfrom rest_framework.response import Response\nfrom loans.serializers import LoanSubmitSerializer, OfferSerializer\n\nclass LoanSubmitView(CreateAPIView):\n serializer_class = LoanSubmitSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data, context={\"user\":request.user})\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response({\"status\":\"Loan Submitted, Please Check for Offers\"})\n\n\nclass OfferSubmitView(UpdateAPIView, CreateAPIView):\n\n serializer_class = OfferSerializer\n\n def get_queryset(self):\n query_set = OfferSerializer.Meta.model.objects.all()\n return query_set\n\n def update(self, request, *args, **kwargs):\n instance = self.get_object()\n serializer = 
self.get_serializer(instance, data=request.data, context={'user':request.user}, partial=True)\n serializer.is_valid(raise_exception=True)\n self.perform_update(serializer)\n\n return Response(serializer.data)\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data, context={\"user\":request.user})\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response(serializer.data)\n " }, { "alpha_fraction": 0.6439790725708008, "alphanum_fraction": 0.6463955044746399, "avg_line_length": 24.080808639526367, "blob_id": "18a9e26ff71c44b08314c7e7c190d4644c35f485", "content_id": "e5f56bd1fea5559a8d7dbb76cce9ffb60439e23a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2483, "license_type": "no_license", "max_line_length": 147, "num_lines": 99, "path": "/Flow_Design.md", "repo_name": "youssefelmasry/lenme_assignment", "src_encoding": "UTF-8", "text": "# Flow Design:\n\n### APIs:\n```\n\t1. /user/register/ POST\n\t\trequest:\n\t\t\tusername: str\n\t\t\temail: str\n\t\t\tpassword: str\n\t\t\tusertype: str (investor/borrower)\n\t\tresponse:\n\t\t\trefresh: str(token)\n\t\t\taccess: str(token)\n\n\t2. /user/token/obtain/ POST\n\t\trequest:\n\t\t\tusername/email: str\n\t\t\tpassword: str\n\t\tresponse:\n\t\t\trefresh: str(token)\n\t\t\taccess: str(token)\n\n\t3. /user/token/refresh/ POST\n\t\trequest:\n\t\t\trefresh: str(token)\n\t\tresponse:\n\t\t\taccess: str(token)\t\t\t\n\n\t4. /loans/submit/ POST\n\t\trequest:\n\t\t\tuser(borrower): Bearer token(Authorization header)\n\t\t\tloan_amount: int\n\t\t\tloan_period: int\n\n\t\tresponse:\n\t\t\tmsg: \"loan submitted check later for offers\"\n\n\t5. /offer/submit/ POST\n\t\trequest:\n\t\t\tuser(investor): Bearer token(Authorization header)\n\t\t\tloan: int\n\t\t\tannual_interest_rate: float\n\n\t6. 
/offer/submit/<int:offer_id>/ PATCH\n\t\trequest:\n\t\t\tuser(borrower): Bearer token(Authorization header)\n\t\t\taccepted: boolean\n```\n### Database Models:\n* Loans:\n\t* attributes:\n\n\t * ***borrower:*** foriegen key(user)\n\t * ***investor:*** foriegen key(user)\n\t * ***loan_amount:*** positiveintegerfield(default:null)\n\t * ***annual_interest_rate:*** floatfield(default:null)\n\t * ***loan_period_month:*** positiveintegerfield\n\t * ***loan_status:*** charfield (choises: pending(default), funded, completed)\n\t * ***created_at:*** datetimefield\n\t * ***updated_at:*** datetimefield\n\n\t* methods:\n\n\t\t* ***payment_status():*** -> {\n\t\t\t\"paid_amount\": float,\n\t\t\t\"paid_monthes\": int,\n\t\t\t\"remain_amount\":float,\n\t\t\t\"remain_monthes\":int,\n\t\t}\n\t\t* ***monthly_payment():*** -> float\n\t\t* ***amount_winterest():*** -> float\n\t\t* ***get_annual_interest_rate():*** -> flaot\n\t\t* ***investor():*** -> object\n\n* payments:\n\t* ***loan:*** foriegen key(loans)\n\t* ***amount:*** floatfield\n\t* ***date:*** datetimefield\n\n* LenmeAccount:\n\t* ***user:*** OneToOneField(user)\n\t* ***balance:*** floatfield\n\t* ***user_type:*** charfield (choices: borrower, investor)\n\n* Lenme_Variabless:\n\t* ***Lenme_fee:*** floatfield\n\n\t*This table to make admin change the Lenme_fee anytime from admin dashboar, also to add any other fields*\n\n\n*__P.S:__ In a real project I would create a table for each Investor & Borrower and link each one with User table, to give everone it's own fields*\n*But in this assignment both will be in Lenme_Account with field user_type*\n\n\n\n### utils_functions:\n* ***check_and_make_transaction(object):*** -> None\n* ***is_loan_payment_completed(object):*** -> boolean\n* ***schedule_payment(object):*** -> boolean\n" }, { "alpha_fraction": 0.7260273694992065, "alphanum_fraction": 0.7260273694992065, "avg_line_length": 30.285715103149414, "blob_id": "fadc210223e62e50bd6f8ed820f36daa385ea081", "content_id": "8f72e0c23b242a373aa1bcbff23d3dfb8b59e30d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 62, "num_lines": 7, "path": "/loans/urls.py", "repo_name": "youssefelmasry/lenme_assignment", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom loans.views import LoanSubmitView, OfferSubmitView\n\nurlpatterns = [\n path('loan/submit/', LoanSubmitView.as_view()),\n path('offer/submit/<int:pk>/', OfferSubmitView.as_view()),\n]\n" }, { "alpha_fraction": 0.5155925154685974, "alphanum_fraction": 0.7130976915359497, "avg_line_length": 16.814815521240234, "blob_id": "793f5f27f4f7fbe06ad59e1a643ea9e907acc04f", "content_id": "916c16a998ce8885dda2fdaab169c7e42d56243e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 481, "license_type": "no_license", "max_line_length": 36, "num_lines": 27, "path": "/requirements.txt", "repo_name": "youssefelmasry/lenme_assignment", "src_encoding": "UTF-8", "text": 
"asgiref==3.3.1\nbackcall==0.2.0\ncertifi==2020.12.5\nchardet==4.0.0\ndecorator==4.4.2\nDjango==3.1.5\ndjango-environ==0.4.5\ndjangorestframework==3.12.2\ndjangorestframework-simplejwt==4.6.0\nidna==2.10\nipython==7.19.0\nipython-genutils==0.2.0\njedi==0.17.2\nparso==0.7.1\npexpect==4.8.0\npickleshare==0.7.5\nprompt-toolkit==3.0.14\npsycopg2-binary==2.8.6\nptyprocess==0.7.0\nPygments==2.7.4\nPyJWT==2.0.1\npytz==2020.5\nrequests==2.25.1\nsqlparse==0.4.1\ntraitlets==5.0.5\nurllib3==1.26.3\nwcwidth==0.2.5\n" }, { "alpha_fraction": 0.6830745935440063, "alphanum_fraction": 0.6861037611961365, "avg_line_length": 37.28985595703125, "blob_id": "c657bb0a89240daf1c3e5dcb89c2a4718c62a1c7", "content_id": "f6be0ddb5781d02c5b6e992e91410ccf96f36c68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2641, "license_type": "no_license", "max_line_length": 111, "num_lines": 69, "path": "/loans/models.py", "repo_name": "youssefelmasry/lenme_assignment", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom loans.utils import check_and_make_transaction\n\nclass Loans(models.Model):\n loan_statuses = [('pending', 'PENDING'), ('funded', 'FUNDED'), ('completed', 'COMPLETED')]\n\n borrower = models.ForeignKey(\"users.LenmeUser\", related_name=\"borrower\", on_delete=models.CASCADE)\n loan_amount = models.PositiveIntegerField()\n loan_period_in_month = models.PositiveSmallIntegerField()\n loan_status = models.CharField(choices=loan_statuses, default=\"pending\", max_length=50)\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.loan_status\n\n @property\n def investor(self):\n return self.loanoffers.get(accepted=True).investor\n\n def get_annual_interest_rate(self):\n return self.loanoffers.get(accepted=True).annual_interest_rate\n\n @property\n def amount_winterest(self):\n interest_amount = self.loan_amount*(self.get_annual_interest_rate()/100)*(self.loan_period_in_month/12)\n return self.loan_amount+interest_amount\n\n @property\n def monthly_payment(self):\n return self.amount_winterest/self.loan_period_in_month\n\n def payment_status(self):\n paid_amount = self.loanpayment.all().aggregate(total=models.Sum('paid_amount'))['total'] or 0\n paid_monthes = self.loanpayment.count()\n remain_amount = self.amount_winterest - paid_amount\n remain_monthes = self.loan_period_in_month - paid_monthes\n return {\n \"paid_amount\":paid_amount,\n \"paid_monthes\":paid_monthes,\n \"remain_amount\":remain_amount,\n \"remain_monthes\":remain_monthes\n }\n\nclass LoanOffers(models.Model):\n loan = models.ForeignKey(\"loans.Loans\", related_name=\"loanoffers\", on_delete=models.CASCADE)\n investor = models.ForeignKey(\"users.LenmeUser\", related_name=\"loanoffers\", on_delete=models.CASCADE)\n annual_interest_rate = models.FloatField()\n accepted = models.BooleanField(default=False)\n\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def save(self, *args, **kwargs):\n super().save(*args, **kwargs)\n if self.accepted:\n check_and_make_transaction(self)\n\nclass LoanPayments(models.Model):\n loan = models.ForeignKey(\"loans.Loans\", related_name=\"loanpayment\", on_delete=models.CASCADE)\n paid_amount = models.FloatField()\n paid_at = models.DateTimeField(auto_now_add=True)\n\n def __str__(self):\n return str(self.pk)\n\nclass LenmeVariables(models.Model):\n lenmefee = models.FloatField()" }, { "alpha_fraction": 
0.7972665429115295, "alphanum_fraction": 0.7972665429115295, "avg_line_length": 35.58333206176758, "blob_id": "0abc36320a9a0eb880f4d298c285728763053c87", "content_id": "d977b683dbcdb24c37767ed605c34c0277855817", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 439, "license_type": "no_license", "max_line_length": 101, "num_lines": 12, "path": "/users/urls.py", "repo_name": "youssefelmasry/lenme_assignment", "src_encoding": "UTF-8", "text": "from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView\n\nfrom users.views import UserRegisterView\nfrom users.serializers import CustomObtainTokenSerializer\n\nfrom django.urls import path\n\nurlpatterns = [\n path('register/', UserRegisterView.as_view()),\n path('token/obtain/', TokenObtainPairView.as_view(serializer_class=CustomObtainTokenSerializer)),\n path('token/refresh/', TokenRefreshView.as_view()),\n]\n" }, { "alpha_fraction": 0.6551522016525269, "alphanum_fraction": 0.6557376980781555, "avg_line_length": 39.595237731933594, "blob_id": "15de4647e0b8f2f2ad6ce961a22796f9df7c8de7", "content_id": "8b0d11a9f281c1732378f680b0bcfba67043bf75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1708, "license_type": "no_license", "max_line_length": 113, "num_lines": 42, "path": "/loans/serializers.py", "repo_name": "youssefelmasry/lenme_assignment", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\nfrom loans.models import Loans, LoanOffers\n\nclass LoanSubmitSerializer(serializers.ModelSerializer):\n class Meta:\n model = Loans\n fields = ['loan_amount', 'loan_period_in_month']\n\n def create(self, validated_data):\n validated_data['borrower'] = self.context['user']\n return super().create(validated_data)\n\nclass OfferSerializer(serializers.ModelSerializer):\n class Meta:\n model = LoanOffers\n fields = ['loan', 'annual_interest_rate', 'accepted', 'id']\n read_only_fields = ['id']\n optional_fields = ['accepted']\n\n def create(self, validated_data):\n user = self.context['user']\n if user.usertype != 'investor':\n raise serializers.ValidationError({\"status\":\"Only Investors can submit offers\"})\n\n validated_data['investor'] = user\n validated_data.pop('accepted')\n instance = LoanOffers.objects.get_or_create(loan=self.validated_data['loan'], defaults=validated_data)[0]\n return instance\n\n def update(self, instance, validated_data):\n if 'accepted' not in validated_data:\n raise serializers.ValidationError({\"accepted\":[\"This field is required.\"]})\n user = self.context['user']\n if instance.loan.borrower != user:\n raise serializers.ValidationError({\"error\":\"Cannot respond to this offer, Wrong user\"})\n\n if instance.loan.loan_status != 'pending':\n raise serializers.ValidationError({\"error\":\"This offer's loan is already funded or completed\"})\n\n setattr(instance, 'accepted', validated_data['accepted'])\n instance.save(update_fields=['accepted'])\n return instance\n " }, { "alpha_fraction": 0.6868433952331543, "alphanum_fraction": 0.6868433952331543, "avg_line_length": 32.79661178588867, "blob_id": "6b979c3d8769634e345a3f2496f30595303e9f6f", "content_id": "7bffddf2f242fc249a3af5093fc08b4d4fffaaf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1999, "license_type": "no_license", "max_line_length": 107, "num_lines": 59, "path": "/loans/utils.py", "repo_name": "youssefelmasry/lenme_assignment", "src_encoding": 
"UTF-8", "text": "from django.apps import apps\n\n\ndef check_and_make_transaction(offer_obj):\n LenmeVariables = apps.get_model(\"loans\", \"LenmeVariables\")\n\n lenme_fee = LenmeVariables.objects.last().lenmefee\n investor = offer_obj.investor\n borrower = offer_obj.loan.borrower\n loan_amount = offer_obj.loan.loan_amount\n\n if investor.userbalance >= (loan_amount+lenme_fee):\n #deduct total loan amount from investor balance\n investor.userbalance -= loan_amount+lenme_fee\n investor.save()\n\n #add loan amount to borrower balance\n borrower.userbalance += loan_amount\n borrower.save()\n\n #change loan status to funded\n offer_obj.loan.loan_status = 'funded'\n offer_obj.loan.save()\n\n ### send notification/email to both investor and borrower wheather it is successfully funded or not\n\ndef is_loan_payment_completed(loan):\n payment_status = loan.payment_status()\n if payment_status['remain_monthes'] or payment_status['remain_amount']:\n return False \n else:\n setattr(loan, 'loan_status', 'completed')\n loan.save()\n return True\n\n\ndef schedule_payment(loan):\n borrower = loan.borrower\n investor = loan.investor\n monthly_payment = loan.monthly_payment\n LoanPayments = apps.get_model(\"loans\", \"LoanPayments\")\n\n #check if borrower balance is sufficient to pay monthly payment\n if borrower.userbalance >= monthly_payment and loan.loan_status != 'completed':\n # deduct monthly payment amount from borrower's balance\n borrower.userbalance -= monthly_payment\n borrower.save()\n\n # add monthly payment to investor's balance\n investor.userbalance += monthly_payment\n investor.save()\n\n # save this payment transaction\n LoanPayments.objects.create(loan=loan, paid_amount=monthly_payment)\n\n return is_loan_payment_completed(loan)\n\n\n ### send notification/email to both investor and borrower wheather monthly payment is successful or not\n \n" }, { "alpha_fraction": 0.6992664933204651, "alphanum_fraction": 0.713936448097229, "avg_line_length": 33.16666793823242, "blob_id": "586bf10b4e604a50a2b3d97dce56c5e18c5c08b4", "content_id": "b5080194e2ecd0e100a045ac77f86b17ed17ea69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "no_license", "max_line_length": 69, "num_lines": 12, "path": "/users/models.py", "repo_name": "youssefelmasry/lenme_assignment", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\nclass LenmeUser(AbstractUser):\n user_types = [('investor', 'Investor'), ('borrower', 'Borrower')]\n\n email = models.EmailField(unique=True, max_length=254)\n usertype = models.CharField(choices=user_types, max_length=20)\n userbalance = models.FloatField(default=0)\n\n def __str__(self):\n return self.username" } ]
12
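Loans.amount_winterest and Loans.monthly_payment in the record above compute simple (non-compounding) interest pro-rated by the loan period in months. The same arithmetic as a standalone sketch; the function names and the figures in the usage lines are invented for illustration.

    def total_with_interest(principal, annual_rate_pct, months):
        # Simple interest, pro-rated: principal * rate * (months / 12).
        interest = principal * (annual_rate_pct / 100) * (months / 12)
        return principal + interest

    def monthly_payment(principal, annual_rate_pct, months):
        return total_with_interest(principal, annual_rate_pct, months) / months

    if __name__ == "__main__":
        # e.g. 5000 at 15% annual over 6 months -> 5375.0 total
        print(total_with_interest(5000, 15, 6))            # 5375.0
        print(round(monthly_payment(5000, 15, 6), 2))      # 895.83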
Techsrijan/mppython2021
https://github.com/Techsrijan/mppython2021
57ca26e1acdf5adad2afa692dd5ae23336273603
583a991f85e2414c6b8ffe0405f727f3f5d38eee
ff20661ef00b2db927c78f95a08cd6c40f950ee0
refs/heads/main
2023-06-18T22:05:44.602220
2021-07-16T00:42:26
2021-07-16T00:42:26
374,290,977
0
9
null
null
null
null
null
[ { "alpha_fraction": 0.5283018946647644, "alphanum_fraction": 0.6064689755439758, "avg_line_length": 13.288461685180664, "blob_id": "9014cb473c62791250c4d8c6d5c7648f0101f89b", "content_id": "536cac3baab4e1d9fdb685473d539c397dc035a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 742, "license_type": "no_license", "max_line_length": 50, "num_lines": 52, "path": "/turtleintro.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from turtle import *\nt=Turtle() # we are creating Turtle class object\ndef drawmyshape():\n for i in range(4):\n t.fd(200)\n t.left(90)\nw=Screen()\nw.title(\"My First Turtle Program\")\n#w.bgcolor(\"red\")\n#w.bgpic('turtle.gif')\nt.color('red','yellow')\nt.shape('turtle')\nt.pensize(5)\n#t.hideturtle()\nt.speed(1)\n'''for i in range(4):\n t.fd(100)\n t.left(90)\n'''\ndrawmyshape()\nt.right(90)\n\n'''\nt.forward(100)\nt.left(90)\nt.forward(100)\nt.left(90)\nt.forward(100)\nt.left(90)\nt.forward(100)\n'''\n#t.penup()\nt.pu()\nt.fd(100)\nt.pendown()\nt.color('blue','orange')\nt.begin_fill()\ndrawmyshape()\n'''\nfor i in range(4):\n t.fd(100)\n t.left(90)\n'''\n'''t.fd(100)\nt.left(90)\nt.fd(100)\nt.left(90)\nt.fd(100)\nt.left(90)\n'''\nt.end_fill()\ndone()" }, { "alpha_fraction": 0.6759259104728699, "alphanum_fraction": 0.7037037014961243, "avg_line_length": 29.928571701049805, "blob_id": "00b8eac01ad66844ad0e29836db659ac663b28ec", "content_id": "4c9f5c3d2cbb97f084846ce7d652ca04742f1173", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 432, "license_type": "no_license", "max_line_length": 82, "num_lines": 14, "path": "/messagebox.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter import messagebox\nroot=Tk()\ndef open_file():\n result=messagebox.askyesnocancel(\"Save Dialog Box \",\"Do u want to save File?\")\n #result=messagebox.askquestion(\"Save Dialog Box \",\"Do u want to save File?\")\n print(result)\n if result=='yes':\n print(\"Your File is Saved\")\n\nbtn=Button(root,text=\"Save File\",command=open_file)\nbtn.pack()\nroot.geometry(\"400x400+120+120\")\nroot.mainloop()" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 12, "blob_id": "a0da1658e755700f9c74e4fcab3662c6c6d07b34", "content_id": "588b8831a9aa5d819689899dd0100e0093ed4b9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 52, "license_type": "no_license", "max_line_length": 19, "num_lines": 4, "path": "/testmymodule.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "import mymodule\n\nmymodule.add(55,66)\nmymodule.msg()\n" }, { "alpha_fraction": 0.6479189991950989, "alphanum_fraction": 0.681664764881134, "avg_line_length": 27.70967674255371, "blob_id": "8c3d70a8b1ebdb832ab316cc45861dba72436737", "content_id": "36d023d9811108e8ef81a5b8cbfeef37979b1bf4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 889, "license_type": "no_license", "max_line_length": 64, "num_lines": 31, "path": "/insertintodata.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nimport 
pymysql\nroot=Tk()\nwidth=root.winfo_screenwidth()\nheight=root.winfo_screenheight()\nprint(width,height)\nconn=pymysql.connect(host=\"localhost\",user=\"root\",db=\"mppython\")\nmycursor=conn.cursor()\ndef insert_data():\n name1=i.get()\n age1=j.get()\n print(name1,age1)\n que = \"insert into user_info(name,age)values(%s,%s)\"\n val=(name1,age1)\n mycursor.execute(que,val)\n conn.commit()\n print(\"Data stored Successfully\")\n i.set('')\n j.set('')\ni=StringVar()\ntxt1=Entry(root,font=(\"Comic Sans Ms\",20,'bold'),textvariable=i)\ntxt1.pack()\nj=StringVar()\ntxt2=Entry(root,font=(\"Comic Sans Ms\",20,'bold'),textvariable=j)\ntxt2.pack()\nbtn=Button(root,text=\"Insert\",fg=\"blue\",bg=\"yellow\",\n font=(\"Comic Sans Ms\",20,'bold'),command=insert_data)\nbtn.pack()\n#root.geometry(\"400x400+300+150\")\nroot.geometry(\"%dx%d+0+0\"%(width,height))\nroot.mainloop()" }, { "alpha_fraction": 0.3973063826560974, "alphanum_fraction": 0.4848484992980957, "avg_line_length": 7.25, "blob_id": "3c49ca0c64b879579abaa431bae14b838cd43fc8", "content_id": "6ab174522ec492c16f6f054e9542b3cd18228445", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "no_license", "max_line_length": 20, "num_lines": 36, "path": "/matrixoperation.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from numpy import *\na=array([\n [1,3,5],\n [3,5,7],\n [4,6,8]\n])\nprint(a)\nb=array([\n [1,22,5],\n [3,7,7],\n [45,6,8],\n\n])\n\nprint(b)\n\n#print(a+b)\n\n'''\n2*3 r c\n3*2 r c\n\n2*2\n\n'''\n\nc=dot(a,b)\nprint(c)\n\nd=a@b\nprint(d)\n\nprint(a.transpose())\nprint(diagonal(a))\nprint(a.min())\nprint(a.max())\n" }, { "alpha_fraction": 0.5242718458175659, "alphanum_fraction": 0.5800970792770386, "avg_line_length": 20.736841201782227, "blob_id": "ed52f75f8ce357d7ca3adeeb97e014c2ca42880f", "content_id": "fb2fb8626f0ad75b3aabd7ac978d9b3430996f76", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 412, "license_type": "no_license", "max_line_length": 63, "num_lines": 19, "path": "/greatestamongthree.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "a=int(input(\"enter any number\"))\nb=int(input(\"enter any number\"))\nc=int(input(\"enter any number\"))\nprint(\"a=\",a,\" b=\",b,\" c=\",c)\nif a>b:\n if a>c:\n print(\"A is greatest\")\n else:\n print(\"C is greatest\")\nelif b>c:\n print(\"b is greatest\")\nelse:\n print(\"C is greatest\")\n\n''''\n every 4th century is leap year no other century is leap year\n i.e. 
400,800,1200,1600,2000,2400,....\n \n '''" }, { "alpha_fraction": 0.6583541035652161, "alphanum_fraction": 0.6633416414260864, "avg_line_length": 13.357142448425293, "blob_id": "7a37b77eb1e5f24bbbd0924f1c93b31fdc1af3e5", "content_id": "5cb9c08cac383ef248881c8c41927122cc071180", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 401, "license_type": "no_license", "max_line_length": 49, "num_lines": 28, "path": "/ifelseexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "age=int(input(\"enter your age\"))\nprint(\"Age=\",age)\nif age>=18:\n print(\"You are Elligible for Licence\")\nelse:\n print(\"You are not elligible\")\n\n\n\n\n\n\n'''\nif(condition)\n {\ntask to be excecuted if condition is true\n }\nelse\n{\ntask to be excecuted if condition is false\n}\n\nif condition:\n task to be excecuted if condition is true\nelse:\n task to be excecuted if condition is false \n\n'''" }, { "alpha_fraction": 0.6641509532928467, "alphanum_fraction": 0.6716980934143066, "avg_line_length": 11.666666984558105, "blob_id": "2d3fef2ac491a913997ce18cdcf2def1da10090c", "content_id": "b1edc361697f2471554ef2d259d3d6155fab96af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 265, "license_type": "no_license", "max_line_length": 50, "num_lines": 21, "path": "/functionintro.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''\nfunction\n 1. library or predefined function (sqrt,pow,ceil)\n 2. user defined function\n\n'''\n# function declaration/definition\ndef greet():\n print(\"Good Afternoon\")\n\ndef msg():\n print(\"Good Evening\")\n\n\n# function call\nmsg()\ngreet()\nmsg()\nmsg()\ngreet()\nmsg()" }, { "alpha_fraction": 0.5300353169441223, "alphanum_fraction": 0.6431095600128174, "avg_line_length": 20.769229888916016, "blob_id": "4aa8ed6c6400af435afdf454bb63707d12faa913", "content_id": "92a2e63a770f9c0ae74cce79cd7cb87c2c8285ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 65, "num_lines": 13, "path": "/keywordvariablelegth.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "def person(name,*data):\n print(name)\n print(data)\n\nperson('ashwani',9956477677,'gkp',36,373000,'Male')\n\ndef person1(name,**data):\n print(name)\n print(data)\n for i,j in data.items():\n print(i,j)\n\nperson1(name='ashwani',age=66,gender='Male',contact='9956477677')\n" }, { "alpha_fraction": 0.606249988079071, "alphanum_fraction": 0.628125011920929, "avg_line_length": 15.894737243652344, "blob_id": "15b9d88082f3b83abf5f69160765a76ba3b9a677", "content_id": "e95ea9e0c05aa6d23745d273c0050437e66f5e40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 320, "license_type": "no_license", "max_line_length": 53, "num_lines": 19, "path": "/filehandling.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "f=open('ram.txt',\"r\")\nprint(f)\n\n# to read the contents of file\n\nfor data in f:\n print(data,end=\"\")\n\n'''f1=open('Mohan.txt','a')\nprint(f1)\nmsg=input(\"Enter the text u want to write in a file\")\n\nf1.write(msg)\n'''\nf4=open('myturtle.gif','wb')\nf3=open('turtle.gif','rb')\nfor data in f3:\n print(data)\n f4.write(data)" }, { "alpha_fraction": 0.5930736064910889, "alphanum_fraction": 0.5930736064910889, "avg_line_length": 
22.200000762939453, "blob_id": "a19137e2bc295d4d4e9c77c15d61e3a9e4d708f9", "content_id": "81994128fa875ec38b52ef7cf8ec19866fc7810f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 41, "num_lines": 10, "path": "/inputmorethanone.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''f=int(input(\"Enter the first number\"))\ns=int(input(\"Enter the Second number\"))\n'''\n\nf,s=input(\"Enter two number\").split(',')\nprint(\"F=\",f,\"S=\",s)\n\nj,k=input(\"Enter two number\").split(' ')\nprint(\"j=\",j,\"k=\",k)\nprint(\"add=\",j+k)" }, { "alpha_fraction": 0.6767676472663879, "alphanum_fraction": 0.7340067625045776, "avg_line_length": 26.090909957885742, "blob_id": "4c6909f696a6f807f0f67f774ca03bdf284cacaa", "content_id": "2c0ecb50ace49b2d6963b3e006bf3a1464a998f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 297, "license_type": "no_license", "max_line_length": 57, "num_lines": 11, "path": "/imagehelp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom PIL import Image,ImageTk\nroot=Tk()\nroot.config(bg='yellow')\nexit_image=ImageTk.PhotoImage(Image.open('images/3.png'))\nbtn3=Button(root,text=\"Exit\",image=exit_image,\n command=quit)\nbtn3.pack()\nroot.resizable(0,0)\nroot.geometry(\"400x400+300+150\")\nroot.mainloop()" }, { "alpha_fraction": 0.6093418002128601, "alphanum_fraction": 0.6464968323707581, "avg_line_length": 26.735294342041016, "blob_id": "b50815c261e76d3a3ecb370d1a0ca008d6255d8b", "content_id": "424ba46954591bd189392c7e340813cf48f197c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 942, "license_type": "no_license", "max_line_length": 69, "num_lines": 34, "path": "/tkinterintro.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nroot=Tk()\n\ndef msg():\n print(\"Good morning\")\n\ndef msg2():\n print(\"Good Afternoon\")\nroot.title(\"My First Window\")\nlabel=Label(root,text=\"Enter Your Name\",bg=\"red\",fg=\"yellow\",\n font=(\"Comic Sans Ms\",20,'bold'))\nlabel.pack(side=TOP)\n\nlabel1=Label(root,text=\"Enter Your Father Name\",bg=\"red\",fg=\"yellow\",\n font=(\"Comic Sans Ms\",20,'bold'))\nlabel1.pack(side=BOTTOM)\n#label1.pack(side=LEFT)\nbtn=Button(root,text=\"Click Me\",fg=\"blue\",bg=\"yellow\",\n font=(\"Comic Sans Ms\",20,'bold'),command=msg)\nbtn.pack()\n\nbtn1=Button(root,text=\"Msg ME\",fg=\"blue\",bg=\"yellow\",\n font=(\"Comic Sans Ms\",20,'bold'),command=msg2)\nbtn1.pack(fill=X)\n\ntxt=Entry(root,font=(\"Comic Sans Ms\",20,'bold'))\ntxt.pack()\n\nbtn3=Button(root,text=\"Exit\",fg=\"blue\",bg=\"yellow\",\n font=(\"Comic Sans Ms\",20,'bold'),command=quit)\nbtn3.pack()\nroot.resizable(0,0)\nroot.geometry(\"400x400+300+150\")\nroot.mainloop()" }, { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.6774193644523621, "avg_line_length": 20, "blob_id": "589120970ddfdd29728158001a1a088942a4ce7b", "content_id": "cd6e3d26cd34c29d40ea31607176661f8a76a885", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 20, "num_lines": 3, "path": "/ankitbaday.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "print(\"Happy B-day\")\nprint(\"Happy B-day\")\nprint(\"Happy 
B-day\")" }, { "alpha_fraction": 0.6555555462837219, "alphanum_fraction": 0.6944444179534912, "avg_line_length": 23.066667556762695, "blob_id": "4b4015b1f92b916b122bd4784e06883466584b5c", "content_id": "4022385d13b2ff3ccf36dbf28c5f31950d10a49d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 360, "license_type": "no_license", "max_line_length": 74, "num_lines": 15, "path": "/simpledialogexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter import simpledialog\nroot=Tk()\nroot.config(bg='yellow')\ndef input_data():\n sum=0\n for i in range(5):\n s=simpledialog.askinteger(\"Enter Marks of Students\",\"Enter Marks\")\n sum=sum+s\n print(sum)\n\nbtn=Button(root,text=\"getData\",command=input_data)\nbtn.pack()\nroot.geometry(\"400x400+120+120\")\nroot.mainloop()" }, { "alpha_fraction": 0.5418060421943665, "alphanum_fraction": 0.5953177213668823, "avg_line_length": 11, "blob_id": "5cff63e76cbab27d21eb0088abdb19b7e9b17721", "content_id": "680ca79f5bb99f1fc14155abc45d734bf3f71f43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 299, "license_type": "no_license", "max_line_length": 20, "num_lines": 25, "path": "/controlturtleusingarrowkey.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from turtle import *\nt=Turtle()\n\ndef up():\n t.forward(100)\n\ndef down():\n t.backward(200)\n\ndef right():\n t.right(90)\n t.forward(100)\n\ndef left():\n t.left(90)\n t.forward(100)\n\nw=Screen()\nw.onkey(up,\"Up\")\nw.onkey(down,\"Down\")\nw.onkey(left,\"Left\")\nw.onkey(right,\"R\")\nw.listen()\n\ndone()" }, { "alpha_fraction": 0.3333333432674408, "alphanum_fraction": 0.3777777850627899, "avg_line_length": 18.428571701049805, "blob_id": "0a8012ea1259fada35660c2acd424fd3702393b2", "content_id": "4e2a67524fefa8bd79b3a11507f77ffe8f5be9c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 41, "num_lines": 7, "path": "/evenoddexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "i=1\nwhile i<=10:\n if i%2==0:\n print(\"Even no= \",i, \" \", end=\"\")\n else:\n print(\"Odd no= \", i, \" \", end=\"\")\n i=i+1" }, { "alpha_fraction": 0.6124338507652283, "alphanum_fraction": 0.6402116417884827, "avg_line_length": 18.8157901763916, "blob_id": "0f320f69b993851a3b1923f51d103768f893fd2f", "content_id": "fe4a19b4b29a4a90f2e640a998e8942acb3ce776", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 756, "license_type": "no_license", "max_line_length": 51, "num_lines": 38, "path": "/StringExample.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "s='welcome in Python in'\nprint(len(s))\nprint(s)\nprint(s[0:])\nprint(s[0::2])\nprint(s.title())\nprint(s.capitalize())\nprint(s.lower())\nprint(s.upper())\nprint(s.swapcase())\nprint(s.count('in'))\nname='Ram'\nprint(name.isalpha())\nstr=\"Ram123st\"\nprint(str.isalnum())\nmobile='9956477677'\nprint(mobile.isdigit())\ntest='Her name is Tammana and Tammana is good girl'\nprint(test.replace('Tammana','Sonia'))\na='!!!!!!!!! 
Welcome to India '\nprint(a,end=\"\")\nprint('Hello')\nprint(a.strip('!'))\nprint(a.rstrip( ))\nprint(a.lstrip( ))\n\ntext=\"hello, how, r, you\"\nprint(text.split(','))\n\nprint(text.find('hjfgf'))\n\nstr2='Welcome'\nprint(str2.endswith('e'))\nprint(str2.startswith('W'))\nif str2[0]=='w':\n print('Item found')\nelse:\n print('item not found')\n\n\n\n" }, { "alpha_fraction": 0.6246106028556824, "alphanum_fraction": 0.6464174389839172, "avg_line_length": 25.75, "blob_id": "4e0593e0c4abc342362cbb9d778e17f35280d4f9", "content_id": "05450350e01c0c42b9bc7165aef60018b02071e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 642, "license_type": "no_license", "max_line_length": 70, "num_lines": 24, "path": "/openfileexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter import filedialog\nroot=Tk()\n\ndef open_file():\n r=filedialog.askopenfile(initialdir=\"/\",title=\"Open File\",\n filetype=(('All Files','*.*'),('Text File','*.txt')))\n print(r)\n for data in r:\n print(data,end=\"\")\n txt.insert(INSERT,data)\ndef Save_file():\n f=filedialog.asksaveasfile(mode=\"w\",defaultextension=\"*.txt\")\n f.write('Welcome')\n f.close()\n print(f)\ntxt=Text(root)\ntxt.pack()\nbtn=Button(root,text=\"Open File\",command=open_file)\nbtn.pack()\nbtn2=Button(root,text=\"Save File\",command=Save_file)\nbtn2.pack()\nroot.geometry(\"600x600+120+120\")\nroot.mainloop()\n" }, { "alpha_fraction": 0.6729190945625305, "alphanum_fraction": 0.7104337811470032, "avg_line_length": 22.08108139038086, "blob_id": "f2ab954b21ada76eda2442733613535ee9b0db71", "content_id": "36b69c6876b381962399c49fd9e6bbdffc959e85", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 853, "license_type": "no_license", "max_line_length": 73, "num_lines": 37, "path": "/textarea.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nroot=Tk()\nroot.config(bg=\"yellow\")\ndef get_text_data():\n result=text.get(1.0,END)\n print(result)\n\ndef get_select_text_data():\n result=text.selection_get()\n print(result)\n\ndef clear_text():\n text.delete(1.0,END)\n\ndef position():\n res=text.selection_get()\n pos=text.search(res,1.0,stopindex=END)\n print(pos)\n\ntext=Text(root,height=10,width=50,wrap=WORD,padx=10,pady=10,\n selectbackground='red')\ntext.pack()\ntext.insert(INSERT,\"Hi Welocome You All\")\n\nbtn=Button(root,text=\"print data\",command=get_text_data)\nbtn.pack()\n\nbtn2=Button(root,text=\"print selected data\",command=get_select_text_data)\nbtn2.pack()\n\nbtn3=Button(root,text=\"clear text\",command=clear_text)\nbtn3.pack()\n\nbtn4=Button(root,text=\"select element position\",command=position)\nbtn4.pack()\nroot.geometry(\"400x400+150+150\")\nroot.mainloop()" }, { "alpha_fraction": 0.5926892757415771, "alphanum_fraction": 0.6240208745002747, "avg_line_length": 26.428571701049805, "blob_id": "d0d3e5d7dbdc32a155c1199a36f4ce83b96ad953", "content_id": "1faa531615526bd9635965b3b6890c8999cb9022", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 383, "license_type": "no_license", "max_line_length": 67, "num_lines": 14, "path": "/tkinterobjectoriented.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nclass MyWindow:\n def msg(self):\n print(\"Good morning\")\n def __init__(self,window):\n 
self.button=Button(window,text=\"Click me\",command=self.msg,\n fg=\"red\",bg=\"yellow\")\n self.button.pack()\n\nroot=Tk()\nwindow=MyWindow(root)\nroot.title(\"My Object oriented Window\")\nroot.geometry(\"400x400+150+150\")\nroot.mainloop()" }, { "alpha_fraction": 0.46666666865348816, "alphanum_fraction": 0.5666666626930237, "avg_line_length": 9.333333015441895, "blob_id": "d42a338deb357a0df241f2508d522ca3cafda20c", "content_id": "58825c70a0cd8ac3e5d29a3ab486267c7a2346c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30, "license_type": "no_license", "max_line_length": 15, "num_lines": 3, "path": "/third.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "a,b=10,5\nc=a+b\nprint(\"sum=\",c)" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 29, "blob_id": "36b9289a0083a803a7f76bc4e42dcc11e3a019fa", "content_id": "3208aac707720a9927396c88d1e927866ff01cb4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30, "license_type": "no_license", "max_line_length": 29, "num_lines": 1, "path": "/demo.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "print(\"My first shared file\")\n" }, { "alpha_fraction": 0.6570512652397156, "alphanum_fraction": 0.7003205418586731, "avg_line_length": 19.83333396911621, "blob_id": "3315215abc8b01e2ad64bd626009b7e75968097f", "content_id": "1c189c6f28e35601f8584f1dd78d69b16ddb1582", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 624, "license_type": "no_license", "max_line_length": 57, "num_lines": 30, "path": "/typesofargumentpassfunction.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''\nTypes of actual argument to function\n1. positional argument\n2. keyword argument\n3. default argument\n4. variable length argument (*)\n5. keyword variable length argument\n'''\n#1. positional\ndef person(name,age):\n print(\"Name=\",name)\n age=age+10\n print(\"Age=\",age)\n\nperson('Ashwani',38)\n#person(55,'rohit')\n#2. keyword argument\nperson(name='ram',age=35)\n#a=500\n#b='ttt'\n#person(age=a,name=b)\nperson(age=18,name='vishal')\n\n#3. 
default argument\ndef getpersondata(name,age=18): #default argument age=18\n print(\"Name=\", name)\n print(\"Age=\", age)\n\ngetpersondata(name='sapana')\ngetpersondata(name='bandana',age=15)" }, { "alpha_fraction": 0.6314152479171753, "alphanum_fraction": 0.729393482208252, "avg_line_length": 39.25, "blob_id": "e7f67cff648cbaa62f8c58566ec82709618bd87e", "content_id": "f8b23bca65e03bf80222df43a8ea4abc8669fd34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 643, "license_type": "no_license", "max_line_length": 80, "num_lines": 16, "path": "/canvasexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "\"\"\"The Canvas is a rectangular area intended for drawing pictures or\nother complex layouts.\nYou can place graphics, text, widgets or frames on a Canvas.\n\"\"\"\nfrom tkinter import *\nroot=Tk()\ncanvas=Canvas(root,height=500,width=500,bg=\"red\")\ncanvas.create_text(300,50,text=\"Canvas window layout\",\n font=\"Times 20 italic bold\",fill=\"yellow\")\ncanvas.create_line(0,0,500,500,fill=\"white\",width=10)\ncanvas.create_rectangle(250,250,100,100,fill=\"yellow\",outline=\"blue\",width=\"10\")\ncanvas.create_oval(250,250,100,100,fill=\"white\",outline=\"blue\",width=\"10\")\n# arc #polygon\ncanvas.pack()\nroot.geometry(\"600x600+150+150\")\nroot.mainloop()" }, { "alpha_fraction": 0.6614457964897156, "alphanum_fraction": 0.6698794960975647, "avg_line_length": 33.58333206176758, "blob_id": "d72984d7b2098f2e64e63ae48c536cb212b54e03", "content_id": "74d27c30275e03375036b60c00140c63bd714ed7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 830, "license_type": "no_license", "max_line_length": 70, "num_lines": 24, "path": "/databasewithtrycatch.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "import pymysql\n\ntry:\n conn=pymysql.connect(host=\"localhost\",user=\"root\",db=\"mppython\")\n print(\"connection established\")\n mycursor=conn.cursor()\n #que=\"create table user_info(name varchar(50),age int(3))\"\n #que=\"insert into user_info(name,age)values('Mohan',99)\"\n #que=\"update user_info set name='Shyam' where name='Ram'\"\n que=\"select * from user_info\"\n mycursor.execute(que)\n record=mycursor.fetchall()\n print(record)\n for row in record:\n print(\"Name=\",row[0])\n print(\"Age=\", row[1])\n conn.commit() # to save the result(data) of execution of any query\n #print(\"table Created Successfully\")\n print(\"Data Stored successfully\")\n #print(\"Data Updated successfully\")\nexcept ValueError as e:\n print(\"Error in connection\")\nexcept Exception:\n print(\"Error in connection\")\n" }, { "alpha_fraction": 0.5809524059295654, "alphanum_fraction": 0.5904762148857117, "avg_line_length": 17.30434799194336, "blob_id": "09e6697c5a5f15bdd67b939d8114f410d92af81a", "content_id": "48fd0206530e1d3074a0b12e85fe2bf038252d95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 420, "license_type": "no_license", "max_line_length": 58, "num_lines": 23, "path": "/inheritanceimple.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "class Room:\n def get_room_dimension(self,l,b):\n self.l=l\n self.b=b\n\n def cal_area(self):\n area=self.l*self.b\n print(\"Area=\",area)\n\nclass Kitchen_room(Room): #kitchen_room inherit Room\n def msg(self):\n print(\"this is kitchen room\")\n\n\nk=Kitchen_room()\nk.msg()\nk.get_room_dimension(5,2) # room or 
parent\nk.cal_area()\n\n'''\nr=Room()\nr.get_room_dimension(5,2)\nr.cal_area()'''" }, { "alpha_fraction": 0.690883219242096, "alphanum_fraction": 0.690883219242096, "avg_line_length": 23.13793182373047, "blob_id": "d383cd1dd12e8d0b3249e99be15f259c6f068611", "content_id": "e7664ca83412fd3faaa65f41a667b6a6b3f89cfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 702, "license_type": "no_license", "max_line_length": 57, "num_lines": 29, "path": "/runtimeerrorexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "\n# this code is risky code\n# because it may contain run time error\n# then will i do\n# for this use try except mechanism\n# finally always executes weather exception occurs or not\n# there is only one finally\n\n# try-except\n# try-except-finally recommended\n# try-finally\ntry:\n a=int(input(\"Enter the value of a\"))\n b=int(input(\"Enter the value of b\"))\n print(\"a=\",a,\" b=\",b)\n d=a/b\n print(\"Divi=\",d)\n #print(\"thanks for using this program\")\n\nexcept ZeroDivisionError:\n print(\"Value of b can not be zero\")\n\nexcept ValueError:\n print(\"You have enter a character not a number\")\n\nexcept Exception:\n print(\"Something Went Wrong\")\n\nfinally:\n print(\"thanks for using this program\")\n\n" }, { "alpha_fraction": 0.48076921701431274, "alphanum_fraction": 0.5076923370361328, "avg_line_length": 11.949999809265137, "blob_id": "9acfc1eb9f3e9b433ce06fb8a89a5e4822109d97", "content_id": "af2360ee153c746fcfe3495e0a8331e633ab9706", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 39, "num_lines": 20, "path": "/fibonacci.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''\n0 1 1 2 3\nf s\n t\n f s\n t\n'''\n\nf=int(input(\"Enter the first number\"))\ns=int(input(\"Enter the Second number\"))\nn=int(input(\"Enter the no of terms\"))\n\nprint(f,\" \",s,end=\"\")\ni=1\nwhile i<=n:\n t=f+s\n print(\" \",t,\" \",end=\"\")\n f=s\n s=t\n i=i+1\n\n" }, { "alpha_fraction": 0.6678445339202881, "alphanum_fraction": 0.6996466517448425, "avg_line_length": 28.842105865478516, "blob_id": "3212be534a67a43d42c2e65801d7bf353a46d938", "content_id": "4699fdc3782e4093989e6a8f260be44ce849d244", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "no_license", "max_line_length": 46, "num_lines": 19, "path": "/result.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "h=int(input(\"enter the marks of hindi\"))\ne=int(input(\"enter the marks of english\"))\nm=int(input(\"enter the marks of math\"))\ns=int(input(\"enter the marks of science\"))\na=int(input(\"enter the marks of art\"))\ntotal_obt_marks=h+e+m+s+a\nprint(\"Total marks obtained=\",total_obt_marks)\n\nper=(total_obt_marks*100)/500\nprint(\"Percentage=\",per)\n\nif per<33:\n print(\"You are Fail\")\nelif per>=33 and per<45:\n print(\"You are Passed Third division\")\nelif per>=45 and per<60:\n print(\"You are Passed second division\")\nelif per>=60:\n print(\"You are Passed First division\")" }, { "alpha_fraction": 0.640625, "alphanum_fraction": 0.69921875, "avg_line_length": 20.41666603088379, "blob_id": "001c7ec41ddf03ef11ec8daa1cb057973ba71bd8", "content_id": "1aa21db76d88ee7eebe6ab6e28319bf562b6a2c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 
256, "license_type": "no_license", "max_line_length": 38, "num_lines": 12, "path": "/filtermapreduce.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "# filter(), map() and reduce()\nfrom functools import reduce\nnum=[1,5,7,3,6,8,24,45,96]\neven=list(filter(lambda n:n%2==0,num))\nprint(even)\n\n# map operates on filter result\ndata=list(map(lambda n:n*2,even))\nprint(data)\n\nt=reduce(lambda a,b:a+b,data)\nprint(t)" }, { "alpha_fraction": 0.6898733973503113, "alphanum_fraction": 0.7056962251663208, "avg_line_length": 17.647058486938477, "blob_id": "15e323d5b757cdabf033484c53652279638d5bda", "content_id": "6833c9538f6a4fa58617bb6f4aab82f146674c7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 316, "license_type": "no_license", "max_line_length": 38, "num_lines": 17, "path": "/Add.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "a=int(input(\"enter any number\"))\nprint(\"Value of a=\",a)\nb=int(input(\"enter any number\"))\nprint(\"Value of b=\",b)\n\nc=a+b\nprint(\"Sum=\",c)\n\n# you have to implement these programs\n'''\nMultiline comment\n1. Add,Sub,Mul,Division\n2. Area Of circle\n3. Simple Interest\n4. Area Of Triangle\n5. degree centigrate to Farenhight\n'''" }, { "alpha_fraction": 0.5600676536560059, "alphanum_fraction": 0.5939086079597473, "avg_line_length": 25.81818199157715, "blob_id": "b6d08a101260394c49496673d83cac5f6767c870", "content_id": "48cf2b4d9131b732ef1b1fd0753317225e855928", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 591, "license_type": "no_license", "max_line_length": 52, "num_lines": 22, "path": "/quadratic.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "#import math\n#import math as sapana\nfrom math import *\nprint(\"enter the coffecients of quadratic equation\")\na=float(input(\"Enter the value of a\"))\nb=float(input(\"Enter the value of b\"))\nc=float(input(\"Enter the value of c\"))\nprint(\"a=\",a,\" b=\",b,\" c=\",c)\nd=b*b-4*a*c\nprint(\"D=\",d)\nif d==0:\n print(\"Roots are real and equal\")\n x1=-b/(2*a)\n x2=x1\n print(\"Roots are x1=\",x1,\" x2=\",x2)\nelif d>0:\n print(\"Roots are real and unequal\")\n x1=(-b+sqrt(d))/(2*a)\n x2 = (-b - sqrt(d)) / (2 * a)\n print(\"Roots are x1=\", x1, \" x2=\", x2)\nelif d<0:\n print(\"roots are imaginary\")\n\n" }, { "alpha_fraction": 0.38661709427833557, "alphanum_fraction": 0.5130111575126648, "avg_line_length": 11.857142448425293, "blob_id": "272b31bdf9f8bc6ce657fb4328c701e9f979df78", "content_id": "1ff08da9e5955db0ff8fcaffbcc8dd88e596ae15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 269, "license_type": "no_license", "max_line_length": 55, "num_lines": 21, "path": "/whileassignment.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''i=63\nwhile i<=630:\n print(i,\" \",end=\"\")\n i=i+63\n'''\n'''\n2*1=2\n2*2=4\n2*3=6\n2*4=8\n...\n2*10=20\nhere 2 is the no input by user\nand 1,2,3...10 \n'''\nn=int(input(\"Enter the number for which u want table\"))\ni=1\nwhile i<=10:\n t=n*i\n print(n,\"*\",i,\"=\",t)\n i=i+1" }, { "alpha_fraction": 0.6022964715957642, "alphanum_fraction": 0.630480170249939, "avg_line_length": 20.795454025268555, "blob_id": "5ba6f670fcd5c3bff7390df6774cfda20fc4b944", "content_id": "61c51e37b8848ad0be737e790d2ff7db4f1abfae", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 958, "license_type": "no_license", "max_line_length": 53, "num_lines": 44, "path": "/typeofmetgodinclass.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''\ntypes of method\n1. instance method -- Which accepts self as parameter\n (a). Accessor Method -only fetch the value\n (b). Mutator Method - Set the value\n2. class method --- which accepts cls as parameter\n\n3. static method-- which accepts no parameter\n\n'''\n\nclass Student:\n school='techsrijan' # class variable\n\n\n @classmethod # decorator\n def get_school(cls):\n return cls.school\n\n def __init__(self,m1,m2,m3): # instance method\n self.m1=m1\n self.m2=m2\n self.m3=m3\n\n def get_m2(self): # instance--accessor --fetch\n return (self.m2)\n\n def set_m2(self): # instance --mutator --set\n self.m2=500\n\n @staticmethod\n def msg(): # static method --no parameter\n print(\"This is static method\")\n\n\n#static -method --classname.method()\nStudent.msg()\n# class method --classname.method()\nprint(Student.get_school())\n'''\namit=Student(10,20,30)\namit.set_m2()\nprint(amit.get_m2())\n'''" }, { "alpha_fraction": 0.5769230723381042, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 25.16666603088379, "blob_id": "f299c652eb7bc743db61a557275ec35b4d7bcc2c", "content_id": "ea933f5c08eec41aa48adf4b24d4bcf66650603e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 156, "license_type": "no_license", "max_line_length": 57, "num_lines": 6, "path": "/inputmultipleintegervalue.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "a,b,c,d=[int(i) for i in input(\"enter 4 number\").split()]\nprint(\"A=\",a,type(a))\nprint(\"B=\",b,type(b))\nprint(\"C=\",c,type(c))\nprint(\"D=\",d,type(d))\nprint(c+d)" }, { "alpha_fraction": 0.6410890817642212, "alphanum_fraction": 0.676980197429657, "avg_line_length": 27.89285659790039, "blob_id": "d8b59725fd6096eb59fac83783e48d685ac0a1c0", "content_id": "53682e5a90daa28b492fe8dec4032b56f698e913", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 808, "license_type": "no_license", "max_line_length": 66, "num_lines": 28, "path": "/mouseevent.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nroot=Tk()\n\ndef leftclick(event=\"\"):\n print(\"mouse leftbutton is clicked\")\n\ndef rightclick(event):\n print(\"mouse right button is clicked\")\ndef middlebuttonclick(event):\n print(\"mouse middle button is clicked\")\n\nbtn=Button(root,text=\"Left click\",fg=\"blue\",bg=\"yellow\",\n font=(\"Comic Sans Ms\",20,'bold'))\nbtn.bind(\"<Button-1>\",leftclick)\nroot.bind(\"<Control-u>\",leftclick)\nbtn.pack()\n\nbtn1=Button(root,text=\"Right Click\",fg=\"blue\",bg=\"yellow\",\n font=(\"Comic Sans Ms\",20,'bold'))\nbtn1.bind(\"<Button-2>\",middlebuttonclick)\nbtn1.pack()\nbtn3=Button(root,text=\"Middle Button click\",fg=\"blue\",bg=\"yellow\",\n font=(\"Comic Sans Ms\",20,'bold'))\nbtn3.bind(\"<Button-3>\",rightclick)\nbtn3.pack()\nroot.resizable(0,0)\nroot.geometry(\"400x400+300+150\")\nroot.mainloop()" }, { "alpha_fraction": 0.647773265838623, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 18.799999237060547, "blob_id": "67ade0a10d362f2bf690a2bc231f02afe0dbafc0", "content_id": "ed0b57ba3d2c0039ef9b518d54dd8f97c2c02169", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 494, "license_type": 
"no_license", "max_line_length": 62, "num_lines": 25, "path": "/runtimeuserinputanarray.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from array import *\n\nmarks=array('i',[]) # to create an empty array\nn=int(input(\"How many students marks u want to store\"))\nprint(\"No of students=\",n)\n\nfor i in range(n):\n x=int(input(\"Enter the marks of student\"))\n marks.append(x)\n\nprint(marks)\n\nsearch=int(input(\"Enter the marks of student u want to find\"))\n\nloc=0\nfor j in marks:\n if j==search:\n print(\"Item found at location=\",loc+1)\n break\n loc=loc+1\nelse:\n print(\"item not found\")\n\n\nprint(marks.index(search))" }, { "alpha_fraction": 0.6575342416763306, "alphanum_fraction": 0.7123287916183472, "avg_line_length": 28.266666412353516, "blob_id": "6a379a0dac73ccaf0ffce3d5a77ff1850af17499", "content_id": "983fa1ba0df9c68b9d920dd85f3956ed237466be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "no_license", "max_line_length": 64, "num_lines": 15, "path": "/toplevelwindow.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nroot=Tk()\ndef open_window():\n top=Toplevel(root)\n top.title(\"Child Window\")\n btn_destroy = Button(top, text=\"close\", command=top.destroy)\n btn_destroy.pack()\n top.geometry(\"400x400+150+150\")\nroot.title(\"Main Window\")\nbtn=Button(root,text=\"open new window\",command=open_window)\nbtn.pack()\nbtn_close=Button(root,text=\"Quit\",command=quit)\nbtn_close.pack()\nroot.geometry(\"400x400+150+150\")\nroot.mainloop()" }, { "alpha_fraction": 0.5572916865348816, "alphanum_fraction": 0.578125, "avg_line_length": 12.785714149475098, "blob_id": "bd562ba8cf34b1ba51473b515f6f70ce6291107c", "content_id": "b287d62f4fb2f613581060fade988bfbdb0d44da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 38, "num_lines": 14, "path": "/whileloopexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''\ninitilization\nwhile condition:\n task to be repeated\n updation\n\n'''\n\ni=1\nwhile i>=5:\n #print(\"Happy B-day ankit\",end=\"\")\n print(\"Happy B-day\")\n print(\"ankit\")\n i=i+1 #i+=1" }, { "alpha_fraction": 0.400778204202652, "alphanum_fraction": 0.4941634237766266, "avg_line_length": 11.800000190734863, "blob_id": "6c58e202d3eac0c9cec27a62a1672bd72f48b9f1", "content_id": "a709299b04f29ae3f14035e2106d560566559d68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "no_license", "max_line_length": 60, "num_lines": 20, "path": "/primeexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''\n9 is prime or not\n9%2==0\n9%3==0 ----\n\n29\n29%2,3,4,.....28\n\n'''\n\nn=5\ni=2\nwhile i<=n-1: # 2,3,4\n if n%i==0:\n print(\"Not prime\")\n break\n i = i + 1\n\nelse: # loop else will be executed when loop runs properly\n print(\"Prime no\")\n\n" }, { "alpha_fraction": 0.6439024209976196, "alphanum_fraction": 0.6731707453727722, "avg_line_length": 14.84615421295166, "blob_id": "11a4fb6684b4cbdd4c711aa8cb0e5507258b8335", "content_id": "923d4fa292df569597d8a0cd328a35f9a92a8fd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 205, "license_type": "no_license", "max_line_length": 50, "num_lines": 13, "path": 
"/recursion.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "#what is recursion? --When a function calls itself\nimport sys\n\nprint(sys.getrecursionlimit())\nsys.setrecursionlimit(2000)\ni=0\ndef msg():\n global i\n i=i+1\n print(\"Good Evening=\",i)\n msg()\n\nmsg()" }, { "alpha_fraction": 0.5538057684898376, "alphanum_fraction": 0.6456692814826965, "avg_line_length": 11.22580623626709, "blob_id": "82a46856972fc7bfe27d7c871e5126c31087e6c5", "content_id": "eaa506fd7549b8f8d8b8c643f3b8658f3661d11d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 47, "num_lines": 31, "path": "/numpyinto.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''\nthere are 6 ways to create an array using numpy\n1. array()\n2. linspace()\n3. logspace()\n4. arange()\n5. zeros()\n6. ones()\n'''\n\nfrom numpy import *\n'''arr=array([1,2,3,4])\nprint(arr)\n\nfor i in arr:\n print(i)\n'''\narr2=linspace(1,50,5)\nprint(arr2)\n\narr3=logspace(1,50,5)\nprint(arr3)\n\narr4=arange(1,10,2)\nprint(arr4)\n\narr5=zeros(5,int)\nprint(arr5)\n\narr6=ones(5,int)\nprint(arr6)\n\n\n" }, { "alpha_fraction": 0.6629711985588074, "alphanum_fraction": 0.6895787119865417, "avg_line_length": 15.142857551574707, "blob_id": "66402a2a79fbc80014f21afff0cecedf2be70977", "content_id": "a1538e6d7b9de935975cbedf1b8cfc272aae0a44", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 69, "num_lines": 28, "path": "/anonymousfunction.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "# function without name is called anomymous function\n# to make a anonymous function\n# function should have only one expression but it can take any number\n#of argument\n\n'''\ndef add(a,b):\n c=a+b\n print(c)\nadd(4,5)\n'''\n\ndef add(a,b):\n return a+b # this function has only one expression\nprint(add(6,7))\n\n\nankit=lambda a,b:a+b # anomymous function\n\nresult=ankit(55,45)\nprint(result)\n\nd=ankit(4,5)\nprint(d)\n\nmul=lambda a,b:a*b\ng=mul(4,6)\nprint(g)" }, { "alpha_fraction": 0.6711111068725586, "alphanum_fraction": 0.6844444274902344, "avg_line_length": 17.75, "blob_id": "ab1e1a76269087b2cee758eedff01d28d429dca4", "content_id": "6154d67f899d24f02e719240c864908b3f764d49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 37, "num_lines": 12, "path": "/classobjimple.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "class Student:\n def student_info(self):\n print(\"Ram\",55)\n\n# how to create object of class\ns=Student()\nprint(type(s))\nStudent.student_info(s)\n# but this fuction can be accessed as\ns.student_info()\na=5\nprint(type(a))\n" }, { "alpha_fraction": 0.7440347075462341, "alphanum_fraction": 0.7570499181747437, "avg_line_length": 30.827587127685547, "blob_id": "d639ad8873e7cc72dd3feba0ae1c44cd3b0abafd", "content_id": "11ea8c36b74ddd2fe34899a92503a892fdc35678", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 922, "license_type": "no_license", "max_line_length": 67, "num_lines": 29, "path": "/menubar.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nroot=Tk()\n\ndef msg():\n 
print(\"hi\")\nroot.title(\"Menubar tutorial\")\n\nmain_menu=Menu(root)\nroot.config(menu=main_menu)\nfile_menu=Menu(main_menu,tearoff=False)\nmain_menu.add_cascade(label=\"FILE\",menu=file_menu)\nfile_menu.add_command(label=\"New\",accelerator=\"Ctrl+N\",command=msg)\nfile_menu.add_command(label=\"Open\",accelerator=\"Ctrl+O\")\nfile_menu.add_separator()\n# creating submenu\nsave_menu=Menu(file_menu,tearoff=False)\nfile_menu.add_cascade(label=\"Save\",menu=save_menu)\nsave_menu.add_command(label=\"Save Now\")\nsave_menu.add_command(label=\"Save As\")\nfile_menu.add_separator()\nfile_menu.add_command(label=\"Exit\",command=quit)\n\n# creating Editmenu\nedit_menu=Menu(main_menu,tearoff=False)\nmain_menu.add_cascade(label=\"Edit\",menu=edit_menu)\nedit_menu.add_command(label=\"Copy\",accelerator=\"Ctrl+C\")\nedit_menu.add_command(label=\"Paste\",accelerator=\"Ctrl+P\")\nroot.geometry(\"600x500+120+120\")\nroot.mainloop()" }, { "alpha_fraction": 0.6602209806442261, "alphanum_fraction": 0.7099447250366211, "avg_line_length": 24.928571701049805, "blob_id": "436ac22038212dd97217a975df29b5165e682b5d", "content_id": "f7e303d421bbf135fe6bc980eded46dad747dbb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 362, "license_type": "no_license", "max_line_length": 60, "num_lines": 14, "path": "/comboboxexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter.ttk import Combobox\nroot=Tk()\ndef combo_get():\n print(c.get())\nl=['CS','EC','IT','CIVIL Engineering']\n#l=list(range(1,32))\nc=Combobox(root,values=l,height=3,width=20)\nc.set(\"Select Your Branch\")\nc.pack()\nbtn=Button(root,text=\"get Combo box data\",command=combo_get)\nbtn.pack()\nroot.geometry(\"400x400+120+120\")\nroot.mainloop()" }, { "alpha_fraction": 0.5749464631080627, "alphanum_fraction": 0.6509636044502258, "avg_line_length": 24.97222137451172, "blob_id": "9095c775383a219de07f7768289cd5f40fc9ea38", "content_id": "bee555f47b563187ef2ddc121b518ba3446c6829", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 934, "license_type": "no_license", "max_line_length": 73, "num_lines": 36, "path": "/placeexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nroot=Tk()\n\ndef add():\n a=first.get()\n #print(a)\n b=second.get()\n #print(b)\n c=a+b\n #print(\"Add=\",c)\n result1.set(c)\nlabel1=Label(root,text=\"Enter First Number\",bg=\"black\",fg=\"white\",\n font=(\"Comic Sans Ms\",15))\nlabel1.place(x=50,y=50)\n\nfirst=IntVar()\ntxt=Entry(root,font=(\"Comic Sans Ms\",15),bd=\"13\",textvariable=first)\ntxt.place(x=300,y=50)\n\nlabel2=Label(root,text=\"Enter Second Number\",bg=\"black\",fg=\"white\",\n font=(\"Comic Sans Ms\",15))\nlabel2.place(x=50,y=150)\n\nsecond=IntVar()\ntxt1=Entry(root,font=(\"Comic Sans Ms\",15),bd=\"13\",textvariable=second)\ntxt1.place(x=300,y=150)\n\nbtn=Button(root,text=\"Add\",bg=\"Red\",fg=\"yellow\",\n font=(\"Comic Sans Ms\",15),command=add)\nbtn.place(x=250,y=250)\n\nresult1=IntVar()\nresult=Entry(root,font=(\"Comic Sans Ms\",15),bd=\"13\",textvariable=result1)\nresult.place(x=250,y=325)\nroot.geometry(\"600x500+250+150\")\nroot.mainloop()" }, { "alpha_fraction": 0.5968992114067078, "alphanum_fraction": 0.6175710558891296, "avg_line_length": 24.866666793823242, "blob_id": "d59b5caaa12d65091f7bd4f17ca8f33ae2188bcb", "content_id": "08ee393d13f3afcc72423facbe61018f912a4db0", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 387, "license_type": "no_license", "max_line_length": 60, "num_lines": 15, "path": "/localandglobal.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "y=20 # global variable\nx=50 # global\ndef msg():\n x=10 # x is local variable\n global z # this z is global variable\n z=10 # declare local but behaves as global\n print(\"X inside function=\",x)\n print(\"Y inside function=\",y)\n # if i want to access global varibale with the same name\n d=globals()['x']\n print(\"d=x=\",d)\nmsg()\nprint(\"Y=\",y)\nprint(\"x=\",x)\nprint(\"Z =\",z)" }, { "alpha_fraction": 0.7602339386940002, "alphanum_fraction": 0.7777777910232544, "avg_line_length": 19.176469802856445, "blob_id": "57b134a8c7939a01c50defee3b8fe8e253daf784", "content_id": "6adab717d8ffc607c49d9bac094c126ac29eae7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 342, "license_type": "no_license", "max_line_length": 64, "num_lines": 17, "path": "/inhetinaceexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "''''\n\nobject oriented Programming (OOPs)\n1. Inheritance\nDeriving a new class from the base class is known as inheritance\ntypes of inheritance:\n1. single level inheritance\n2. multilevel inheritance\n3. hierarchical inheritance\n4. multiple inheritance\n5. hybrid inheritance\n\nWhy we use inheritance\n It provides the code reusability.\n\n\n'''" }, { "alpha_fraction": 0.6525423526763916, "alphanum_fraction": 0.6864407062530518, "avg_line_length": 28.75, "blob_id": "4b64d6f33b1a5ff68ba1f969c77bcee3a5fc2b9e", "content_id": "5f9765c2de3245a80ecc403ee94c5b2c1ffda9a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 53, "num_lines": 4, "path": "/farentodegree.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "f=float(input(\"Enter the temperature in farenhight\"))\nprint(\"Given Temp=\",f)\nc=(5*(f-32))/9\nprint(\"Temp in degree=\",c)" }, { "alpha_fraction": 0.6728624701499939, "alphanum_fraction": 0.6802973747253418, "avg_line_length": 18.285715103149414, "blob_id": "de6cf4c2ec4213d12df775606875b090aad18bc1", "content_id": "04d3e595a8930c0ff14c79c3d1a021638e7e2646", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 269, "license_type": "no_license", "max_line_length": 48, "num_lines": 14, "path": "/speak.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from gtts import gTTS\nfrom playsound import playsound\n\n\ndef save_audio(text):\n print(text)\n tts=gTTS(text)\n tts.save('a.mp3')\n speak_to_text('a.mp3')\n\ndef speak_to_text(audio_path):\n playsound(audio_path)\n\nsave_audio(\"Hello how r u? 
I am fine thank you\")" }, { "alpha_fraction": 0.6657534241676331, "alphanum_fraction": 0.7068493366241455, "avg_line_length": 23.33333396911621, "blob_id": "4e0bb20c83257944217bb281dd72e5344f808941", "content_id": "d204bc353837e0e5d85dee5f72e7595b64dfabaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 365, "license_type": "no_license", "max_line_length": 59, "num_lines": 15, "path": "/colorchooserandchangeicon.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter import colorchooser\nroot=Tk()\ndef color_chooser():\n c=colorchooser.askcolor()\n print(c)\n print(c[0])\n print(c[1])\n root.config(bg=c[1])\nroot.title(\"Color chooser\")\nroot.wm_iconbitmap('note.ico')\nbtn=Button(root,text=\"Color Chooser\",command=color_chooser)\nbtn.pack()\nroot.geometry(\"400x400+120+120\")\nroot.mainloop()\n" }, { "alpha_fraction": 0.47417840361595154, "alphanum_fraction": 0.5633803009986877, "avg_line_length": 16.83333396911621, "blob_id": "ea4d6b712cd0fe5c5b6982d4acd2aff0987be57f", "content_id": "6c58d13a4d7e3105f2c28d1910b2d953b384191c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "no_license", "max_line_length": 59, "num_lines": 12, "path": "/factorial.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''\nfactorial of 5= 1*2*3*4*5\n 5*4*3*2*1\nfibbonacci series 1,2,3,5,8....\n'''\nn=int(input(\"enter any number for which u want factorial\"))\ni=1\nf=1\nwhile i<=n:\n f=f*i\n i=i+1\nprint(\"Factorial=\",f)" }, { "alpha_fraction": 0.6580311059951782, "alphanum_fraction": 0.6632124185562134, "avg_line_length": 28.615385055541992, "blob_id": "7f410b2482f186aab31aadf863b8a4077b4e039c", "content_id": "20942adfbd031eb358c0532b139e9b806cd50d4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 386, "license_type": "no_license", "max_line_length": 58, "num_lines": 13, "path": "/userdefinedcalculation.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "f=int(input(\"Enter the first number\"))\ns=int(input(\"Enter the Second number\"))\nprint(\"Enter A for add S for Sub\")\nuser_input=input(\"enter your Choice\")[0]\nprint(user_input)\n#print(user_input[0])\n\nif user_input=='a' or user_input=='A':\n print(\"Add=\",f+s)\nelif user_input=='s' or user_input=='S':\n print(\"Sub=\",f-s)\nelse:\n print(\"Bhai Zyada hosiyar mat bano jo bola wahi karo\")\n\n" }, { "alpha_fraction": 0.5819209218025208, "alphanum_fraction": 0.6638417840003967, "avg_line_length": 13.791666984558105, "blob_id": "609e103c99e643fedcddc469a2d16b5d7bac8161", "content_id": "a31dc046e79114530d21b15c5d38321e6e80922b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 354, "license_type": "no_license", "max_line_length": 35, "num_lines": 24, "path": "/twodarray.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from numpy import *\narr=array([\n [1,2,3],\n [4,5,6],\n [8,7,9],\n [4,4,4]\n])\nprint(arr)\nprint(arr.ndim)\nprint(arr.shape)\nprint(arr.size)\nprint(arr.dtype)\n# convert 2d array into one d array\narr2=arr.flatten()\nprint(arr2)\nprint(arr2.ndim)\n\narr3=arr2.reshape(3,4)\nprint(arr3)\nprint(arr3.ndim)\n\narr4=arr2.reshape(2,2,3)\nprint(arr4)\nprint(arr4.ndim)" }, { "alpha_fraction": 0.6423611044883728, "alphanum_fraction": 
0.6701388955116272, "avg_line_length": 21.19230842590332, "blob_id": "a25ec82a5cda4760b4ed0b2af68d2d99c462a000", "content_id": "723991c20def3a3b56988a4ff506e16aab4219e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 576, "license_type": "no_license", "max_line_length": 67, "num_lines": 26, "path": "/listboxexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nroot=Tk()\nroot.config(bg='yellow')\ndef list_get():\n get_data=l.curselection()\n print(get_data)\n for i in get_data:\n print(l.get(i))\n\ndef list_delete():\n r=l.curselection()\n for i in r:\n print(l.delete(i))\nl=Listbox(root,selectmode=BROWSE)\nl.insert(1,\"CS\")\nl.insert(2,'EC')\nl.insert(3,'CIVIL')\nl.insert(4,'ME')\nl.pack()\nbtn=Button(root,text=\"get Combo box data\",command=list_get)\nbtn.pack()\n\nbtn_delete=Button(root,text=\"Delete list Data\",command=list_delete)\nbtn_delete.pack()\nroot.geometry(\"400x400+120+120\")\nroot.mainloop()" }, { "alpha_fraction": 0.4948979616165161, "alphanum_fraction": 0.4948979616165161, "avg_line_length": 11.3125, "blob_id": "1d8ca402c9e1cb522864f19d3868d8f9cd62da1a", "content_id": "26533e828cac18116b119313a1014595d46144f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 24, "num_lines": 16, "path": "/multipleinhe.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "class A:\n def a(self):\n print(\"Class A\")\n\nclass B:\n def b(self):\n print(\"Class B\")\n\nclass C(A,B):\n def c(self):\n print(\"Class C\")\n\ncobj=C()\ncobj.a()\ncobj.b()\ncobj.c()" }, { "alpha_fraction": 0.5502645373344421, "alphanum_fraction": 0.6084656119346619, "avg_line_length": 18, "blob_id": "7b70f3ac23e022259e4ca21178a0cc902e900d71", "content_id": "c178af6fd41aada4a031d9288379747fb70f1732", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 189, "license_type": "no_license", "max_line_length": 51, "num_lines": 10, "path": "/star.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from turtle import *\nt=Turtle()\ncolor_list=['red','yellow','green','blue','orange']\nd=300\nfor i in range(200):\n t.color(color_list[i%5])\n t.forward(d)\n t.left(144)\n d=d-3\ndone()" }, { "alpha_fraction": 0.6193853616714478, "alphanum_fraction": 0.6371158361434937, "avg_line_length": 15.9399995803833, "blob_id": "eb9128ab9f4ba3067e935e6348cf7d93d2e0a4fa", "content_id": "f17364537b13cefbb2dc1113410c12c5660b040b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 846, "license_type": "no_license", "max_line_length": 58, "num_lines": 50, "path": "/functionintro2.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from math import *\ns=sqrt(36)\nprint(s)\n\n\n'''\nfunction should not return any value\nfunction should return value\nfunction should not recieve any argument/parameter\nfunction should take parameter\n\n'''\n\ndef add():\n #x,y=2,10\n x=int(input(\"enter first number\"))\n y = int(input(\"enter second number\"))\n c=x+y\n print(\"Add=\",c)\n\n#add()\n#add()\n\ndef add1():\n #x,y=2,10\n x=int(input(\"enter first number\"))\n y = int(input(\"enter second number\"))\n c=x+y\n d=x-y\n return c,d\n\n#s,t=add1()\n#print(s,t)\n\ndef multi(x,y): # here x and y are called formal argument\n m=x*y\n 
print(m)\n\na=int(input(\"enter first number\"))\nb=int(input(\"enter second number\"))\nmulti(5,6)\nmulti(a,b) # here a and b are called actual argument\n\n\ndef sub(x,y): # here x and y are called formal argument\n d=x//y\n return d\n\ndiv=sub(50,5)\nprint(div)" }, { "alpha_fraction": 0.7462406158447266, "alphanum_fraction": 0.7556390762329102, "avg_line_length": 37.07143020629883, "blob_id": "6a39f56787d9a5eba3983e3c747572169e02545e", "content_id": "802d8b4f90b7016fa1f097c693e764c4668b460b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 532, "license_type": "no_license", "max_line_length": 66, "num_lines": 14, "path": "/databaseconnectexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "import pymysql\nconn=pymysql.connect(host=\"localhost\",user=\"root\",db=\"mppython\")\nprint(\"connection established\")\nmycursor=conn.cursor()\n#que=\"create table user_info(name varchar(50),age int(3))\"\nque=\"insert into user_info(name,age)values('Ram',22)\"\n#que=\"update user_info set name='Shyam' where name='Ram'\"\nmycursor.execute(que)\nconn.commit() # to save the result(data) of execution of any query\n#print(\"table Created Successfully\")\nprint(\"Data Stored successfully\")\n#print(\"Data Updated successfully\")\nmycursor.close()\nconn.close()" }, { "alpha_fraction": 0.49700599908828735, "alphanum_fraction": 0.5269461274147034, "avg_line_length": 22.85714340209961, "blob_id": "71a60442f0fa38cd64e3d04f3c2ed2cd8d9571cc", "content_id": "01349f19978b3ce8653a56cca8d31145ed08062a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 334, "license_type": "no_license", "max_line_length": 37, "num_lines": 14, "path": "/leapyearcalculation.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "try:\n year=int(input(\"Enter the year\"))\n print(\"Year=\",year)\n if year%100==0:\n if year%400==0:\n print(\"Leap year\")\n else:\n print(\"Not Leap Year\")\n elif year%4==0:\n print(\"Leap year\")\n else:\n print(\"Not leap year\")\nexcept Exception:\n print(\"Year Cannt be character\")\n" }, { "alpha_fraction": 0.5559105277061462, "alphanum_fraction": 0.6613418459892273, "avg_line_length": 15.526315689086914, "blob_id": "cc265a2f4446883729ad10bcde093e49ca81f242", "content_id": "c34e18d86b592a6a2ccbcf66215c2935f69f73a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 313, "license_type": "no_license", "max_line_length": 55, "num_lines": 19, "path": "/trutlecircledrawing.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from turtle import *\nt=Turtle()\nw=Screen()\nw.setup(800,600)\n'''\nt.circle(50) # anticlock wise\nt.circle(-50) # clockwise\nt.undo()\nt.circle(100)\nt.reset()\n'''\nt.circle(100,steps=10)\nt.circle(-75,extent=180)\nt.up()\nt.goto(50,100)\nt.down()\nt.circle(200)\nt.write(\"This is Done by me\",font=(\"Comic Sans MS\",15))\ndone()" }, { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 25, "blob_id": "51af9747c517419747e266a9063ed29642611f73", "content_id": "5204699248defee681b79d365e701ff79455c4e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25, "license_type": "no_license", "max_line_length": 25, "num_lines": 1, "path": "/fourth.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "print(\"Last test 
commit\")" }, { "alpha_fraction": 0.4793814420700073, "alphanum_fraction": 0.49484536051750183, "avg_line_length": 18.5, "blob_id": "c126d79096ce322b3e686b7d44ae900f5b861dc3", "content_id": "f30c993945dd6690a153472dcdc39db047eaa4fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "no_license", "max_line_length": 65, "num_lines": 10, "path": "/factorialusing.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "def testfact():\n n = int(input(\"enter any number for which u want factorial\"))\n i = 1\n f = 1\n while i <= n:\n f = f * i\n i = i + 1\n print(\"Factorial=\", f)\n\ntestfact()" }, { "alpha_fraction": 0.6535836458206177, "alphanum_fraction": 0.6860068440437317, "avg_line_length": 26.952381134033203, "blob_id": "ff665dee6c948145ea0cad47e9e16931cf5342f1", "content_id": "8f9613d1432356e7aecdedd6ee451e47be340a68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 586, "license_type": "no_license", "max_line_length": 59, "num_lines": 21, "path": "/typeofvariableinclass.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''\ntype of variable:\n 1. class variable or static variable\n 2. instance variable -- variable declared inside function\n'''\n\nclass Car:\n wheel=4 # class variable\n def __init__(self):\n print(\"This is car\")\n self.milage=25 #instance variable\n self.company_name=\"Maruti\" #instance variable\n\nc1=Car() # automatically call __init\nc2=Car()\nc3=Car()\nCar.wheel=8 # we need class name--class variable\nc2.milage=55 # we need object --instance variable\nc2.company_name='Tata'\nprint(c1.milage,c1.company_name,c1.wheel)\nprint(c2.milage,c2.company_name,c2.wheel)" }, { "alpha_fraction": 0.6287051439285278, "alphanum_fraction": 0.6349453926086426, "avg_line_length": 23.69230842590332, "blob_id": "8ba51d5acf05c24835f58d574974983918b59165", "content_id": "a654f955f085ac90de77b121bdff606ed92195f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 641, "license_type": "no_license", "max_line_length": 61, "num_lines": 26, "path": "/constructorinpython.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''\nwhat is constructor?\nas the name suggest it construct something.\nconstructor is a special member function, which can be\ninvoked(call) automatically , when the object of its class\nis created.\n'''\n\nclass Student:\n def student_info(self):\n print(\"Name=\",self.name,\"age=\",self.age)\n\n '''\n def get_data(self,name,age):\n self.name=name\n self.age=age\n '''\n ''' constructor'''\n def __init__(self,name,age):\n self.name = name\n self.age = age\n print(\"I can when the object is created\")\n\ns=Student(\"Ram\",44) # when object is created constuctor runs\n#s.get_data(\"Ram\",44)\ns.student_info()" }, { "alpha_fraction": 0.5420168042182922, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 13.8125, "blob_id": "bf0fe6f0e4a624cc08db9fbc6fd3e9cdec0d0e4e", "content_id": "1683fe967b64a95782a6e21c00e52ee48b615741", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 238, "license_type": "no_license", "max_line_length": 27, "num_lines": 16, "path": "/nupyoperation.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from numpy import 
*\narr=array([1,2,3,4,5,6])\nprint(arr)\narr2=arr*5\nprint(arr2)\narr3=array([2,5,3,4,7,8])\nprint(arr+arr3)\n\nsum1=0\nfor i in arr3:\n sum1=sum1+i\n #print(i)\nprint(\"Sum of Marks=\",sum1)\n\nprint(sum(arr3))\nprint(max(arr3))\n\n" }, { "alpha_fraction": 0.6302413940429688, "alphanum_fraction": 0.6861499547958374, "avg_line_length": 29.30769157409668, "blob_id": "da885a4e020da871b071679125d0fbc49154a5b5", "content_id": "9d2942d999ac6695342e2bdf8ed7a01a749b2e97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 787, "license_type": "no_license", "max_line_length": 68, "num_lines": 26, "path": "/gridexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from tkinter import *\nroot=Tk()\n\nlabel1=Label(root,text=\" Enter First Number \",bg=\"black\",fg=\"white\",\n font=(\"Comic Sans Ms\",15))\nlabel1.grid(row=0,column=0)\nfirst=IntVar()\ntxt=Entry(root,font=(\"Comic Sans Ms\",15),textvariable=first)\ntxt.grid(row=0,column=1)\n\nlabel2=Label(root,text=\"Enter Second Number\",bg=\"black\",fg=\"white\",\n font=(\"Comic Sans Ms\",15))\nlabel2.grid(row=1,column=0)\nsecond=IntVar()\ntxt1=Entry(root,font=(\"Comic Sans Ms\",15),textvariable=second)\ntxt1.grid(row=1,column=1)\n\nbtn=Button(root,text=\"Add\",bg=\"Red\",fg=\"yellow\",\n font=(\"Comic Sans Ms\",15))\nbtn.grid(row=2,columnspan=2)\n\nsecond=IntVar()\ntxt1=Entry(root,font=(\"Comic Sans Ms\",15),textvariable=second)\ntxt1.grid(row=1,column=1)\n#root.geometry(\"600x500+250+150\")\nroot.mainloop()" }, { "alpha_fraction": 0.4551083445549011, "alphanum_fraction": 0.49845200777053833, "avg_line_length": 13.086956977844238, "blob_id": "0d4f225b0ef4aa6fb0d7b8ab230c616457e50371", "content_id": "ac434b9d74aa85a579daf01d3bfd9e43c8b16f30", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 323, "license_type": "no_license", "max_line_length": 22, "num_lines": 23, "path": "/variablelengthargument.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "def add(x,y,z):\n c=x+y+z\n print(\"Add=\",c)\n\nadd(2,5,8)\n\ndef variablesum(a,*b):\n #print(a)\n #print(b,type(b))\n c=a\n for i in b:\n c=c+i\n print(c)\nvariablesum(1,2,3,4)\n\ndef variablesum1(*b):\n #print(a)\n #print(b,type(b))\n c=0\n for i in b:\n c=c+i\n print(c)\nvariablesum1(1,2,3,4)" }, { "alpha_fraction": 0.5476190447807312, "alphanum_fraction": 0.5952380895614624, "avg_line_length": 16.571428298950195, "blob_id": "b6656df4952e9cab52eabcc8c129e0a43611e09c", "content_id": "53a8693fbb0aa7f03b25f30959daf4ea3b6c15ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 126, "license_type": "no_license", "max_line_length": 23, "num_lines": 7, "path": "/argumentinfunction.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "\n\n\n\ndef person(name,age):\n print(\"Name=\",name)\n age=age+10\n print(\"Age=\",age)\n\nperson('Ashwani',38)\nperson(36,'abcd')" }, { "alpha_fraction": 0.5330396294593811, "alphanum_fraction": 0.5859031081199646, "avg_line_length": 16.538461685180664, "blob_id": "7d7f6b0c7a6a184cdcbccf011818bb6d29e6a3af", "content_id": "04a07864645c1441993b0887dd27db63b9aee04b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 227, "license_type": "no_license", "max_line_length": 42, "num_lines": 13, "path": "/drawmultiplecircle.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": 
"UTF-8", "text": "from turtle import *\nt=Turtle()\nt.up()\nt.goto(200,0)\ncolor_list=['red','yellow','green','blue']\nfor i in range(4):\n t.down()\n t.pensize(10)\n t.color(color_list[i])\n t.circle(50)\n t.up()\n t.backward(100)\ndone()" }, { "alpha_fraction": 0.537102460861206, "alphanum_fraction": 0.554770290851593, "avg_line_length": 13.947368621826172, "blob_id": "95b3545e04226bcba1a1e7c3a09afc45842d0bb3", "content_id": "23bccd19b38a1e88aec03766184cc4aa31510a87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 283, "license_type": "no_license", "max_line_length": 59, "num_lines": 19, "path": "/mymodule.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "def msg():\n print(\"Good Morning\")\n\ndef add(x,y):\n print(x+y)\n\ndef sub(c,d):\n print(c-d)\n\nprint(__name__) #special variable which is already defined\n\nif __name__=='__main__':\n def speak():\n print(\"This is original file\")\n speak()\n'''sub(50,5)\nadd(4,6)\nmsg()\n'''" }, { "alpha_fraction": 0.47096773982048035, "alphanum_fraction": 0.5419355034828186, "avg_line_length": 12.70588207244873, "blob_id": "d63646770bc2ca4999b8fd1130ba879e4e666183", "content_id": "13ef035d2610325b999b1d48b9e1ef82add9dcf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 29, "num_lines": 34, "path": "/forexp.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''\n\nfor <varibale> in <sequence>:\n task to be repeated\n\nrange(10)---0,..9\n'''\nimport time\n'''\nfor i in range(10):\n print(i,\" Happy B-day\")\n\nl=[1,3,4,6,7,8,8]\nfor i in l:\n print(i,end=\"\")\n\nprint()\nname=\"Techsrijan\"\nfor i in name:\n print(i,end=\"\")\n time.sleep(1)\n\nfor j in range(1,10):\n print(j)'''\n\nfor k in range(1,10,2):\n print(k,end=\"\")\n\nfor k in range(100,50,-2):\n print(k,end=\"\")\n\nfor k in range(1,100) :\n if k%5==0:\n print(k)" }, { "alpha_fraction": 0.5663506984710693, "alphanum_fraction": 0.5947867035865784, "avg_line_length": 12.21875, "blob_id": "a12ac9a3c6b4ab7fc2ee13c54e387cf576ea7b2a", "content_id": "51e90807374f32cf5ebb76cffaf4a9637686c5bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 422, "license_type": "no_license", "max_line_length": 44, "num_lines": 32, "path": "/arrayintroduction.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "'''\nAn array is collection of similar data type.\n\n'''\nimport time\n#import array\n#import array as m\nfrom array import *\na=array('f',[1,2,3,4,5,6,8.7])\nprint(a)\nprint(a.typecode)\nprint(a.buffer_info())\n#a.reverse()\nprint(a)\nprint(len(a))\n#print(a[0])\n\nfor i in range(len(a)):\n print(a[i])\n\n\nj=0\nwhile j<len(a):\n print(a[j])\n j=j+1\n\nfor k in a:\n print(a)\n time.sleep(1)\n\nd=array('u',['a','b','c','d'])\nprint(d)" }, { "alpha_fraction": 0.5578727722167969, "alphanum_fraction": 0.6548488140106201, "avg_line_length": 22.390243530273438, "blob_id": "77ec1f6a86a0dfacc406fb8409d9224d759ccda8", "content_id": "15e5ffe481818e55306e015fbb7b37f67125e671", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 959, "license_type": "no_license", "max_line_length": 69, "num_lines": 41, "path": "/arraycopy.py", "repo_name": "Techsrijan/mppython2021", "src_encoding": "UTF-8", "text": "from numpy import 
*\narr=array([1,2,3,4,5])\nprint(\"arr=\",arr , \"id of arr=\",id(arr))\n\n# aliasing\narr2=arr # assign the value of one array into another\nprint(\"arr2=\",arr2,\"id of arr2=\",id(arr2))\n\narr[0]=500\nprint(\"arr=\",arr , \"id of arr=\",id(arr))\nprint(\"arr2=\",arr2,\"id of arr2=\",id(arr2))\n\n\n#shallow copy\n# what if i want to create two different array i.e. two diffrent\n# memory address\n\narr3=array([3,5,6,3,77,88])\nprint(\"arr3=\",arr3,\"id of arr3=\",id(arr3))\n\narr4=arr3.view()\nprint(\"arr4=\",arr4 , \"id of arr4=\",id(arr4))\n\narr3[2]=5658666\nprint(\"arr3=\",arr3,\"id of arr3=\",id(arr3))\nprint(\"arr4=\",arr4 , \"id of arr4=\",id(arr4))\n\n\n#deep copy\n# if i want two diffrenent memory address and changes on one does not\n# affect other\n\narr5=[3,77,22,99,333,000,55]\nprint(\"arr5=\",arr5,\"id of arr5=\",id(arr5))\n\narr6=arr5.copy()\nprint(\"arr6=\",arr6,\"id of arr6=\",id(arr6))\n\narr5[0]=800\nprint(\"arr5=\",arr5,\"id of arr5=\",id(arr5))\nprint(\"arr6=\",arr6,\"id of arr6=\",id(arr6))\n" } ]
76
gwaybio/hetio
https://github.com/gwaybio/hetio
b531cfd7ac28bd16752e62e87701bf921c46edf1
ddc655c1f3ad4687515fbfb2235ffe4cf666cdd5
b55a6a3e2e82f33e9d550ac35a5948a4a4b4a8c6
refs/heads/master
2022-06-11T18:54:20.442948
2018-03-27T14:36:44
2018-03-27T14:36:44
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6723181009292603, "alphanum_fraction": 0.6766337752342224, "avg_line_length": 35.449440002441406, "blob_id": "ed9a1460e1d4ba78ff4b6cce98132cf46b3f51bd", "content_id": "dd9941a1efba79327ea3360e6009d874f586a1eb", "detected_licenses": [ "CC0-1.0", "LicenseRef-scancode-unknown-license-reference" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3252, "license_type": "permissive", "max_line_length": 79, "num_lines": 89, "path": "/hetio/matrix.py", "repo_name": "gwaybio/hetio", "src_encoding": "UTF-8", "text": "from collections import OrderedDict\n\nimport numpy\nimport scipy.sparse\n\nimport hetio.hetnet\n\n\ndef get_node_to_position(graph, metanode):\n \"\"\"\n Given a metanode, return a dictionary of node to position\n \"\"\"\n if not isinstance(metanode, hetio.hetnet.MetaNode):\n # metanode is a name\n metanode = graph.metagraph.node_dict[metanode]\n metanode_to_nodes = graph.get_metanode_to_nodes()\n nodes = sorted(metanode_to_nodes[metanode])\n node_to_position = OrderedDict((n, i) for i, n in enumerate(nodes))\n return node_to_position\n\n\ndef metaedge_to_adjacency_matrix(\n graph, metaedge, dtype=numpy.bool_, dense_threshold=0):\n \"\"\"\n Returns an adjacency matrix where source nodes are rows and target\n nodes are columns.\n\n Parameters\n ==========\n graph : hetio.hetnet.graph\n metaedge : hetio.hetnet.MetaEdge\n dtype : type\n dense_threshold : float (0 ≤ dense_threshold ≤ 1)\n minimum proportion of nonzero values at which to output a dense matrix.\n Default of 0 ensures output is always dense.\n\n Returns\n =======\n row_names : list\n column_names : list\n matrix : numpy.ndarray or scipy.sparse\n \"\"\"\n if not isinstance(metaedge, hetio.hetnet.MetaEdge):\n # metaedge is an abbreviation\n metaedge = graph.metagraph.metapath_from_abbrev(metaedge)[0]\n source_nodes = list(get_node_to_position(graph, metaedge.source))\n target_node_to_position = get_node_to_position(graph, metaedge.target)\n shape = len(source_nodes), len(target_node_to_position)\n row, col, data = [], [], []\n for i, source_node in enumerate(source_nodes):\n for edge in source_node.edges[metaedge]:\n row.append(i)\n col.append(target_node_to_position[edge.target])\n data.append(1)\n adjacency_matrix = scipy.sparse.csc_matrix(\n (data, (row, col)), shape=shape, dtype=dtype)\n adjacency_matrix = sparsify_or_densify(adjacency_matrix, dense_threshold)\n row_names = [node.identifier for node in source_nodes]\n column_names = [node.identifier for node in target_node_to_position]\n return row_names, column_names, adjacency_matrix\n\n\ndef sparsify_or_densify(matrix, dense_threshold=0.3):\n \"\"\"\n Automatically convert a scipy.sparse to a numpy.ndarray if the percent\n nonzero is above a given threshold. Automatically convert a numpy.ndarray\n to scipy.sparse if the percent nonzero is below a given threshold.\n\n Parameters\n ==========\n matrix : numpy.ndarray or scipy.sparse\n dense_threshold : float (0 ≤ dense_threshold ≤ 1)\n minimum proportion of nonzero values at which to output a dense matrix.\n Setting to 0 ensures output is dense. 
Setting to 1 ensures output is\n sparse, unless matrix has no zero entries (use dense_threshold > 1) to\n guarantee sparse output.\n\n Returns\n =======\n matrix : numpy.ndarray or scipy.sparse\n \"\"\"\n density = (matrix != 0).sum() / numpy.prod(matrix.shape)\n densify = density >= dense_threshold\n sparse_input = scipy.sparse.issparse(matrix)\n if sparse_input and densify:\n return matrix.toarray()\n if not sparse_input and not densify:\n return scipy.sparse.csc_matrix(matrix)\n return matrix\n" } ]
1
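A minimal usage sketch for the `sparsify_or_densify` helper in hetio/matrix.py above — an illustrative, hedged example rather than repository code: it assumes `hetio`, `numpy`, and `scipy` are installed, and the matrix values are hypothetical.

```python
import numpy
import scipy.sparse

from hetio.matrix import sparsify_or_densify

# 3x3 matrix with 2/9 nonzero entries: density ~0.22, below the 0.3 default
dense = numpy.array([[0, 0, 1], [0, 0, 0], [0, 2, 0]])

# Density is under the threshold, so the dense input is converted to sparse
out = sparsify_or_densify(dense)
print(scipy.sparse.issparse(out))  # True (a scipy.sparse.csc_matrix)

# With dense_threshold=0, the same data always comes back dense
print(scipy.sparse.issparse(sparsify_or_densify(out, dense_threshold=0)))  # False
```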
yiheihu/bbs-demo
https://github.com/yiheihu/bbs-demo
00edd3a98d12de46807517fc38957b08ec519f23
561407b365ff38c311e2aacfd5e6d64d131d4c53
bc76ce37cb28b645fe8e3206cd38356f12e4cf92
refs/heads/master
2021-01-11T16:00:29.441469
2017-01-25T03:51:31
2017-01-25T03:51:31
79,980,759
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6122449040412903, "alphanum_fraction": 0.6167800426483154, "avg_line_length": 26.5, "blob_id": "24ccc75b3703c124ff32844707a05354ad147ff2", "content_id": "9ff4e8cb5ea8d2134e86e0d86fe48c1c325a36b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 441, "license_type": "no_license", "max_line_length": 83, "num_lines": 16, "path": "/models/node.py", "repo_name": "yiheihu/bbs-demo", "src_encoding": "UTF-8", "text": "from . import ModelMixin\nfrom . import db\nfrom . import timestamp\n\n\nclass Node(db.Model, ModelMixin):\n __tablename__ = 'nodes'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.Text(20))\n topics = db.relationship('Topic', backref='node', foreign_keys='Topic.node_id')\n\n def __init__(self, form):\n self.name = form.get('name', '')\n\n def _update(self, form):\n self.name = form.get('name', '')\n\n" }, { "alpha_fraction": 0.6225764751434326, "alphanum_fraction": 0.6286083459854126, "avg_line_length": 23.648935317993164, "blob_id": "9170aa1ae3d616592767cc3b828b40515ba724d6", "content_id": "0190031f1865c2b8d9c48d48d121ecae70cae530", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2535, "license_type": "no_license", "max_line_length": 89, "num_lines": 94, "path": "/routes/user.py", "repo_name": "yiheihu/bbs-demo", "src_encoding": "UTF-8", "text": "from routes import *\nfrom models.user import User\nfrom . import current_user, valid_id\nimport uuid\n\n\nmain = Blueprint('user', __name__)\n\nModel = User\n\n\n\n\[email protected]('/index/login')\ndef login_index():\n return render_template('login.html')\n\n\[email protected]('/index/register')\ndef register_index():\n return render_template('register.html')\n\n\[email protected]('/login', methods=['POST'])\ndef login():\n form = request.form\n m = Model(form)\n u = Model.query.filter_by(username=m.username).first()\n if m.valid_login(u):\n session.permanent = True\n session['uid'] = u.id\n return redirect(url_for('homepage.show', id=1))\n else:\n return redirect(url_for('.login_index'))\n\n\n\[email protected]('/register', methods=['POST'])\ndef register():\n form = request.form\n m = Model(form)\n status, msgs = m.valid()\n if status:\n m.save()\n session.permanent = True\n session['uid'] = m.id\n return redirect(url_for('homepage.show', id=1))\n else:\n resoult = '<br>'.join(msgs)\n return resoult\n\n\[email protected]('/settings')\n@current_user\ndef settings(u):\n return render_template('settings.html', user=u)\n\n\[email protected]('/settings/avatar', methods=['post'])\n@current_user\ndef avatar(u):\n file = request.files.get('avatar') #获取文件对象\n uploads_dir = 'static/img/avatar/'\n filename = avatar_name(file.filename) # file.filename获取文件名\n path = uploads_dir + filename\n file.save(path)\n old_path = uploads_dir + u.avatar\n if old_path != 'static/img/avatar/0001.jpeg':\n os.remove(old_path)\n u.avatar = filename\n u.save()\n return render_template('settings.html', user=u)\n#用Flask处理文件上传,上传的文件储存在内存或者文件系统中的一个临时位置,你可以通过request 对象的 files 属性来访问这些文件,每个上传的文件都储存在那个字典里。\n#它还有一个 save() 方法允许你把文件存储在服务器的文件系统上。\ndef avatar_name(old_filename):\n valid_filetypes = ('png', 'jpg', 'jpeg', 'gif', 'apng')\n a = str(uuid.uuid4())\n b = old_filename.split('.')[-1]\n if b not in valid_filetypes:\n return abort(404)\n else:\n filename = a + \".\" + b\n return filename\n\n\n\[email protected]('/member/<username>')\ndef member(username):\n if username != '游客':\n m = 
Model.query.filter_by(username=username).first()\n m.get_comment_num()\n return render_template('member.html', user=m)\n else:\n abort(401)\n\n\n\n\n" }, { "alpha_fraction": 0.599433422088623, "alphanum_fraction": 0.6028328537940979, "avg_line_length": 19.69411849975586, "blob_id": "4d9746a1634cbc24666eeb283c2442be3a454e0c", "content_id": "49c91e5d31e2d07ff2f6b8d024b1fde8ba6cabbb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1769, "license_type": "no_license", "max_line_length": 87, "num_lines": 85, "path": "/routes/topic.py", "repo_name": "yiheihu/bbs-demo", "src_encoding": "UTF-8", "text": "from routes import *\nfrom models.topic import Topic , Comment\nfrom models.user import User\n\nfrom . import current_user, valid_id\n\n\n\nmain = Blueprint('topic', __name__)\n\nModel = Topic\n\n\n\n\n\[email protected]('/')\ndef base():\n return render_template('topic_show.html')\n\n\[email protected]('/new')\n@current_user\ndef new(u):\n if u.username == '游客':\n abort(401)\n else:\n return render_template('topic_new.html', user=u)\n\n\[email protected]('/add', methods=['POST'])\n@current_user\ndef add(u):\n form = request.form\n print('add topic form', form)\n m = Model(form)\n # m.node_id = int(form.get('node_id', -1))\n print('m', m)\n print('m.node_id',m.node_id)\n print('m.content', m.content)\n m.user = u\n print('mm', m)\n m.save()\n print('mmm', m)\n id = m.id\n return redirect(url_for('.show', id=id))\n\n\[email protected]('/show/<int:id>')\n@current_user\ndef show(u, id):\n m = Model.query.get(id)\n m.comments = Comment.query.filter_by(topic_id=id).order_by(Comment.id.desc()).all()\n m.comment_num()\n return render_template('topic_show.html', topic=m, user=u)\n\n\[email protected]('/edit/<int:id>')\n@valid_id\ndef edit(id):\n m = Model.query.get(id)\n return render_template('topic_edit.html', topic=m)\n\n\[email protected]('/update', methods=['POST'])\ndef update():\n form = request.form\n id = int(form.get('id', -1))\n m = Model.query.get(id)\n m.update(form)\n return redirect(url_for('.show', id=id))\n\n\[email protected]('/comment/add', methods=['POST'])\n@current_user\ndef addComment(u):\n form = request.form\n print('form', form)\n c = Comment(form)\n c.topic_id = int(form.get('topic_id', -1))\n c.user = u\n c.save()\n print(' c.content', c.content)\n id = c.topic_id\n return redirect(url_for('.show', id=id))\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6610169410705566, "alphanum_fraction": 0.6610169410705566, "avg_line_length": 21.125, "blob_id": "9bef9a5ff94bc8559162f5dff45fb621575dd651", "content_id": "23e92d348a339f2a94e1d148399aa19ce69b93d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 354, "license_type": "no_license", "max_line_length": 76, "num_lines": 16, "path": "/routes/homepage.py", "repo_name": "yiheihu/bbs-demo", "src_encoding": "UTF-8", "text": "from routes import *\nfrom models.node import Node\nfrom . 
import current_user\n\n\nmain = Blueprint('homepage', __name__)\n\nModel = Node\n\[email protected]('/show/<int:id>')\n@current_user\ndef show(u, id):\n m_list = Model.query.all()\n m = Model.query.get(id)\n m.get_comment_num()\n return render_template('node_show.html', node=m, user=u, n_list=m_list )\n" }, { "alpha_fraction": 0.5744323134422302, "alphanum_fraction": 0.5803195834159851, "avg_line_length": 22.760000228881836, "blob_id": "6bde18971de0bdc05151d5caf33c4976c9521357", "content_id": "f95a67204749061d95f7f4c50fee5a12418e687f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1193, "license_type": "no_license", "max_line_length": 56, "num_lines": 50, "path": "/routes/__init__.py", "repo_name": "yiheihu/bbs-demo", "src_encoding": "UTF-8", "text": "from flask import Blueprint\nfrom flask import jsonify\nfrom flask import redirect\nfrom flask import render_template\nfrom flask import request\nfrom flask import send_from_directory\nfrom flask import session\nfrom flask import url_for\nfrom flask import abort\nfrom functools import wraps\nfrom models.user import User\nfrom models.topic import Topic\nimport os\nfrom werkzeug.utils import secure_filename\nimport random\n\n\n\n\ndef current_user(f):\n @wraps(f)\n def function(*args,**kw):\n user_id = session.get('uid')\n if user_id != None:\n u = User.query.get(user_id)\n return f(u, *args,**kw)\n else:\n form = {\n 'username' : '游客',\n 'avatar' :'0001.jpeg'\n }\n u = User(form)\n\n return f(u, *args,**kw)\n return function\n\n\ndef valid_id(f):\n @wraps(f)\n def function(id):\n user_id = session.get('uid')\n topic = Topic.query.get(id)\n if user_id != None:\n if user_id == topic.user_id:\n return f(id)\n else:\n abort(401)\n else:\n return redirect(url_for('user.login_index'))\n return function\n\n" }, { "alpha_fraction": 0.6188119053840637, "alphanum_fraction": 0.6227722764015198, "avg_line_length": 18.075471878051758, "blob_id": "aaaa2df09ddff8a17bca6a248b879ae900dcb568", "content_id": "6c7a77db4d546fe3a8340e7a4c2bb8d726087722", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1010, "license_type": "no_license", "max_line_length": 59, "num_lines": 53, "path": "/routes/node.py", "repo_name": "yiheihu/bbs-demo", "src_encoding": "UTF-8", "text": "from routes import *\nfrom models.node import Node\n\n\nmain = Blueprint('node', __name__)\n\nModel = Node\n\ndef nowaday_user():\n user_id = session.get('uid', -1)\n u = User.query.get(user_id)\n return u\n\n\ndef admin_required():\n u = nowaday_user()\n if u is None or not u.is_admin():\n abort(404)\n\nmain.before_request(admin_required)\n\[email protected]('/')\ndef index():\n ms = Model.query.all()\n return render_template('node_index.html', node_list=ms)\n\n\n\[email protected]('/add', methods=['POST'])\ndef add():\n form = request.form\n Model.new(form)\n return redirect(url_for('.index'))\n\n\[email protected]('/delete/<int:id>')\ndef delete(id):\n m = Model.query.get(id)\n m.delete()\n return redirect(url_for('.index'))\n\n\[email protected]('/edit/<id>')\ndef edit(id):\n m = Model.query.get(id)\n return render_template('node_edit.html', node=m)\n\n\[email protected]('/update/<int:id>', methods=['POST'])\ndef update(id):\n form = request.form\n Model.update(id, form)\n return redirect(url_for('.index'))" }, { "alpha_fraction": 0.5807663202285767, "alphanum_fraction": 0.593163013458252, "avg_line_length": 31.036144256591797, "blob_id": "db4737c5909e8c508997ab20482055a808d64683", 
"content_id": "1983c75ac45616df7bbd4602c0171cfd83699e9b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2740, "license_type": "no_license", "max_line_length": 97, "num_lines": 83, "path": "/models/user.py", "repo_name": "yiheihu/bbs-demo", "src_encoding": "UTF-8", "text": "from . import ModelMixin\nfrom . import db\nfrom . import timestamp\nfrom admin import admin_id, admin_password\n\n\nclass User(db.Model, ModelMixin):\n __tablename__ = 'users'\n id = db.Column(db.Integer, primary_key=True)\n username = db.Column(db.Text(20))\n password = db.Column(db.Text(20))\n qq = db.Column(db.Text(20))\n email = db.Column(db.Text(20))\n signature = db.Column(db.Text(100))\n created_time = db.Column(db.Text(20))\n avatar = db.Column(db.Text(500))\n\n topics = db.relationship('Topic', backref='user')\n comments = db.relationship('Comment', backref='user')\n\n\n\n\n def __init__(self, form):\n super(User, self).__init__()\n self.username = form.get('username', '')\n self.password = form.get('password', '')\n self.email = form.get('email', '')\n self.signature = form.get('signature', '')\n self.qq = form.get('qq', '')\n self.created_time = timestamp()\n self.avatar = form.get('avatar', '0001.jpeg')\n\n\n def update(self, form):\n print('user.update, ', form)\n self.password = form.get('password', self.password)\n\n\n def salted_password(self,password):\n import hashlib\n salt = 'dfgeryu564'\n def sha1hex(str):\n ascii_str = str.encode('ascii')\n return hashlib.sha1(ascii_str).hexdigest()\n hash1 = sha1hex(password)\n hash2 = sha1hex(hash1 + salt)\n return hash2\n\n # 验证注册用户的合法性的\n def valid(self):\n valid_username = User.query.filter_by(username=self.username).first() == None\n print('self.password', self.password)\n valid_username_len = len(self.username) >= 6\n valid_password_len = len(self.password) >= 6\n msgs = []\n if not valid_username:\n message = '用户名已经存在'\n msgs.append(message)\n if not valid_username_len:\n message = '用户名长度必须大于等于 6'\n msgs.append(message)\n if not valid_password_len:\n message = '密码长度必须大于等于 6'\n msgs.append(message)\n status = valid_username and valid_username_len and valid_password_len\n self.password = self.salted_password(self.password)\n print('pwd', self.password)\n return status, msgs\n\n\n\n\n def valid_login(self, u):\n if u is not None:\n username_equals = u.username == self.username\n password_equals = u.password == self.salted_password(self.password)\n return username_equals and password_equals\n else:\n return False\n\n def is_admin(self):\n return self.username == 'admin_1' and self.id == admin_id and self.password == admin_password\n\n\n\n" }, { "alpha_fraction": 0.6806314587593079, "alphanum_fraction": 0.6915603876113892, "avg_line_length": 23.132352828979492, "blob_id": "ad32f9418bce4ab0efade877470a91f4db355529", "content_id": "b1837590833cd4555586fe14efbab2e09f2a6569", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1661, "license_type": "no_license", "max_line_length": 63, "num_lines": 68, "path": "/app.py", "repo_name": "yiheihu/bbs-demo", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nfrom datetime import timedelta\nfrom flask import render_template\n\nfrom models import db\nfrom models.node import Node\nfrom models.topic import Topic , Comment\nfrom models.user import User\n\n\napp = Flask(__name__)\nmanager = Manager(app)\n\n\ndef 
register_routes(app):\n from routes.node import main as routes_node\n from routes.topic import main as routes_topic\n from routes.user import main as routes_user\n from routes.homepage import main as routes_homepage\n\n app.register_blueprint(routes_user, url_prefix='/user')\n app.register_blueprint(routes_topic, url_prefix='/topic')\n app.register_blueprint(routes_node, url_prefix='/node')\n app.register_blueprint(routes_homepage, url_prefix='/home')\n\ndef set_error(app):\n @app.errorhandler(401)\n def error401(e):\n return render_template('401.html')\n\n\ndef configured_app():\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\n import config\n app.secret_key = config.secret_key\n app.config['SQLALCHEMY_DATABASE_URI'] = config.db_uri\n # session有效时间的设置\n # app.permanent_session_lifetime = timedelta(minutes=1)\n db.init_app(app)\n register_routes(app)\n set_error(app)\n\n\n return app\n\n\[email protected]\ndef server():\n print('server run')\n config = dict(\n debug=True,\n host='0.0.0.0',\n port=3000,\n )\n app.run(**config)\n\n\ndef configure_manager():\n Migrate(app, db)\n manager.add_command('db', MigrateCommand)\n\n\nif __name__ == '__main__':\n configure_manager()\n configured_app()\n manager.run()\n\n\n\n\n\n\n" } ]
8
oueta/macf.py
https://github.com/oueta/macf.py
2cc38bf8390821fc665dc4a68c24a546ba8dd8be
5cff37c279f48807d5e7e1edadd44f9743f55081
09993cdeb7cda1820a2ede48d6b616bf1310bec2
refs/heads/master
2021-07-10T03:40:11.783195
2017-10-10T16:50:56
2017-10-10T16:50:56
105,795,445
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4928821325302124, "alphanum_fraction": 0.5096849203109741, "avg_line_length": 43.63541793823242, "blob_id": "4a51536ae557892e0fbbb62cd02aafe9e0f7af01", "content_id": "74d711f248e410e82bdf579c1565fd91b8f5666d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4285, "license_type": "no_license", "max_line_length": 102, "num_lines": 96, "path": "/macf.py", "repo_name": "oueta/macf.py", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nimport os\nimport sys\nimport re\nimport urllib.request\nimport csv\npath = os.path.dirname(os.path.realpath(__file__))\ndef search_csv(csv_path, search):\n with open(csv_path, encoding=\"utf8\") as csv_file:\n reader = csv.reader(csv_file, delimiter=',', quotechar='\"')\n for row in reader:\n if row and len(row) > 3:\n if row[1].upper() == search.upper():\n return row\ndef bit_reverse(x):\n x = int(x, 16)\n reverse = 0\n position = 7;\n for position in range(7,0,-1):\n reverse += (x & 1) << position\n x >>= 1\n return hex(reverse + x)[2:].zfill(2)\niab_url = 'https://standards.ieee.org/develop/regauth/iab/iab.csv'\niab_file = path + '/iab.csv'\nmas_url = 'https://standards.ieee.org/develop/regauth/oui36/oui36.csv'\nmas_file = path + '/mas.csv'\nmam_url = 'https://standards.ieee.org/develop/regauth/oui28/mam.csv'\nmam_file = path + '/mam.csv'\nmal_url = 'https://standards.ieee.org/develop/regauth/oui/oui.csv'\nmal_file = path + '/mal.csv'\nif len(sys.argv) > 1:\n if sys.argv[1] == '-d':\n try:\n print(\"Downloading IAB..\")\n urllib.request.urlretrieve(iab_url, iab_file)\n print(\"Downloading small block..\")\n urllib.request.urlretrieve(mas_url, mas_file)\n print(\"Downloading medium block..\")\n urllib.request.urlretrieve(mam_url, mam_file)\n print(\"Downloading large block..\")\n urllib.request.urlretrieve(mal_url, mal_file)\n print(\"Done!\")\n except:\n if os.path.isfile(iab_file): os.remove(iab_file)\n if os.path.isfile(mas_file): os.remove(mas_file)\n if os.path.isfile(mam_file): os.remove(mam_file)\n if os.path.isfile(mal_file): os.remove(mal_file)\n print(\"Incomplete download, files deleted\")\n else:\n mac_pattern = re.compile(\"^[0-9a-f.,:-]+$\", re.IGNORECASE)\n char_pattern = re.compile(\"^[0-9a-f]$\", re.IGNORECASE)\n if mac_pattern.search(sys.argv[1]) is not None:\n input_mac = sys.argv[1].lower()\n trim_mac = \"\"\n for i in range(len(input_mac)):\n if char_pattern.search(input_mac[i]) is not None:\n trim_mac += input_mac[i]\n if len(trim_mac) != 12:\n print(\"It's too short\" if len(trim_mac) < 12 else \"It's too long\")\n else:\n if os.path.isfile(mal_file) and os.path.isfile(mam_file) and os.path.isfile(mas_file):\n found_row = search_csv(iab_file, trim_mac[0:9])\n if found_row is None:\n found_row = search_csv(mas_file, trim_mac[0:9])\n if found_row is None:\n found_row = search_csv(mam_file, trim_mac[0:7])\n if found_row is None:\n found_row = search_csv(mal_file, trim_mac[0:6])\n print(found_row[2] if found_row is not None else \"Unknown Vendor\")\n # globally/locally = 0/1\n locally = (int(trim_mac[0:2], 16) & 2) >> 1\n print(\"Globally unique\" if locally == 0 else \"Locally administered\")\n # unicast/multicast = 0/1\n multicast = int(trim_mac[0:2], 16) & 1\n print(\"Unicast\" if multicast == 0 else \"Multicast\")\n # bit-reversed notation\n if len(sys.argv) > 2 and sys.argv[2] == \"-r\":\n tmp_mac = \"\"\n for i in range(0,12,+2): tmp_mac += bit_reverse(trim_mac[i:i+2])\n trim_mac = tmp_mac\n # trimmed\n print(trim_mac)\n # colon\n 
print(\"{}{}:{}{}:{}{}:{}{}:{}{}:{}{}\".format(*trim_mac))\n # dash\n print(\"{}{}-{}{}-{}{}-{}{}-{}{}-{}{}\".format(*trim_mac))\n # cisco\n print(\"{}{}{}{}.{}{}{}{}.{}{}{}{}\".format(*trim_mac))\n # huawei\n print(\"{}{}{}{}-{}{}{}{}-{}{}{}{}\".format(*trim_mac))\n else:\n print(\"Illegal characters\")\nelse:\n print(\"Usage: ./macf.py 1A:2B:3C:4D:5E:6F\")\n print(\"-d Download IEEE Public database\")\n print(\"-r Print bit-reversed notation\")\n" }, { "alpha_fraction": 0.6532846689224243, "alphanum_fraction": 0.7007299065589905, "avg_line_length": 18.571428298950195, "blob_id": "34d53c84fc9224552d02728fc36851fed2fe31a6", "content_id": "f63f4275b709354e080efd6d00b1a553173a15af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 274, "license_type": "no_license", "max_line_length": 61, "num_lines": 14, "path": "/README.md", "repo_name": "oueta/macf.py", "src_encoding": "UTF-8", "text": "## macf.py\n\n## Requirements\n/usr/bin/env python3\n\n## Usage\nDownload IEEE Public database:<br>\n./macf.py -d<br>\n\nDisplay MAC address vendor, flags and different formats: <br>\n./macf.py 1A:2B:3C:4D:5E:6F<br>\n\nPrint bit-reversed notation<br>\n./macf.py 1A:2B:3C:4D:5E:6F -r<br>\n" } ]
2
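The `-r` flag of macf.py above prints bit-reversed notation by mirroring each byte's bit order; below is a compact standalone equivalent of that per-byte reversal — an illustrative sketch using only the standard library, with example values.

```python
def bit_reverse(byte_hex):
    """Mirror the 8-bit pattern of one hex-encoded byte, e.g. '1a' -> '58'."""
    value = int(byte_hex, 16)
    reversed_value = int(f"{value:08b}"[::-1], 2)  # 00011010 -> 01011000
    return f"{reversed_value:02x}"

print(bit_reverse("01"))  # 80 (00000001 -> 10000000)
print(bit_reverse("1a"))  # 58
```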
scanterog/yarcet
https://github.com/scanterog/yarcet
2deaaf62016ec0202b8364101874c049f98828f0
40c535ff604def374ea30d50b62d57b1eaec588a
38cea452cd843a1300ae4b453578f45a980b1fe4
refs/heads/master
2021-09-02T01:24:44.453366
2017-12-29T02:02:54
2017-12-29T16:31:55
115,737,120
4
0
null
null
null
null
null
[ { "alpha_fraction": 0.6451612710952759, "alphanum_fraction": 0.6600496172904968, "avg_line_length": 16.521739959716797, "blob_id": "283db5bffd7cf7ef08e7dc27d6ccd1e79eb3e8e7", "content_id": "22c81f37c5c3100d9c79552c8ab32d140a1ad5e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 403, "license_type": "no_license", "max_line_length": 40, "num_lines": 23, "path": "/yarcet", "repo_name": "scanterog/yarcet", "src_encoding": "UTF-8", "text": "#!/bin/bash\n# yarcet.py wrapper\nset -euo pipefail\ntopdir=$(dirname $(realpath $0))\ncd $topdir\npypath=$(which python3)\nif [ -z \"$pypath\" ]; then\n echo \"python3 required and not found.\"\n exit 1\nfi\nvenvpath='env'\nif [ ! -d \"$venvpath\" ]; then\n python3 -m venv $venvpath\n set +u\n . env/bin/activate\n pip install -r requirements.txt\n set -u\nfi\nset +u\n. env/bin/activate\nset -u\n\npython3 -m yarcet \"$@\"\n" }, { "alpha_fraction": 0.646118700504303, "alphanum_fraction": 0.6484017968177795, "avg_line_length": 26.375, "blob_id": "6b7a5a4e32efbd18d0923b1402619719cc01ebf9", "content_id": "00ae49d4cc1c4fecaaabf38e51c4f36dac2b71a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 438, "license_type": "no_license", "max_line_length": 64, "num_lines": 16, "path": "/recipes/add_user.sh", "repo_name": "scanterog/yarcet", "src_encoding": "UTF-8", "text": "#!/bin/sh -e\n\nuser=\"test\"\ngrep \"${user}:\" /etc/passwd\nif [ $? -eq 1 ]; then\n echo \"user $user does not exist, creating it.\"\n adduser --disabled-password --gecos '' $user\n echo \"$user ALL=(ALL) NOPASSWD: ALL\" >> /etc/sudoers.d/sudoers\n mkdir -p /home/$user/.ssh/\n cat > /home/$user/.ssh/authorized_keys << EOF\n# insert your key here\nEOF\n chown -R $user:$user /home/$user/.ssh\nelse\n echo \"$user already exist. Skipping creation.\"\nfi\n" }, { "alpha_fraction": 0.709770143032074, "alphanum_fraction": 0.7231800556182861, "avg_line_length": 42.47916793823242, "blob_id": "f9b04711e60aab1c1758cd1365c435a9aacf341a", "content_id": "2b78e07bf53df63495f8c0a9e34996eafac9f82e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2088, "license_type": "no_license", "max_line_length": 128, "num_lines": 48, "path": "/README.md", "repo_name": "scanterog/yarcet", "src_encoding": "UTF-8", "text": "# Yet Another Remote Command Execution Tool (YARCET)\n\nThe idea behind this simple tool is to execute the same \"recipe\" (set of instructions) to many servers \nsequentially or in parallel. Sometimes we don't need to write a playbook (Ansible) to execute some simple \nset of instructions and yarcet is useful for this reduced but still needed tasks.\n\nIn order to SSH-in to your nodes, your ssh pub key must be installed for your user in the remote node (`.ssh/authortized_keys`).\nSo far, the only authentication mode supported is SSH key based authentication through an SSH Agent.\n\nStill WIP. Parallel execution not supported so far. It will be added soon. Only tested on Linux. Python2 not supported.\n\n## Configuration file\n\nThe configuration file is a simple JSON file. By default, the tool looks for \"config.json\". 
Example:\n\n```\n{\n \"connection_mode\": \"sequential\",\n \"output_mode\": \"tee\",\n \"node_groups\": {\n \"example.org\": [\"host.example.org\", \"mail.example.org\", \"backup.example.org\"],\n \"staging\": [\"10.137.16.166\", \"10.137.16.64\", \"10.137.16.8\"]\n }, \n \"ssh\": {\n \"user\": \"scg\",\n \"sudo\": true,\n \"agent\": true\n }, \n \"log_path\": \"./logs\"\n}\n```\n\n### Directives:\n* `connection_mode` is either sequential or parallel. Sequential mode allows interactive session. This is useful in case you \nforgot to make your script fully non-interactive and hence we avoid hangs up.\n* `output_mode` is only available for sequential and ignored for parallel. It is either stdout or tee. \n\"tee\" is stdout + log to file. \"stdout\" can't be disabled for sequential.\n* `node_groups` specifies the groups of nodes (or clusters) we usually work with. The node group to work with must be selected \nin the command line as shown later below.\n* `ssh` allows us to specify information related to SSH auth and if the recipe will be executed with sudo. \n___Constraint___: \"agent\" must be true. Other auth method is not supported so far.\n* `log_path`: path where logs will be sent.\n\n# Usage example:\n```\n./yarcet staging recipes/add_user.sh\n```\n![output](https://people.sugarlabs.org/scg/yarcet.png)\n\n" }, { "alpha_fraction": 0.5507487654685974, "alphanum_fraction": 0.553337037563324, "avg_line_length": 28.557376861572266, "blob_id": "873d542493e8f8f267dc8bd7f35000bcd8f5278e", "content_id": "eda6eebdb249cd917f9285abe585314035476c5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5409, "license_type": "no_license", "max_line_length": 78, "num_lines": 183, "path": "/yarcet.py", "repo_name": "scanterog/yarcet", "src_encoding": "UTF-8", "text": "import argparse\nimport json\nimport sys\nimport os\nimport socket\nimport paramiko\nimport time\nimport os.path\nfrom termcolor import cprint\nfrom binascii import hexlify\n\n\nclass HKWarningPolicy(paramiko.WarningPolicy):\n def missing_host_key(self, client, hostname, key):\n print('Warning :: unknown %s host key for %s: %s' % (key.get_name(),\n hostname, hexlify(key.get_fingerprint())))\n\n\ndef manage_interactive_session(chan, log=None):\n import select\n import termios\n import tty\n\n oldtty = termios.tcgetattr(sys.stdin)\n tty.setraw(sys.stdin.fileno())\n tty.setcbreak(sys.stdin.fileno())\n try:\n while True:\n r, w, e = select.select([chan, sys.stdin], [], [])\n if chan in r:\n try:\n x = chan.recv(1024)\n if len(x) == 0:\n break\n cprint(x.decode(), 'white', 'on_grey', end='')\n sys.stdout.flush()\n if log:\n cout = x.decode().replace('\\r', '')\n log.write(cout)\n except socket.timeout:\n pass\n if sys.stdin in r:\n x = sys.stdin.read(1)\n if len(x) == 0:\n break\n chan.send(x)\n finally:\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)\n\n\ndef run_cmd(node, config, log):\n client = paramiko.SSHClient()\n client.load_system_host_keys()\n client.set_missing_host_key_policy(HKWarningPolicy)\n try:\n client.connect(node, username=config['ssh']['user'],\n allow_agent=config['ssh']['agent'])\n except paramiko.ssh_exception.NoValidConnectionsError as e:\n print(\"Unable to ssh in to node %s\" % node)\n print(e)\n sys.exit(1)\n\n chan = client.get_transport().open_session()\n chan.set_combine_stderr(True)\n\n recipe = os.path.abspath(config['recipe'])\n basename = os.path.basename(recipe)\n remote_file = '/tmp/%d_%s' % (int(time.time()), basename)\n\n try:\n sftp = 
client.open_sftp()\n sftp.put(recipe, remote_file)\n sftp.close()\n except Exception as e:\n print(e)\n sys.exit(1)\n\n cmd = ''\n if config['ssh']['sudo']:\n chan.get_pty()\n cmd += 'sudo '\n cmd += 'sh %s; unlink %s' % (remote_file, remote_file)\n\n try:\n chan.exec_command(cmd)\n if config['connection_mode'] == 'sequential':\n manage_interactive_session(chan, log)\n retcode = chan.recv_exit_status()\n except paramiko.ssh_exception.SSHException as e:\n print('Error executing recipe in node %s' % node)\n print(e)\n sys.exit(1)\n\n client.close()\n\n return retcode\n\n\ndef run_sequential(config):\n log = None\n nodes = config['node_groups'][config['node_group']]\n if config['output_mode'] == 'tee':\n logname = config['node_group'] + '.log'\n logfile = os.path.join(config['log_path'], logname)\n log = open(logfile, 'w')\n\n for node in nodes:\n if log:\n log.write('Time: %s\\n' % time.strftime('%Y-%M-%d %H:%M:%S'))\n log.write('Node: %s\\n' % node)\n cprint('Node: %s' % node, 'green', 'on_grey', attrs=['bold'])\n retcode = run_cmd(node, config, log)\n exit_color = 'red' if retcode != 0 else 'green'\n cprint('(exit code: %d)\\n' % retcode, exit_color, 'on_white')\n if log:\n log.write('exit code: %d\\n\\n' % retcode)\n\n\ndef run_parallel(config):\n pass\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Execute recipes into remote nodes'\n )\n parser.add_argument('-c', '--config', dest='config',\n default='config.json',\n help='Configuration file (default: config.json)')\n parser.add_argument('node_group', metavar='node-group',\n help='Node group name where payload will be executed')\n parser.add_argument('recipe',\n help='Recipe to send and execute in remote node')\n parser.add_argument('-m', '--mode', dest='mode', required=False,\n choices=['p', 's'],\n help='Connection mode (p=parallel, s=sequential)')\n\n return parser.parse_args()\n\n\ndef parse_config(config_file):\n try:\n with open(config_file) as f:\n config = json.loads(f.read())\n except (IOError, json.decoder.JSONDecodeError) as e:\n print(\"[%s]: %s\" % (e.__class__.__name__, e))\n sys.exit(1)\n\n return config\n\n\ndef run(config):\n if config['connection_mode'] == 'sequential':\n run_sequential(config)\n elif config['connection_mode'] == 'parallel':\n run_parallel(config)\n else:\n print(\"Connection mode %s not valid\" % (config['connection_mode']))\n sys.exit(1)\n\n\ndef main():\n args = parse_args()\n config = parse_config(args.config)\n if args.node_group not in config['node_groups']:\n print(\"%s node group not defined in %s\" % (\n args.node_group, args.config))\n sys.exit(1)\n else:\n config['node_group'] = args.node_group\n\n config['recipe'] = args.recipe\n\n logpath = os.path.abspath(config['log_path'])\n config['log_path'] = logpath\n if not os.path.isdir(logpath):\n os.mkdir(logpath)\n\n run(config)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 6.5, "blob_id": "f157360269c7c5b1c16806ad9a98dd7695308d68", "content_id": "08e6dc58969ddbafefbc3d8fd874a27aa551e9c2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 15, "license_type": "no_license", "max_line_length": 8, "num_lines": 2, "path": "/recipes/disk.sh", "repo_name": "scanterog/yarcet", "src_encoding": "UTF-8", "text": "hostname\ndf -h\n" } ]
5
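The remote-execution core of yarcet.py above reduces to a short paramiko sequence; here is a minimal hedged sketch of that pattern — the hostname is a placeholder, the `scg` user is taken from the sample config, and only SSH-agent key auth is shown, matching the tool's stated constraint.

```python
import paramiko

client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.WarningPolicy())
# SSH-agent based key auth, matching the config's "agent": true
client.connect("host.example.org", username="scg", allow_agent=True)

stdin, stdout, stderr = client.exec_command("uname -a")
print(stdout.read().decode(), end="")
print("exit code:", stdout.channel.recv_exit_status())
client.close()
```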
Andrew2000/DataScienceLearning
https://github.com/Andrew2000/DataScienceLearning
88c8c2204b84c1e27fc0a0bf1cfac34f188fd184
cfc03c4070ec5ed7056f3c50974578029cb0a3d1
b9ef646c3eda5ff9709405251ea4db7163ddaed6
refs/heads/master
2020-04-15T00:40:40.867528
2019-01-06T05:55:07
2019-01-06T05:55:07
164,248,657
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.649895191192627, "alphanum_fraction": 0.651991605758667, "avg_line_length": 19.782608032226562, "blob_id": "410b4abc6a4cc1e9b09f98abd4bbbd0a8455c1d4", "content_id": "57d5b1d511d7b21c5ee273a61f0bed1cb0346778", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": "no_license", "max_line_length": 65, "num_lines": 23, "path": "/Quandl/quandl.py", "repo_name": "Andrew2000/DataScienceLearning", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport quandl\n\n\"\"\"\n# When we need to get new data from quandl\ndf = quandl.get('WIKI/GOOGL')\n\n# pickle it\ndf.to_pickle('quandlData.pkl')\n\"\"\"\ndef getQuandlForGOOGL():\n df = pd.read_pickle('quandlData.pkl')\n\n # write data frame to csv\n # df.to_csv('quandlData.csv', encoding='utf-8')\n\n # write data frame to json\n # df.to_json('quandlData.json', orient='records', index=True)\n\n print(df.head)\n\nif __name__==\"__main__\":\n getQuandlForGOOGL()" }, { "alpha_fraction": 0.8367347121238708, "alphanum_fraction": 0.8367347121238708, "avg_line_length": 23.5, "blob_id": "b9467e90ab82cee66d5c75b0c6daba5a3dfc9ca6", "content_id": "870917fdc6ba1badc49aa53cec1cf99eca24b4a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 49, "license_type": "no_license", "max_line_length": 26, "num_lines": 2, "path": "/README.md", "repo_name": "Andrew2000/DataScienceLearning", "src_encoding": "UTF-8", "text": "# DataScienceLearning\nRepo to learn Data Science\n" } ]
2
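quandl.py above caches the downloaded frame with pickle so later runs skip the network; a minimal sketch of that round-trip, assuming only pandas is installed — the frame contents here are hypothetical.

```python
import pandas as pd

df = pd.DataFrame({"Open": [1.0, 2.0], "Close": [1.5, 2.5]})
df.to_pickle("quandlData.pkl")             # cache once after the download
cached = pd.read_pickle("quandlData.pkl")  # later runs read the local copy
print(cached.head())
```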
chuqigao/testrepo
https://github.com/chuqigao/testrepo
27c1319b3118ed942c8fcdc3330dc24e120ac00e
4600f44631eb8bd831c1e422c2d33416aa8163b6
b1cef6f77f3afb9bdf72a49bd03c4a3e4251e861
refs/heads/main
2023-02-21T01:24:37.723277
2021-01-20T18:55:59
2021-01-20T18:55:59
331,162,943
0
0
null
2021-01-20T01:55:29
2021-01-20T02:03:51
2021-01-20T18:56:00
Python
[ { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7272727489471436, "avg_line_length": 32, "blob_id": "4b25f950e502caec2a35a58254da8f5b60e098e2", "content_id": "fecc778c8d341efbe7e7cb4af0cab5b0626bd96b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66, "license_type": "no_license", "max_line_length": 35, "num_lines": 2, "path": "/testchild.py", "repo_name": "chuqigao/testrepo", "src_encoding": "UTF-8", "text": "## Ading a new file in child branch\nprint (\"Inside Chind branch\")\n" } ]
1
onyonkaclifford/document-scanner
https://github.com/onyonkaclifford/document-scanner
9b106f3c04213d0d21a299444f2d7bfdd72cfb23
9c57ec82f990a02812b6ec0f95240646cc699a21
a9720be2129e72388feae88436cd4fbab0ca59ed
refs/heads/main
2023-07-18T09:43:00.984478
2021-08-26T13:38:05
2021-08-26T13:38:05
329,314,377
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6718162894248962, "alphanum_fraction": 0.6830897927284241, "avg_line_length": 28.386503219604492, "blob_id": "3e101b3840f2c7f8561b9e1f9a6ce03e3945c19b", "content_id": "1ddc0e055fc6b39deb147ba71e68938ed5e94d3f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4790, "license_type": "permissive", "max_line_length": 85, "num_lines": 163, "path": "/tests/test_unit.py", "repo_name": "onyonkaclifford/document-scanner", "src_encoding": "UTF-8", "text": "import copy\nimport os\n\nimport cv2\nimport numpy as np\nimport pytest\n\nfrom document_scanner import DocumentScanner\n\n\[email protected]\ndef image_path():\n return os.path.join(\"tests\", \"test_data\", \"img.png\")\n\n\[email protected]\ndef images():\n img_bgr = cv2.imread(os.path.join(\"tests\", \"test_data\", \"img.png\"))\n img_rgb = cv2.cvtColor(img_bgr.copy(), cv2.COLOR_BGR2RGB)\n return img_bgr, img_rgb\n\n\[email protected]\ndef edged_images(images):\n def get_edged_image(img):\n gray = cv2.cvtColor(img.copy(), cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(gray, (5, 5), 0)\n edged_image = cv2.Canny(blurred, 75, 200)\n return edged_image\n\n img_bgr, img_rgb = images\n return get_edged_image(img_bgr), get_edged_image(img_rgb)\n\n\[email protected]\ndef contours(edged_images):\n def get_contours(edged_img):\n contours = cv2.findContours(\n edged_img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n )\n\n if len(contours) == 2:\n resultant_contours = contours[0]\n elif len(contours) == 3: # Support for some versions of cv2\n resultant_contours = contours[1]\n else:\n raise Exception\n\n return resultant_contours\n\n edged_img_bgr, edged_img_rgb = edged_images\n return get_contours(edged_img_bgr), get_contours(edged_img_rgb)\n\n\[email protected]\ndef document_outlines(contours):\n def get_outline(cnts):\n \"\"\"\n :param cnts: contours\n \"\"\"\n cnts_sorted = sorted(copy.deepcopy(cnts), key=cv2.contourArea, reverse=True)\n document_outline = None\n\n for cnt in cnts_sorted:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.02 * perimeter, True)\n if len(approx) == 4:\n document_outline = approx\n break\n\n return document_outline\n\n contours_bgr, contours_rgb = contours\n return get_outline(contours_bgr), get_outline(contours_rgb)\n\n\ndef test_get_image(image_path):\n img_bgr, img_rgb = DocumentScanner.get_image(image_path)\n assert isinstance(img_bgr, np.ndarray)\n assert isinstance(img_rgb, np.ndarray)\n\n\ndef test_get_image__resized(image_path):\n img_bgr, img_rgb = DocumentScanner.get_image(image_path, 60)\n assert isinstance(img_bgr, np.ndarray)\n assert isinstance(img_rgb, np.ndarray)\n assert img_bgr.shape[1] == 60\n assert img_rgb.shape[1] == 60\n\n img_bgr__big, img_rgb__big = DocumentScanner.get_image(image_path, 600)\n assert isinstance(img_bgr__big, np.ndarray)\n assert isinstance(img_rgb__big, np.ndarray)\n assert img_bgr__big.shape[1] == 600\n assert img_rgb__big.shape[1] == 600\n\n\ndef test_get_image__empty_image_path():\n with pytest.raises(AttributeError):\n DocumentScanner.get_image(\"\")\n\n with pytest.raises(AttributeError):\n DocumentScanner.get_image(\"\", 60)\n\n\ndef test_get_processed_image(images):\n img_bgr, img_rgb = images\n\n img_processed_bgr = DocumentScanner.get_processed_image(img_bgr)\n assert isinstance(img_processed_bgr, np.ndarray)\n\n img_processed_rgb = DocumentScanner.get_processed_image(img_rgb)\n assert isinstance(img_processed_rgb, np.ndarray)\n\n\ndef 
test_get_text(images):\n img_bgr, img_rgb = images\n\n text_bgr = DocumentScanner.get_text(img_bgr)\n assert isinstance(text_bgr, str)\n\n text_rgb = DocumentScanner.get_text(img_rgb)\n assert isinstance(text_rgb, str)\n\n\ndef test_get_edged_image(images):\n img_bgr, img_rgb = images\n\n edged_img_bgr = DocumentScanner.get_edged_image(img_bgr)\n assert isinstance(edged_img_bgr, np.ndarray)\n\n edged_img_rgb = DocumentScanner.get_edged_image(img_rgb)\n assert isinstance(edged_img_rgb, np.ndarray)\n\n\ndef test_get_contours(edged_images):\n edged_img_bgr, edged_img_rgb = edged_images\n\n contours_bgr = DocumentScanner.get_contours(edged_img_bgr)\n assert isinstance(contours_bgr, list)\n\n contours_rgb = DocumentScanner.get_contours(edged_img_rgb)\n assert isinstance(contours_rgb, list)\n\n\ndef test_get_document_outline(contours):\n contours_bgr, contours_rgb = contours\n\n document_outline_bgr = DocumentScanner.get_document_outline(contours_bgr)\n assert isinstance(document_outline_bgr, np.ndarray)\n\n document_outline_rgb = DocumentScanner.get_document_outline(contours_rgb)\n assert isinstance(document_outline_rgb, np.ndarray)\n\n\ndef test_get_aligned_document(images, document_outlines):\n img_bgr, img_rgb = images\n document_outline_bgr, document_outline_rgb = document_outlines\n\n aligned_bgr = DocumentScanner.get_aligned_document(img_bgr, document_outline_bgr)\n assert isinstance(aligned_bgr, np.ndarray)\n\n aligned_rgb = DocumentScanner.get_aligned_document(img_rgb, document_outline_rgb)\n assert isinstance(aligned_rgb, np.ndarray)\n" }, { "alpha_fraction": 0.6009400486946106, "alphanum_fraction": 0.6138660311698914, "avg_line_length": 35.681034088134766, "blob_id": "cf912a3761a1d4d8a4ed32f751f6ba526f21ecfc", "content_id": "01b25dbba4ba97375813d4ceedac7eb685859a60", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4255, "license_type": "permissive", "max_line_length": 93, "num_lines": 116, "path": "/document_scanner/__init__.py", "repo_name": "onyonkaclifford/document-scanner", "src_encoding": "UTF-8", "text": "import cv2\nimport numpy as np\nimport pytesseract\nfrom imutils.perspective import four_point_transform\n\n\nclass DocumentScanner:\n @staticmethod\n def get_image(image_path: str, resize_width: int = None):\n \"\"\"\n Get image array from image path\n\n :param image_path: location of image\n :param resize_width: if not None image is resized to this width, else no resize\n :return: tuple of 2 image arrays, (bgr image, rgb image)\n \"\"\"\n image = cv2.imread(image_path)\n if resize_width is not None:\n h, w = image.shape[:2]\n ratio = resize_width / float(w)\n if resize_width < w:\n image = cv2.resize(\n image, (resize_width, int(h * ratio)), interpolation=cv2.INTER_AREA\n )\n elif resize_width > w:\n image = cv2.resize(\n image,\n (resize_width, int(h * ratio)),\n interpolation=cv2.INTER_LINEAR,\n )\n return image, cv2.cvtColor(image.copy(), cv2.COLOR_BGR2RGB)\n\n @staticmethod\n def get_processed_image(image: np.ndarray):\n \"\"\"\n Combines all processing steps to ready image for text extraction.\n Returned image retains colour channels format of image passed as argument.\n\n :param image: image to be processed\n :return: processed image ready for text extraction\n \"\"\"\n edged_image = DocumentScanner.get_edged_image(image, (5, 5), 75, 200)\n contours = DocumentScanner.get_contours(edged_image)\n document_outline = DocumentScanner.get_document_outline(contours)\n return 
DocumentScanner.get_aligned_document(image, document_outline)\n\n @staticmethod\n def get_text(aligned_image_rgb: np.ndarray, config=\"--psm 4\"):\n \"\"\"\n Text extraction. Colour channels format of image passed needs to be rgb.\n\n :param aligned_image_rgb: processed image that's ready for text extraction\n :param config: pytesseract image_to_string config\n :return: extracted text\n \"\"\"\n return pytesseract.image_to_string(\n cv2.cvtColor(aligned_image_rgb, cv2.COLOR_BGR2RGB), config=config\n )\n\n @staticmethod\n def get_edged_image(image: np.ndarray, ksize=(5, 5), threshold1=75, threshold2=200):\n \"\"\"\n Highlight edges in the image\n\n :param image: image to be edged\n :param ksize: gaussian blur kernel size\n :param threshold1: Canny algorithm value for edge linking\n :param threshold2: Canny algorithm value for finding initial segments of strong edges\n :return: edged image\n \"\"\"\n gray = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY)\n blurred = cv2.GaussianBlur(gray, ksize, 0)\n return cv2.Canny(blurred, threshold1, threshold2)\n\n @staticmethod\n def get_contours(edged_image: np.ndarray):\n \"\"\"\n Returns contours\n\n :param edged_image: image that's been edged\n :return: contours\n \"\"\"\n contours = cv2.findContours(\n edged_image.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE\n )\n if len(contours) == 2:\n return contours[0]\n\n @staticmethod\n def get_document_outline(contours):\n \"\"\"\n Returns the outline of the document's edge\n\n :param contours: contours within the image\n :return: document outline\n \"\"\"\n contours = sorted(contours.copy(), key=cv2.contourArea, reverse=True)\n document_outline = None\n for contour in contours:\n perimeter = cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, 0.02 * perimeter, True)\n if len(approx) == 4:\n document_outline = approx\n break\n return document_outline\n\n @staticmethod\n def get_aligned_document(image: np.ndarray, document_outline):\n \"\"\"\n Crops the document from the background and aligns it orthogonally to the viewer\n\n :param image: original image\n :param document_outline: outline of the document\n :return: aligned document\n \"\"\"\n return four_point_transform(image.copy(), document_outline.copy().reshape(4, 2))\n" }, { "alpha_fraction": 0.7401515245437622, "alphanum_fraction": 0.7666666507720947, "avg_line_length": 44.517242431640625, "blob_id": "9660d1e5f1635b6e6944aa2d921d7c75da0eb9bc", "content_id": "955388faba941054bd38ee369dda5b00bffa9f2c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1320, "license_type": "permissive", "max_line_length": 140, "num_lines": 29, "path": "/README.md", "repo_name": "onyonkaclifford/document-scanner", "src_encoding": "UTF-8", "text": "![Tests workflow](https://github.com/onyonkaclifford/document-scanner/actions/workflows/tests.yml/badge.svg?branch=main)\n![Lint workflow](https://github.com/onyonkaclifford/document-scanner/actions/workflows/lint.yml/badge.svg?branch=main)\n[![License: MIT](https://img.shields.io/badge/license-MIT-green.svg)](https://github.com/onyonkaclifford/document-scanner/blob/main/LICENSE)\n[![Imports: isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/)\n[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)\n[![Code style: 
flake8](https://img.shields.io/badge/code%20style-flake8-orange.svg)](https://gitlab.com/pycqa/flake8)\n\n# Document scanner\nExtract text from photos\n\nTesseract for OCR is required. To install on Ubuntu: `sudo apt-get install tesseract-ocr`\n\nExample usage:\n```python\nimport cv2\nfrom document_scanner import DocumentScanner\n\nimg_bgr, img_rgb = DocumentScanner.get_image(\"assets/img.png\")\nimg_processed = DocumentScanner.get_processed_image(img_bgr)\ntext = DocumentScanner.get_text(img_processed)\n\nprint(text)\n\ncv2.imshow(\"Image BGR\", img_bgr)\ncv2.imshow(\"Image RGB\", img_rgb)\ncv2.imshow(\"Processed img\", img_processed)\nif cv2.waitKey(0) == 27:\n cv2.destroyAllWindows()\n```\n" }, { "alpha_fraction": 0.5759999752044678, "alphanum_fraction": 0.7039999961853027, "avg_line_length": 40.66666793823242, "blob_id": "c2cd4d86b01cba51ce0d2cd5f203ccc636fea7a0", "content_id": "876dd06eddad67beaf9453dd060f8841dabd37b1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 250, "license_type": "permissive", "max_line_length": 104, "num_lines": 6, "path": "/requirements.txt", "repo_name": "onyonkaclifford/document-scanner", "src_encoding": "UTF-8", "text": "imutils~=0.5.4\nnumpy~=1.19.5 # Newer versions (>=v1.20.*) don't support python 3.6\nopencv-python~=4.5.2.54\npre-commit~=2.13.0\npytesseract~=0.3.7\nscipy~=1.5.4 # Undocumented requirement for imutils, newer versions (>=v1.20.*) don't support python 3.6\n" } ]
4
shiveshchoudhary/hand_digits_recognition
https://github.com/shiveshchoudhary/hand_digits_recognition
672f55252abd3c0f245b35e888919ee93040eb8c
6376928876d14b9e15470955ba7611bc08a12b95
112544ad59839e1350a2dac122b91bfa39682f48
refs/heads/master
2021-06-05T09:43:14.969157
2020-12-29T17:54:25
2020-12-29T17:54:25
143,036,178
0
0
null
2018-07-31T15:54:38
2020-10-05T12:43:22
2020-10-05T12:44:38
Python
[ { "alpha_fraction": 0.650632917881012, "alphanum_fraction": 0.7493671178817749, "avg_line_length": 19.789474487304688, "blob_id": "215502666ac51f23027625a0eb3f81b0cb294683", "content_id": "7f53f0e1571273477a105be62a2293870d9c6ad6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 74, "num_lines": 19, "path": "/plot_data/plot_data.py", "repo_name": "shiveshchoudhary/hand_digits_recognition", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as pt\nfrom sklearn.tree import DecisionTreeClassifier\n\ndata=pd.read_csv(\"C:/Users/cc102tx/Videos/Captures/train.csv\").as_matrix()\ncl=DecisionTreeClassifier()\ntrain=data[0:21000,1:]\ntrain_l=data[0:21000,0]\n\ncl.fit(train,train_l)\n\ntest=data[21000:,1:]\ntest_l=data[21000:,0]\n\nd=test[564]\nd.shape=(28,28)\n\npt.imshow(255-d,cmap=\"gray\")\n" }, { "alpha_fraction": 0.6198704242706299, "alphanum_fraction": 0.7192224860191345, "avg_line_length": 19.954545974731445, "blob_id": "2cd5e7d6f410c8870b8c499578a7e7d11842d73a", "content_id": "f7c0a01f352d5212cb6e6e9d15eb2f42f52177a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 463, "license_type": "no_license", "max_line_length": 74, "num_lines": 22, "path": "/check_accuracy/check_accuracy.py", "repo_name": "shiveshchoudhary/hand_digits_recognition", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as pt\nfrom sklearn.tree import DecisionTreeClassifier\n\ndata=pd.read_csv(\"C:/Users/cc102tx/Videos/Captures/train.csv\").as_matrix()\ncl=DecisionTreeClassifier()\ntrain=data[0:21000,1:]\ntrain_l=data[0:21000,0]\n\ncl.fit(train,train_l)\n\ntest=data[21000:,1:]\ntest_l=data[21000:,0]\n\n\np=cl.predict(test)\n\nct=0\nfor i in range(21000):\n ct+=1 if p[i]==test_l[i] else 0\nprint \"Accuracy:\",(ct*100.0)/21000,\"%\"\n\n\n" }, { "alpha_fraction": 0.7776119112968445, "alphanum_fraction": 0.7798507213592529, "avg_line_length": 40.875, "blob_id": "d23cefde06c2dfbf175ec7dad72b08d17567e8c3", "content_id": "3d9da57882850eeb9130d9d83279a8eaca1fab3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1342, "license_type": "no_license", "max_line_length": 339, "num_lines": 32, "path": "/README.md", "repo_name": "shiveshchoudhary/hand_digits_recognition", "src_encoding": "UTF-8", "text": "### Hand_digits_recognition\nThis project is based on recognising hand written digits using machine learning linear classifiers.\n\n### Knowledge required\n\nNumpy\nScikit \nPandas \nDecision Tree Classifiers \n\n### TESTING AND TRAINING SETS\n[Kaggle testing and training csv file links](https://www.kaggle.com/c/digit-recognizer/data)\n\n\n### Decision Tree Classifiers\nDecision Tree Classifier, repetitively divides the working area(plot) into sub part by identifying lines.\n\n#### Aspects \n#### 1)IMPURITY \nImpurity is when we have a traces of one class division into other. This can arise due to following reason\n\nWe run out of available features to divide the class upon.\nWe tolerate some percentage of impurity (we stop further division) for faster performance. 
\n\n#### 2)Entropy \nEntropy is degree of randomness of elements or in other words it is measure of impurity.\n\n#### 3)Information Gain \nInformation Gain (n) =Entropy(x) — ([weighted average] * entropy(children for feature)).\n\n### Final Thoughts \nDividing efficiently based on maximum information gain is key to decision tree classifier. However, in real world with millions of data dividing into pure class in practically not feasible (it may take longer training time) and so we stop at points in nodes of tree when fulfilled with certain parameters (for example impurity percentage).\n" }, { "alpha_fraction": 0.7967032790184021, "alphanum_fraction": 0.7967032790184021, "avg_line_length": 24.85714340209961, "blob_id": "5600a217affa93ab842f403ddc5978081856ae8c", "content_id": "8f00ec8ee21e2d8d712c6cc573e9a5a439170bf6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 182, "license_type": "no_license", "max_line_length": 47, "num_lines": 7, "path": "/show_data/show_data.py", "repo_name": "shiveshchoudhary/hand_digits_recognition", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as pt\nfrom sklearn.tree import DecisionTreeClassifier\n\ndt=pd.read_csv(\"desktop/train.csv\")\nprint(dt) #gives csv file \n" } ]
4
traviscrawford/airflow-pex-example
https://github.com/traviscrawford/airflow-pex-example
fd4d7a5e21f00377cc99468213ad7bea1037a692
2691fd0d01eac2427a269650e2733e1c498a674b
b0948a56ed1805754691942c631a142eeff4a7bb
refs/heads/master
2021-07-21T18:14:19.316760
2018-08-14T02:45:44
2018-08-14T02:45:44
129,462,715
5
4
Apache-2.0
2018-04-13T23:13:48
2022-10-11T08:03:19
2018-08-14T14:30:28
Shell
[ { "alpha_fraction": 0.7177848815917969, "alphanum_fraction": 0.7188498377799988, "avg_line_length": 48.421051025390625, "blob_id": "9cb3c9c0636640e7dff10b64989ab8f2b309c21d", "content_id": "65eed2b0cbc8fdf33368b30410bc0405a5953be9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 939, "license_type": "permissive", "max_line_length": 108, "num_lines": 19, "path": "/bin/lib/common.sh", "repo_name": "traviscrawford/airflow-pex-example", "src_encoding": "UTF-8", "text": "set -euo pipefail\n\n[[ -f .current_project ]] || change_project example_project\nCURRENT_DAG_FOLDER=$(cat .current_project)\n[[ -z $CURRENT_DAG_FOLDER ]] && { printf \"CURRENT_DAG_FOLDER not set\\n\"; exit 1; }\n\nmkdir -p \"$(pwd)/airflow/${CURRENT_DAG_FOLDER}\"\n\nexport AIRFLOW_HOME=\"$(pwd)/src/dags/${CURRENT_DAG_FOLDER}\"\nexport AIRFLOW__CORE__AIRFLOW_HOME=\"$(pwd)/src/dags/${CURRENT_DAG_FOLDER}\"\nexport AIRFLOW__CORE__DAGS_FOLDER=\"$(pwd)/src/dags/${CURRENT_DAG_FOLDER}/dags\"\nexport AIRFLOW__CORE__BASE_LOG_FOLDER=\"$(pwd)/airflow/${CURRENT_DAG_FOLDER}/logs\"\nexport AIRFLOW__CORE__SQL_ALCHEMY_CONN=\"sqlite:///$(pwd)/airflow/${CURRENT_DAG_FOLDER}/airflow.db\"\nexport AIRFLOW__CORE__LOAD_EXAMPLES=\"False\"\nexport AIRFLOW__CORE__PLUGINS_FOLDER=\"$(pwd)/src/dags/${CURRENT_DAG_FOLDER}/plugins\"\nexport AIRFLOW__SCHEDULER__CHILD_PROCESS_LOG_DIRECTORY=\"$(pwd)/airflow/${CURRENT_DAG_FOLDER}/logs/scheduler\"\n\n# Dev key\nexport AIRFLOW__CORE__FERNET_KEY=\"niGEKijk3iVgtoybG5w049OZo2kQz-ZY4p14f0gAlRs=\"\n" }, { "alpha_fraction": 0.6715328693389893, "alphanum_fraction": 0.6715328693389893, "avg_line_length": 21.83333396911621, "blob_id": "d11ba722bfbd340120af7f22ce441cbb65eb34ef", "content_id": "e1d13e6f2e432119c4bb4c019c897d0f5f421f62", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 137, "license_type": "permissive", "max_line_length": 50, "num_lines": 6, "path": "/bin/airflow", "repo_name": "traviscrawford/airflow-pex-example", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nset -euo pipefail\nsource bin/lib/common.sh\n\nexec ./pants -q run \\\n \"src/py/example/airflow/cli:airflow_cli\" -- \"$@\"\n" }, { "alpha_fraction": 0.691495954990387, "alphanum_fraction": 0.7228429317474365, "avg_line_length": 27.026086807250977, "blob_id": "71fcfd079d855dde227f0b7caaa661933fcd6f10", "content_id": "810bcc31352159d7a65914af908bdaca76b3ed55", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3222, "license_type": "permissive", "max_line_length": 305, "num_lines": 115, "path": "/README.md", "repo_name": "traviscrawford/airflow-pex-example", "src_encoding": "UTF-8", "text": "# airflow-pex-example\n\nDeploying [Apache Airflow](https://airflow.apache.org/) typically involves\ninstalling Airflow and your custom libraries in a `virtualenv` on the production\nhost, along with your and DAGs & other files (e.g.: SQL). To simplify\ndeployment, here we explore using [pex](https://pex.readthedocs.io).\n\nBuild an Airflow `pex` by running:\n\n```\n./pants binary src/python/example/airflow\n```\n\nThis produces `airflow.pex`, a single file that is analogous to a\nstatically-lined binary. 
It's a self-contained, runnable Airflow you can\n`scp` to another machine and run.\n\nYou can then run Airflow commands as usual, using `dist/airflow.pex`.\n\n```\n$ ./dist/airflow.pex list_dags\n[2018-04-13 17:34:39,885] {__init__.py:45} INFO - Using executor SequentialExecutor\n[2018-04-13 17:34:39,924] {models.py:189} INFO - Filling up the DagBag from /home/travis/airflow/dags\n\n\n-------------------------------------------------------------------\nDAGS\n-------------------------------------------------------------------\nexample_bash_operator\nexample_branch_dop_operator_v3\nexample_branch_operator\nexample_http_operator\nexample_passing_params_via_test_command\nexample_python_operator\nexample_short_circuit_operator\nexample_skip_dag\nexample_subdag_operator\nexample_subdag_operator.section-1\nexample_subdag_operator.section-2\nexample_trigger_controller_dag\nexample_trigger_target_dag\nexample_xcom\nlatest_only\nlatest_only_with_trigger\ntest_utils\ntutorial\n```\n\nYou can then `scp` or otherwise distribute this file to a production host.\n\n\n## python_app example\n\nWhen using Pants 1.7.0rc0 or later which contains `python_app` support we can\nbuild a self-contained binary along with DAGs in a deployable artifact.\n\nNote how `src/dags:analytics` contains a directory of DAGs, which may be useful\nif multiple teams require separate DAGs, or you can use just one DAG dir.\n\n```\n$ ./pants bundle src/dags:analytics --bundle-py-archive=tgz\n$ $ cd dist/src.dags.analytics-bundle/\n$ $ find .\n.\n./main.pex\n./analytics\n./analytics/analytics_daily.pyc\n./analytics/analytics_daily.py\n$ AIRFLOW_HOME=$(pwd) AIRFLOW__CORE__DAGS_FOLDER=$(pwd)/analytics ./main.pex list_tasks analytics_daily\n[2018-06-01 17:34:47,127] {__init__.py:45} INFO - Using executor SequentialExecutor\n[2018-06-01 17:34:47,174] {models.py:189} INFO - Filling up the DagBag from /Users/travis/src/airflow-pex-example/dist/src.dags.analytics-bundle/analytics\nprint_date\n```\n\n## Working environment\n\nWe show a working example of Airflow integrated into pants for development. [direnv](https://direnv.net/) is required to load `bin/airflow` and `bin/gunicorn` directly into the environment. This is explained in detail in `bin/README.md`. 
We use pyenv in the example, which is recommended but not required.\n\n#### Install dependencies\n\n\tbrew install direnv pyenv openssl\n\t\n#### Copy the environment file\n\n\tcp .envrc.example .envrc && direnv allow\n\t\n#### Build\n\n\tmake\n\n#### View the help \n\n\tairflow --help\n\t\n#### List the available DAGs\n\n\tairflow list_dags\n\t\n#### Initialize the database\n\n\tairflow initdb \n\t\n#### Run the example workflow\n\n\tairflow backfill analytics_daily -s 2018-01-01 -e 2018-01-01\n\n\t\n#### List projects\n\n\tchange_project\n\n\t\n#### Change project\n\n\tchange_project eng" }, { "alpha_fraction": 0.6593406796455383, "alphanum_fraction": 0.7252747416496277, "avg_line_length": 18.200000762939453, "blob_id": "2d77384745e63beb5b2f1ff1229715af394dbe61", "content_id": "bcc9284ca2838cf84ef8de54764b8498c88d48cc", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 91, "license_type": "permissive", "max_line_length": 41, "num_lines": 5, "path": "/pants.ini", "repo_name": "traviscrawford/airflow-pex-example", "src_encoding": "UTF-8", "text": "[GLOBAL]\npants_version: 1.7.0rc0\n\n[python-setup]\ninterpreter_constraints: [\"CPython>=3.6\"]\n" }, { "alpha_fraction": 0.586416482925415, "alphanum_fraction": 0.6955602765083313, "avg_line_length": 75.69999694824219, "blob_id": "5bf4926c20f081dd2f75499774728cbcbcb25c3a", "content_id": "c65a0d80404d73dc8f605ed8ee8ce6f0be7db32c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3790, "license_type": "permissive", "max_line_length": 311, "num_lines": 50, "path": "/bin/README.md", "repo_name": "traviscrawford/airflow-pex-example", "src_encoding": "UTF-8", "text": "# direnv integration\n\nAn awesome feature of direnv is the ability to load an entire directory into the `PATH`. We make use of this feature to add an `airflow` symbol directly into the environment. This avoids the need to execute Airflow through the Pants CLI, providing a completely native experience. \n\nThe use of direnv is not superficial though. It's necessary to have `airflow` in the environment because the CLI makes use of it when running individual tasks. 
\n\nFor example, consider launching the example dag in the main README.\n\n\t➜ airflow backfill analytics_daily -s 2018-01-01 -e 2018-01-01\n\t\n\t[2018-08-13 15:26:26,933] {__init__.py:45} INFO - Using executor SequentialExecutor\n\t[2018-08-13 15:26:26,972] {models.py:189} INFO - Filling up the DagBag from /Users/waldo/src/airflow/pex-example/src/dags/analytics/dags\n\t[2018-08-13 15:26:27,177] {models.py:1197} INFO - Dependencies all met for <TaskInstance: analytics_daily.print_date 2018-01-01 00:00:00 [scheduled]>\n\t[2018-08-13 15:26:27,180] {base_executor.py:49} INFO - Adding to queue: airflow run analytics_daily print_date 2018-01-01T00:00:00 --local -sd DAGS_FOLDER/analytics_daily.py\n\t[2018-08-13 15:26:32,131] {sequential_executor.py:40} INFO - Executing command: airflow run analytics_daily print_date 2018-01-01T00:00:00 --local -sd DAGS_FOLDER/analytics_daily.py\n\t[2018-08-13 15:26:34,571] {__init__.py:45} INFO - Using executor SequentialExecutor\n\t[2018-08-13 15:26:34,613] {models.py:189} INFO - Filling up the DagBag from /Users/waldo/src/airflow/pex-example/src/dags/analytics/dags/analytics_daily.py\n\t[2018-08-13 15:26:34,664] {base_task_runner.py:115} INFO - Running: ['bash', '-c', 'airflow run analytics_daily print_date 2018-01-01T00:00:00 --job_id 2 --raw -sd DAGS_FOLDER/analytics_daily.py']\n\t\nYou can see in the following line that Airflow calls out to itself via the shell:\n\n`Executing command: airflow run analytics_daily print_date 2018-01-01T00:00:00 --local -sd DAGS_FOLDER/analytics_daily.py\n[2018-08-13 15:26:34,571] {__init__.py:45} INFO - Using executor SequentialExecutor`\n\nThis is the main reason to use direnv.\n\nSimilarly, for the webserver.\n\n\t➜ pex-example git:(complete-airflow-features) ✗ airflow webserver\n\t[2018-08-13 15:29:16,636] {__init__.py:45} INFO - Using executor SequentialExecutor\n\t ____________ _____________\n\t ____ |__( )_________ __/__ /________ __\n\t____ /| |_ /__ ___/_ /_ __ /_ __ \\_ | /| / /\n\t___ ___ | / _ / _ __/ _ / / /_/ /_ |/ |/ /\n\t _/_/ |_/_/ /_/ /_/ /_/ \\____/____/|__/\n\t\n\t/Users/waldo/src/airflow/pex-example/.pants.d/pyprep/requirements/CPython-3.6.4/81e1b0096394cb92c7b9d075a675485c599e27d4-DefaultFingerprintStrategy_3f82b95bd138/.deps/Flask-0.11.1-py2.py3-none-any.whl/flask/exthook.py:71: ExtDeprecationWarning: Importing flask.ext.cache is deprecated, use flask_cache instead.\n\t .format(x=modname), ExtDeprecationWarning\n\t[2018-08-13 15:29:16,894] {models.py:189} INFO - Filling up the DagBag from /Users/joe.napolitano/src/airflow/pex-example/src/dags/analytics/dags\n\tRunning the Gunicorn Server with:\n\tWorkers: 4 sync\n\tHost: 0.0.0.0:8080\n\tTimeout: 120\n\tLogfiles: - -\n\t=================================================================\n\tRunning: gunicorn -w 4 -k sync -t 120 -b 0.0.0.0:8080 -n airflow-webserver -p /Users/waldo/src/airflow/pex-example/src/dags/analytics/airflow-webserver.pid -c python:airflow.www.gunicorn_config --access-logfile - --error-logfile - airflow.www.app:\n\t[2018-08-13 15:29:20 -0400] [20075] [INFO] Starting gunicorn 19.9.0\n\t\nWhich calls out to:\n`gunicorn -w 4 -k sync -t 120 -b 0.0.0.0:8080 -n airflow-webserver -p /Users/waldo/src/airflow/pex-example/src/dags/analytics/airflow-webserver.pid -c python:airflow.www.gunicorn_config --access-logfile - --error-logfile - airflow.www.app:`" }, { "alpha_fraction": 0.5777778029441833, "alphanum_fraction": 0.7111111283302307, "avg_line_length": 21.5, "blob_id": "bdd648938ee8ea03b5f6f7ddf1d78ed4d208c6ea", "content_id": 
"e66feb38e6159eb849b7b40aa813a2ec3bc923e6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": true, "language": "Text", "length_bytes": 45, "license_type": "permissive", "max_line_length": 29, "num_lines": 2, "path": "/3rdparty/python/requirements.txt", "repo_name": "traviscrawford/airflow-pex-example", "src_encoding": "UTF-8", "text": "apache-airflow[crypto]==1.9.0\nipython==6.3.1\n" }, { "alpha_fraction": 0.691428542137146, "alphanum_fraction": 0.691428542137146, "avg_line_length": 24, "blob_id": "0861c0485e9c62f2989815eb91ea6023f4305d9e", "content_id": "335a2f1ced91c81087353cefabb917a8bd14530c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 175, "license_type": "permissive", "max_line_length": 60, "num_lines": 7, "path": "/bin/gunicorn", "repo_name": "traviscrawford/airflow-pex-example", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nset -euo pipefail\nsource bin/lib/common.sh\n\necho \"Running: gunicorn $@\"\nexec ./pants -q run \\\n \"src/py/example/airflow/gunicorn:airflow_gunicorn\" -- \"$@\"\n" }, { "alpha_fraction": 0.6346153616905212, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 19.799999237060547, "blob_id": "9b4575b2e60420b63edaa3119ba51d1c804fe2a7", "content_id": "e895377ed47122e3376afca33d0a18e1ddc34fa9", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 520, "license_type": "permissive", "max_line_length": 66, "num_lines": 25, "path": "/src/dags/analytics/dags/analytics_daily.py", "repo_name": "traviscrawford/airflow-pex-example", "src_encoding": "UTF-8", "text": "from datetime import datetime, timedelta\n\nfrom airflow import DAG\nfrom example.airflow.operators.bash_operator import MyBashOperator\n\n\ndag = DAG(\n dag_id='analytics_daily',\n default_args={\n 'owner': 'airflow',\n 'depends_on_past': False,\n 'start_date': datetime(2015, 6, 1),\n 'email': ['[email protected]'],\n 'email_on_failure': False,\n 'email_on_retry': False,\n 'retries': 1,\n 'retry_delay': timedelta(minutes=5),\n },\n)\n\ntask = MyBashOperator(\n task_id='print_date',\n bash_command='date',\n dag=dag\n)\n" }, { "alpha_fraction": 0.6730769276618958, "alphanum_fraction": 0.6730769276618958, "avg_line_length": 14.600000381469727, "blob_id": "ce94a863d8327b5ff8bc654bb8c011e90f47de6b", "content_id": "7422a16ac2cd474f1dab2d6e7ddc9353fc21d5ea", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 312, "license_type": "permissive", "max_line_length": 33, "num_lines": 20, "path": "/Makefile", "repo_name": "traviscrawford/airflow-pex-example", "src_encoding": "UTF-8", "text": ".DEFAULT_GOAL := build\n.PHONY: dist\n\n# Build everything\nbuild:\n\t./pants test compile src::\n\ndist:\n\t./pants bundle src::\n\n# Preserves build cache\nclean:\n\t@rm -rf airflow/\n\t@rm -rf dist/\n\n# Requires long build\nclean-all: clean\n\t@./pants clean-all\n\t@rm -rf .pants.d/ .cache/ .pids/\n\t@rm -f .pants.workdir.file_lock\n" }, { "alpha_fraction": 0.6270270347595215, "alphanum_fraction": 0.6324324607849121, "avg_line_length": 25.428571701049805, "blob_id": "2f5a27c08ee9d1ac9b96f56c9728e655a0b6ac35", "content_id": "91f77cbfc219d9574292bd466b0eba0d9f7ad27c", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "permissive", "max_line_length": 60, "num_lines": 7, "path": 
"/src/py/example/airflow/gunicorn/gunicorn.py", "repo_name": "traviscrawford/airflow-pex-example", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom gunicorn.app.wsgiapp import WSGIApplication\n\nif __name__ == '__main__':\n WSGIApplication(\"%(prog)s [OPTIONS] [APP_MODULE]\").run()\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 19.399999618530273, "blob_id": "e554437e8bd136393f4d0ddc54336350c3ad69a4", "content_id": "948f381c98bd147dc5e42fe687d0467b8d4ea01e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "permissive", "max_line_length": 56, "num_lines": 5, "path": "/src/py/example/airflow/operators/bash_operator.py", "repo_name": "traviscrawford/airflow-pex-example", "src_encoding": "UTF-8", "text": "from airflow.operators.bash_operator import BashOperator\n\n\nclass MyBashOperator(BashOperator):\n pass\n" }, { "alpha_fraction": 0.6066945791244507, "alphanum_fraction": 0.6192468404769897, "avg_line_length": 28.875, "blob_id": "e2bb97789784d7851c71348584b48d124c0b495e", "content_id": "9b6b4ddfd723bc15367092952a6f7fb6a1d0259e", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 239, "license_type": "permissive", "max_line_length": 85, "num_lines": 8, "path": "/bin/change_project", "repo_name": "traviscrawford/airflow-pex-example", "src_encoding": "UTF-8", "text": "#!/usr/bin/env bash\nset -euo pipefail\n\n[[ ! -z ${1-} ]] || { printf \"Usage: change_project [$(ls -m src/dags)]\\n\"; exit 1; }\n\nPROJECT_NAME=$1\nprintf \"Changing project: %s\\n\" \"$PROJECT_NAME\"\nprintf \"%s\\n\" \"$PROJECT_NAME\" > .current_project\n" } ]
12
rostrage/node_usvfs
https://github.com/rostrage/node_usvfs
d1f0e7e017e8ad4217738b4ae85f02e874ee31b7
75b6a4fc8a6b146b092f680d3cd251341617babe
8c11d9409d3debee359d5bcd9058edbb308cef23
refs/heads/master
2022-01-04T21:14:24.592154
2018-10-02T04:40:34
2018-10-02T04:40:34
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5181317329406738, "alphanum_fraction": 0.5671598315238953, "avg_line_length": 32.79411697387695, "blob_id": "572dc29d894a16785e163601fdcffa8d83919a14", "content_id": "4f3a3fbc074530573a91fa366c6000bea377c3db", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3447, "license_type": "permissive", "max_line_length": 90, "num_lines": 102, "path": "/src/cpp/module.gyp", "repo_name": "rostrage/node_usvfs", "src_encoding": "UTF-8", "text": "{\n\t\"variables\": {\n\t\t\"boost_lib\": \"<!(node -p \\\"process.env.BOOST_LIB || '../../../deps/boost/stage/lib'\\\")\",\n\t\t\"boost_dir\": \"<!(node -p \\\"process.env.BOOST_DIR || '../../deps/boost'\\\")\",\n\n\t\t\"conditions\": [\n\t\t\t[\"target_arch=='x64'\", {\n\t\t\t\t\"arch\": \"x64\",\n\t\t\t}],\n\t\t\t[\"target_arch=='ia32'\", {\n\t\t\t\t\"arch\": \"x32\",\n\t\t\t}],\n\t\t],\n\t},\n\n\t\"targets\": [\n\t\t{\n\t\t\t\"target_name\": \"<(module_name)\",\n\t\t\t\"cflags!\": [ \"-fno-exceptions\" ],\n \"cflags_cc!\": [ \"-fno-exceptions\" ],\n\t\t\t\"dependencies\": [\n\t\t\t\t\"../../deps/deps.gyp:usvfs\",\n\t\t\t\t\"<!(node -p \\\"require('node-addon-api').gyp\\\")\"\n\t\t\t],\n\t\t\t\"defines\": [\n\t\t\t\t\"BUILDING_USVFS_DLL\",\n\t\t\t\t\"ASMJIT_STATIC\",\n\t\t\t\t\"SPDLOG_NO_NAME\",\n\t\t\t\t\"SPDLOG_NO_REGISTRY_MUTEX\",\n\t\t\t\t\"NOMINMAX\",\n\t\t\t\t\"_WINDOWS\",\n\t\t\t\t\"NDEBUG\",\n\t\t\t\t\"BOOST_CONFIG_SUPPRESS_OUTDATED_MESSAGE\",\n\t\t\t\t\"NAPI_CPP_EXCEPTIONS\",\n\t\t\t\t\"_HAS_EXCEPTIONS=1\"\n\t\t\t],\n\t\t\t\"libraries\": [\n\t\t\t\t\"Shlwapi.lib\",\n\t\t\t\t\"Version.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_atomic-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_atomic-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_chrono-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_chrono-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_context-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_context-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_coroutine-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_coroutine-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_date_time-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_date_time-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_filesystem-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_filesystem-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_locale-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_locale-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_log-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_log-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_log_setup-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_log_setup-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_regex-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_regex-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_system-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_system-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_thread-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\t\"<(boost_lib)/libboost_thread-vc141-mt-sgd-<(arch)-1_67.lib\"\n\t\t\t],\n\t\t\t\"include_dirs\": [\n\t\t\t\t\"<!@(node -p \\\"require('node-addon-api').include\\\")\",\n\t\t\t\t\"<!@(node -p 
\\\"require('napi-thread-safe-callback').include\\\")\",\n\t\t\t\t\".\",\n\t\t\t\t\"../../deps/usvfs/include\",\n\t\t\t\t\"../../deps/usvfs/src/usvfs_dll\",\n\t\t\t\t\"../../deps/usvfs/src/shared\",\n\t\t\t\t\"../../deps/usvfs/src/thooklib\",\n\t\t\t\t\"../../deps/usvfs/src/tinjectlib\",\n\t\t\t\t\"../../deps/usvfs/src/usvfs_helper\",\n\t\t\t\t\"../../deps/usvfs/asmjit/src/asmjit\",\n\t\t\t\t\"../../deps/usvfs/udis86\",\n\t\t\t\t\"<(boost_dir)\",\n\t\t\t\t\"../../deps/usvfs/fmt\",\n\t\t\t\t\"../../deps/usvfs/spdlog/include/spdlog\"\n\t\t\t],\n\t\t\t\"sources\": [\n\t\t\t\t\"node_usvfs.cc\",\n\t\t\t\t\"bindings.cc\"\n\t\t\t],\n\t\t\t\"configurations\": {\n\t\t\t\t\"Release\": {\n\t\t\t\t\t\"msvs_settings\": {\n\t\t\t\t\t\t\"VCCLCompilerTool\": {\n\t\t\t\t\t\t\t\"ExceptionHandling\": 1,\n\t\t\t\t\t\t\t\"RuntimeTypeInfo\": \"true\"\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\t\"msvs_configuration_attributes\": {\n\t\t\t\t\t\t\"CharacterSet\": 1\n\t\t\t\t\t},\n\t\t\t\t\t\"msbuild_toolset\": \"v141\",\n\t\t\t\t\t\"msvs_windows_target_platform_version\": \"10.0.16299.0\"\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t]\n}\n" }, { "alpha_fraction": 0.7076923251152039, "alphanum_fraction": 0.7076923251152039, "avg_line_length": 23.375, "blob_id": "9c6a200511a0858e610b9c3c604e342da3f155ad", "content_id": "94d1983ca0ed1c9cef7ee2f84a1452b552bb5c37", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 195, "license_type": "permissive", "max_line_length": 56, "num_lines": 8, "path": "/src/cpp/bindings.cc", "repo_name": "rostrage/node_usvfs", "src_encoding": "UTF-8", "text": "#include <napi.h>\n#include <node_usvfs.h>\n\nNapi::Object Init(Napi::Env env, Napi::Object exports) {\n\treturn node_usvfs::USVFS::Init(env, exports);\n}\n\nNODE_API_MODULE(NODE_GYP_MODULE_NAME, Init);\n" }, { "alpha_fraction": 0.7061855792999268, "alphanum_fraction": 0.7061855792999268, "avg_line_length": 26.714284896850586, "blob_id": "3770ade97b59833d0c4dcc7258f6c4ea41acbfe2", "content_id": "f1eeae3325577e06cfda89701e1fb45412dbe7d8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 194, "license_type": "permissive", "max_line_length": 75, "num_lines": 7, "path": "/src/js/binding.js", "repo_name": "rostrage/node_usvfs", "src_encoding": "UTF-8", "text": "import path from 'path';\nimport gyp from 'node-pre-gyp';\n\nconst binaryPath = gyp.find(path.resolve(__dirname, '..', 'package.json'));\nconst {USVFS} = require(binaryPath);\n\nexport default USVFS;\n" }, { "alpha_fraction": 0.7407407164573669, "alphanum_fraction": 0.7407407164573669, "avg_line_length": 17, "blob_id": "b3abec37488a194ff61cbf17bc3184c7eaf76e05", "content_id": "c3ad33c002a2b1c4e7555a583ba5df21bb0ef203", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 54, "license_type": "permissive", "max_line_length": 30, "num_lines": 3, "path": "/src/js/index.js", "repo_name": "rostrage/node_usvfs", "src_encoding": "UTF-8", "text": "import USVFS from './binding';\n\nexport default USVFS;\n" }, { "alpha_fraction": 0.719252347946167, "alphanum_fraction": 0.7319626212120056, "avg_line_length": 25.22549057006836, "blob_id": "ae23a76d46f05ec801a3adbcfa84c1f34d795a1d", "content_id": "2b468d3138147ddb628167825b7270a639bc9137", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2675, "license_type": "permissive", "max_line_length": 152, "num_lines": 
102, "path": "/README.md", "repo_name": "rostrage/node_usvfs", "src_encoding": "UTF-8", "text": "# node_usvfs\nNode.js bindings for [User Space Virtual File System (USVFS)](https://github.com/modorganizer2/usvfs).\n\nThis only works on 64-bit Windows 10\n\n# Install\n```\nnpm install @stormymcstorm/node_usvfs -S\n```\n**WARNING**: if this there is not already a prebuilt binary for your system this module will take a long time to compile and will require a lot of space\n\n# Usage\n\n```js\nconst USVFS = require('@stormymcstorm/node_usvfs');\nconst path = require('path');\n\nconst vfs = new USVFS('node');\n\n// create a virtual link between directories\nvfs.linkDirectoryStatic(path.resolve('example/s'), path.resolve('example/d'));\n\n// notepad will have access to the virtual file system\nvfs.spawn('notepad.exe', () => {\n\tconsole.log('done');\n});\n```\n\n# Documentation\n\n## clearMappings()\n```\nvfs.clearMappings();\n```\nThe clear mappings method will clear all virtual links\n\n## linkFile(string src, string dest)\nCreates a virtual link to the file\n```\nconst wasLinked = vfs.linkFile(pathTosrc, pathToDest);\n```\n* `string src` the source file to link. Must be a absolute path\n* `string dest` the destination for the link\n\nReturns a `boolean` indicating whether or not the file was successfuly linked\n\n## linkDirectoryStatic(string src, string dest)\nCreates virtual links for all the files in the directory\n```\nconst wasLinked = vfs.linkDirectoryStatic(pathTosrc, pathToDest);\n```\n* `string src` the source directory to link. Must be a absolute path\n* `string dest` the destination for the link\n\nReturns a `boolean` indicating whether or not the directory was successfuly linked\n\n## disconnect()\ndisconnects from the current vfs\n```\nvfs.disconnect();\n```\n\n## spawnSync(string command)\nSpawns the given command and blocks until the process exits\n```\nvfs.spawnSync('notepad.exe');\n```\n* `string command` the command to spawn\n\n## spawn(string command, [function callback])\nSpawns the given command asynchronously\n```\nvfs.spawn('notepad.exe', () => console.log('done'));\n```\n* `string command` the command to spawn\n* `[function callback]` a optional callback to be called when the process exits\n\n# Requirements\n\n## Operating System\nWindows 10 64-bit\n\n## Software\n\n### Windows build tools\nnode-gyp requires [windows-build-tools](https://www.npmjs.com/package/windows-build-tools) to run\n\n### Visual Studio 2017\nUSVFS requires\n[Visual Studio 2017](https://visualstudio.microsoft.com/vs/) to compile.\n - Workloads\n - Desktop development with C++\n - Individual components\n - Windows 10 SDK (10.0.16299.0) for Desktop C++ [x86 and x64]\n\n# TODO\n- [ ] allow for relative paths\n- [ ] support 32 bit windows\n- [ ] add support for creating multiple virtual file systems\n\n# License\n[MIT](LICENSE)\n" }, { "alpha_fraction": 0.5498355031013489, "alphanum_fraction": 0.5776456594467163, "avg_line_length": 23.857519149780273, "blob_id": "033d50ef7b24d741274513ed7607ba081dacf777", "content_id": "a1fca73919cdf02cef207fa835d6b4e643779dba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9421, "license_type": "permissive", "max_line_length": 87, "num_lines": 379, "path": "/deps/deps.gyp", "repo_name": "rostrage/node_usvfs", "src_encoding": "UTF-8", "text": "{\n\t\"variables\": {\n\t\t\"boost_lib\": \"<!(node -p \\\"process.env.BOOST_LIB || '../../deps/boost/stage/lib'\\\")\",\n\t\t\"boost_dir\": \"<!(node -p 
\\\"process.env.BOOST_DIR || 'boost'\\\")\",\n\n\t\t\"conditions\": [\n\t\t\t[\"target_arch=='x64'\", {\n\t\t\t\t\"arch\": \"x64\",\n\t\t\t}],\n\t\t\t[\"target_arch=='ia32'\", {\n\t\t\t\t\"arch\": \"x32\",\n\t\t\t}],\n\t\t],\n\t},\n\n\t\"target_defaults\": {\n\t\t\"libraries\": [\n\t\t\t\"Shlwapi.lib\",\n\t\t\t\"Version.lib\",\n\t\t\t\"<(boost_lib)/libboost_atomic-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_atomic-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_chrono-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_chrono-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_context-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_context-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_coroutine-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_coroutine-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_date_time-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_date_time-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_filesystem-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_filesystem-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_locale-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_locale-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_log-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_log-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_log_setup-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_log_setup-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_regex-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_regex-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_system-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_system-vc141-mt-sgd-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_thread-vc141-mt-s-<(arch)-1_67.lib\",\n\t\t\t\"<(boost_lib)/libboost_thread-vc141-mt-sgd-<(arch)-1_67.lib\"\n\t\t],\n\t},\n\n\t\"targets\": [\n\t\t# build shared\n\t\t{\n\t\t\t\"target_name\": \"shared\",\n\t\t\t\"type\": \"static_library\",\n\t\t\t\"dependencies\": [\n\t\t\t\t\"./usvfs_deps.gyp:fmt\",\n\t\t\t\t\"./usvfs_deps.gyp:spdlog\"\n\t\t\t],\n\t\t\t\"defines\": [\n\t\t\t\t\"_WIN64\",\n\t\t\t\t\"SPDLOG_NO_NAME\",\n\t\t\t\t\"SPDLOG_NO_REGISTRY_MUTEX\",\n\t\t\t\t\"NOMINMAX\",\n\t\t\t\t\"_WINDOWS\",\n\t\t\t\t\"NDEBUG\",\n\t\t\t\t\"BOOST_CONFIG_SUPPRESS_OUTDATED_MESSAGE\"\n\t\t\t],\n\t\t\t\"include_dirs\": [\n\t\t\t\t\"usvfs/src/shared\",\n\t\t\t\t\"usvfs/include\",\n\t\t\t\t\"<(boost_dir)\",\n\t\t\t\t\"usvfs/fmt\",\n\t\t\t\t\"usvfs/spdlog/include/spdlog\"\n\t\t\t],\n\t\t\t\"sources\": [\n\t\t\t\t\"usvfs/src/shared/addrtools.cpp\",\n\t\t\t\t\"usvfs/src/shared/debug_monitor.cpp\",\n\t\t\t\t\"usvfs/src/shared/directory_tree.cpp\",\n\t\t\t\t\"usvfs/src/shared/exceptionex.cpp\",\n\t\t\t\t\"usvfs/src/shared/loghelpers.cpp\",\n\t\t\t\t\"usvfs/src/shared/ntdll_declarations.cpp\",\n\t\t\t\t\"usvfs/src/shared/scopeguard.cpp\",\n\t\t\t\t\"usvfs/src/shared/shmlogger.cpp\",\n\t\t\t\t\"usvfs/src/shared/stringcast_win.cpp\",\n\t\t\t\t\"usvfs/src/shared/stringutils.cpp\",\n\t\t\t\t\"usvfs/src/shared/test_helpers.cpp\",\n\t\t\t\t\"usvfs/src/shared/unicodestring.cpp\",\n\t\t\t\t\"usvfs/src/shared/wildcard.cpp\",\n\t\t\t\t\"usvfs/src/shared/winapi.cpp\",\n\t\t\t\t\"usvfs/src/shared/windows_error.cpp\"\n\t\t\t],\n\t\t\t\"configurations\": {\n\t\t\t\t\"Release\": {\n\t\t\t\t\t\"msvs_settings\": 
{\n\t\t\t\t\t\t\"VCCLCompilerTool\": {\n\t\t\t\t\t\t\t\"ExceptionHandling\": 1,\n\t\t\t\t\t\t\t\"RuntimeTypeInfo\": \"true\"\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\t\"msvs_configuration_attributes\": {\n\t\t\t\t\t\t\"CharacterSet\": 1\n\t\t\t\t\t},\n\t\t\t\t\t\"msbuild_toolset\": \"v141\",\n\t\t\t\t\t\"msvs_windows_target_platform_version\": \"10.0.16299.0\"\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\n\t\t# build thooklib\n\t\t{\n\t\t\t\"target_name\": \"thooklib\",\n\t\t\t\"type\": \"static_library\",\n\t\t\t\"dependencies\": [\n\t\t\t\t\"./usvfs_deps.gyp:asmjit\",\n\t\t\t\t\"shared\",\n\t\t\t\t\"./usvfs_deps.gyp:spdlog\"\n\t\t\t],\n\t\t\t\"defines\": [\n\t\t\t\t\"_WIN64\",\n\t\t\t\t\"ASMJIT_STATIC\",\n\t\t\t\t\"SPDLOG_NO_NAME\",\n\t\t\t\t\"SPDLOG_NO_REGISTRY_MUTEX\",\n\t\t\t\t\"NOMINMAX\",\n\t\t\t\t\"_WINDOWS\",\n\t\t\t\t\"NDEBUG\",\n\t\t\t\t\"BOOST_CONFIG_SUPPRESS_OUTDATED_MESSAGE\"\n\t\t\t],\n\t\t\t\"include_dirs\": [\n\t\t\t\t\"usvfs/src/thooklib\",\n\t\t\t\t\"usvfs/src/shared\",\n\t\t\t\t\"usvfs/src/tinjectlib\",\n\t\t\t\t\"usvfs/src/usvfs_helper\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit\",\n\t\t\t\t\"usvfs/udis86\",\n\t\t\t\t\"usvfs/include\",\n\t\t\t\t\"<(boost_dir)\",\n\t\t\t\t\"usvfs/fmt\",\n\t\t\t\t\"usvfs/spdlog/include/spdlog\"\n\t\t\t],\n\t\t\t\"sources\": [\n\t\t\t\t\"usvfs/src/thooklib/hooklib.cpp\",\n\t\t\t\t\"usvfs/src/thooklib/ttrampolinepool.cpp\",\n\t\t\t\t\"usvfs/src/thooklib/udis86wrapper.cpp\",\n\t\t\t\t\"usvfs/src/thooklib/utility.cpp\"\n\t\t\t],\n\t\t\t\"configurations\": {\n\t\t\t\t\"Release\": {\n\t\t\t\t\t\"msvs_settings\": {\n\t\t\t\t\t\t\"VCCLCompilerTool\": {\n\t\t\t\t\t\t\t\"ExceptionHandling\": 1,\n\t\t\t\t\t\t\t\"RuntimeTypeInfo\": \"true\"\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\t\"msvs_configuration_attributes\": {\n\t\t\t\t\t\t\"CharacterSet\": 1\n\t\t\t\t\t},\n\t\t\t\t\t\"msbuild_toolset\": \"v141\",\n\t\t\t\t\t\"msvs_windows_target_platform_version\": \"10.0.16299.0\"\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\n\t\t# build tinjectlib\n\t\t{\n\t\t\t\"target_name\": \"tinjectlib\",\n\t\t\t\"type\": \"static_library\",\n\t\t\t\"dependencies\": [\n\t\t\t\t\"./usvfs_deps.gyp:asmjit\",\n\t\t\t\t\"shared\"\n\t\t\t],\n\t\t\t\"defines\": [\n\t\t\t\t\"_WIN64\",\n\t\t\t\t\"ASMJIT_STATIC\",\n\t\t\t\t\"SPDLOG_NO_NAME\",\n\t\t\t\t\"SPDLOG_NO_REGISTRY_MUTEX\",\n\t\t\t\t\"NOMINMAX\",\n\t\t\t\t\"_WINDOWS\",\n\t\t\t\t\"NDEBUG\",\n\t\t\t\t\"BOOST_CONFIG_SUPPRESS_OUTDATED_MESSAGE\"\n\t\t\t],\n\t\t\t\"include_dirs\": [\n\t\t\t\t\"usvfs/src/tinjectlib\",\n\t\t\t\t\"usvfs/src/shared\",\n\t\t\t\t\"usvfs/src/thooklib\",\n\t\t\t\t\"usvfs/src/usvfs_helper\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit\",\n\t\t\t\t\"usvfs/udis86\",\n\t\t\t\t\"usvfs/include\",\n\t\t\t\t\"<(boost_dir)\",\n\t\t\t\t\"usvfs/fmt\",\n\t\t\t\t\"usvfs/spdlog/include/spdlog\"\n\t\t\t],\n\t\t\t\"sources\": [\n\t\t\t\t\"usvfs/src/tinjectlib/injectlib.cpp\"\n\t\t\t],\n\t\t\t\"configurations\": {\n\t\t\t\t\"Release\": {\n\t\t\t\t\t\"msvs_settings\": {\n\t\t\t\t\t\t\"VCCLCompilerTool\": {\n\t\t\t\t\t\t\t\"ExceptionHandling\": 1,\n\t\t\t\t\t\t\t\"RuntimeTypeInfo\": \"true\"\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\t\"msvs_configuration_attributes\": {\n\t\t\t\t\t\t\"CharacterSet\": 1\n\t\t\t\t\t},\n\t\t\t\t\t\"msbuild_toolset\": \"v141\",\n\t\t\t\t\t\"msvs_windows_target_platform_version\": \"10.0.16299.0\"\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\n\t\t# usvfs_helper\n\t\t{\n\t\t\t\"target_name\": \"usvfs_helper\",\n\t\t\t\"type\": \"static_library\",\n\t\t\t\"dependencies\": [\n\t\t\t\t\"shared\",\n\t\t\t\t\"tinjectlib\"\n\t\t\t],\n\t\t\t\"defines\": 
[\n\t\t\t\t\"BUILDING_USVFS_DLL\",\n\t\t\t\t\"_WIN64\",\n\t\t\t\t\"ASMJIT_STATIC\",\n\t\t\t\t\"SPDLOG_NO_NAME\",\n\t\t\t\t\"SPDLOG_NO_REGISTRY_MUTEX\",\n\t\t\t\t\"NOMINMAX\",\n\t\t\t\t\"_WINDOWS\",\n\t\t\t\t\"NDEBUG\",\n\t\t\t\t\"BOOST_CONFIG_SUPPRESS_OUTDATED_MESSAGE\"\n\t\t\t],\n\t\t\t\"include_dirs\": [\n\t\t\t\t\"usvfs/src/usvfs_helper\",\n\t\t\t\t\"usvfs/src/shared\",\n\t\t\t\t\"usvfs/src/thooklib\",\n\t\t\t\t\"usvfs/src/tinjectlib\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit\",\n\t\t\t\t\"usvfs/udis86\",\n\t\t\t\t\"usvfs/include\",\n\t\t\t\t\"<(boost_dir)\",\n\t\t\t\t\"usvfs/fmt\",\n\t\t\t\t\"usvfs/spdlog/include/spdlog\"\n\t\t\t],\n\t\t\t\"sources\": [\n\t\t\t\t\"usvfs/src/usvfs_helper/inject.cpp\"\n\t\t\t],\n\t\t\t\"configurations\": {\n\t\t\t\t\"Release\": {\n\t\t\t\t\t\"msvs_settings\": {\n\t\t\t\t\t\t\"VCCLCompilerTool\": {\n\t\t\t\t\t\t\t\"ExceptionHandling\": 1,\n\t\t\t\t\t\t\t\"RuntimeTypeInfo\": \"true\"\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\t\"msvs_configuration_attributes\": {\n\t\t\t\t\t\t\"CharacterSet\": 1\n\t\t\t\t\t},\n\t\t\t\t\t\"msbuild_toolset\": \"v141\",\n\t\t\t\t\t\"msvs_windows_target_platform_version\": \"10.0.16299.0\"\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\n\t\t# usvfs\n\t\t{\n\t\t\t\"target_name\": \"usvfs\",\n\t\t\t\"type\": \"shared_library\",\n\t\t\t\"dependencies\": [\n\t\t\t\t\"./usvfs_deps.gyp:asmjit\",\n\t\t\t\t\"./usvfs_deps.gyp:fmt\",\n\t\t\t\t\"shared\",\n\t\t\t\t\"./usvfs_deps.gyp:spdlog\",\n\t\t\t\t\"thooklib\",\n\t\t\t\t\"tinjectlib\",\n\t\t\t\t\"./usvfs_deps.gyp:udis86\",\n\t\t\t\t\"usvfs_helper\"\n\t\t\t],\n\t\t\t\"defines\": [\n\t\t\t\t\"BUILDING_USVFS_DLL\",\n\t\t\t\t\"ASMJIT_STATIC\",\n\t\t\t\t\"SPDLOG_NO_NAME\",\n\t\t\t\t\"SPDLOG_NO_REGISTRY_MUTEX\",\n\t\t\t\t\"NOMINMAX\",\n\t\t\t\t\"_WINDOWS\",\n\t\t\t\t\"NDEBUG\",\n\t\t\t\t\"BOOST_CONFIG_SUPPRESS_OUTDATED_MESSAGE\"\n\t\t\t],\n\t\t\t\"include_dirs\": [\n\t\t\t\t\"usvfs/include\",\n\t\t\t\t\"usvfs/src/usvfs_dll\",\n\t\t\t\t\"usvfs/src/shared\",\n\t\t\t\t\"usvfs/src/thooklib\",\n\t\t\t\t\"usvfs/src/tinjectlib\",\n\t\t\t\t\"usvfs/src/usvfs_helper\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit\",\n\t\t\t\t\"usvfs/udis86\",\n\t\t\t\t\"<(boost_dir)\",\n\t\t\t\t\"usvfs/fmt\",\n\t\t\t\t\"usvfs/spdlog/include/spdlog\"\n\t\t\t],\n\t\t\t\"sources\": [\n\t\t\t\t\"usvfs/src/usvfs_dll/hookcallcontext.cpp\",\n\t\t\t\t\"usvfs/src/usvfs_dll/hookcontext.cpp\",\n\t\t\t\t\"usvfs/src/usvfs_dll/hookmanager.cpp\",\n\t\t\t\t\"usvfs/src/usvfs_dll/hooks/kernel32.cpp\",\n\t\t\t\t\"usvfs/src/usvfs_dll/hooks/ntdll.cpp\",\n\t\t\t\t\"usvfs/src/usvfs_dll/redirectiontree.cpp\",\n\t\t\t\t\"usvfs/src/usvfs_dll/semaphore.cpp\",\n\t\t\t\t\"usvfs/src/usvfs_dll/stringcast_boost.cpp\",\n\t\t\t\t\"usvfs/src/usvfs_dll/usvfs.cpp\"\n\t\t\t],\n\t\t\t\"configurations\": {\n\t\t\t\t\"Release\": {\n\t\t\t\t\t\"msvs_settings\": {\n\t\t\t\t\t\t\"VCCLCompilerTool\": {\n\t\t\t\t\t\t\t\"ExceptionHandling\": 1,\n\t\t\t\t\t\t\t\"RuntimeTypeInfo\": \"true\"\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\t\"msvs_configuration_attributes\": {\n\t\t\t\t\t\t\"CharacterSet\": 1\n\t\t\t\t\t},\n\t\t\t\t\t\"msbuild_toolset\": \"v141\",\n\t\t\t\t\t\"msvs_windows_target_platform_version\": \"10.0.16299.0\"\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\n\t\t# usvfs_proxy\n\t\t{\n\t\t\t\"target_name\": \"usvfs_proxy\",\n\t\t\t\"type\": \"executable\",\n\t\t\t\"dependencies\": [\n\t\t\t\t\"./usvfs_deps.gyp:asmjit\",\n\t\t\t\t\"shared\",\n\t\t\t\t\"tinjectlib\",\n\t\t\t\t\"usvfs\",\n\t\t\t\t\"usvfs_helper\"\n\t\t\t],\n\t\t\t\"defines\": 
[\n\t\t\t\t\"_WIN64\",\n\t\t\t\t\"ASMJIT_STATIC\",\n\t\t\t\t\"SPDLOG_NO_NAME\",\n\t\t\t\t\"SPDLOG_NO_REGISTRY_MUTEX\",\n\t\t\t\t\"NOMINMAX\",\n\t\t\t\t\"_WINDOWS\",\n\t\t\t\t\"NDEBUG\",\n\t\t\t\t\"BOOST_CONFIG_SUPPRESS_OUTDATED_MESSAGE\"\n\t\t\t],\n\t\t\t\"include_dirs\": [\n\t\t\t\t\"usvfs/src/shared\",\n\t\t\t\t\"usvfs/src/thooklib\",\n\t\t\t\t\"usvfs/src/tinjectlib\",\n\t\t\t\t\"usvfs/src/usvfs_helper\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit\",\n\t\t\t\t\"usvfs/udis86\",\n\t\t\t\t\"usvfs/include\",\n\t\t\t\t\"<(boost_dir)\",\n\t\t\t\t\"usvfs/fmt\",\n\t\t\t\t\"usvfs/spdlog/include/spdlog\"\n\t\t\t],\n\t\t\t\"sources\": [\n\t\t\t\t\"usvfs/src/usvfs_proxy/main.cpp\"\n\t\t\t],\n\t\t\t\"configurations\": {\n\t\t\t\t\"Release\": {\n\t\t\t\t\t\"msvs_settings\": {\n\t\t\t\t\t\t\"VCCLCompilerTool\": {\n\t\t\t\t\t\t\t\"ExceptionHandling\": 1,\n\t\t\t\t\t\t\t\"RuntimeTypeInfo\": \"true\"\n\t\t\t\t\t\t}\n\t\t\t\t\t},\n\t\t\t\t\t\"msvs_configuration_attributes\": {\n\t\t\t\t\t\t\"CharacterSet\": 1\n\t\t\t\t\t},\n\t\t\t\t\t\"msbuild_toolset\": \"v141\",\n\t\t\t\t\t\"msvs_windows_target_platform_version\": \"10.0.16299.0\"\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t]\n}\n" }, { "alpha_fraction": 0.45855194330215454, "alphanum_fraction": 0.46694648265838623, "avg_line_length": 17.6862735748291, "blob_id": "e0255ef5c67d86350d396ed200c2c09f5fe5c100", "content_id": "b4c5918eabfaff2ff20cb7625709f8b9dbf0d2c9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 953, "license_type": "permissive", "max_line_length": 90, "num_lines": 51, "path": "/binding.gyp", "repo_name": "rostrage/node_usvfs", "src_encoding": "UTF-8", "text": "{\n\t\"variables\": {\n\t\t\"boost_lib\": \"<!(node -p \\\"process.env.BOOST_LIB || '../../../deps/boost/stage/lib'\\\")\",\n\n\t\t\"conditions\": [\n\t\t\t[\"target_arch=='x64'\", {\n\t\t\t\t\"arch\": \"x64\",\n\t\t\t}],\n\t\t\t[\"target_arch=='ia32'\", {\n\t\t\t\t\"arch\": \"x32\",\n\t\t\t}],\n\t\t],\n\t},\n\n\t\"targets\": [\n\t\t{\n\t\t\t\"target_name\": \"build_deps\",\n\t\t\t\"actions\": [\n\t\t\t\t{\n\t\t\t\t\t\"action_name\": \"get_build_deps\",\n\t\t\t\t\t\"inputs\": [],\n\t\t\t\t\t\"outputs\": [\"\"],\n\t\t\t\t\t\"action\": [\"node\", \"./get_build_deps.js\"],\n\t\t\t\t\t\"message\": \"Getting build dependencies\"\n\t\t\t\t}\n\t\t\t],\n\t\t},\n\t\t{\n\t\t\t\"target_name\": \"<(module_name)\",\n\t\t\t\"dependencies\": [\n\t\t\t\t\"build_deps\",\n\t\t\t\t\"./src/cpp/module.gyp:<(module_name)\",\n\t\t\t],\n\t\t},\n\t\t{\n\t\t\t\"target_name\": \"action_after_build\",\n\t\t\t\"type\": \"none\",\n\t\t\t\"dependencies\": [ \"<(module_name)\" ],\n\t\t\t\"copies\": [\n\t\t\t\t{\n\t\t\t\t\"files\": [\n\t\t\t\t\t\"<(PRODUCT_DIR)/<(module_name).node\",\n\t\t\t\t\t\"<(PRODUCT_DIR)/usvfs.dll\",\n\t\t\t\t\t\"<(PRODUCT_DIR)/usvfs.lib\"\n\t\t\t\t],\n\t\t\t\t\"destination\": \"<(module_path)\"\n\t\t\t\t}\n\t\t\t]\n\t\t},\n\t]\n}\n" }, { "alpha_fraction": 0.5901467800140381, "alphanum_fraction": 0.6160027980804443, "avg_line_length": 23.461538314819336, "blob_id": "71b17e3d1de2a69b5fd6dc62b7718165b5afe1d0", "content_id": "7088a894275c9ba857649d9067a50e597bc1261f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2862, "license_type": "permissive", "max_line_length": 58, "num_lines": 117, "path": "/deps/usvfs_deps.gyp", "repo_name": "rostrage/node_usvfs", "src_encoding": "UTF-8", "text": "{\n\t\"target_defaults\": {\n\t\t\"configurations\": {\n\t\t\t\"Release\": 
{\n\t\t\t\t\"msvs_settings\": {\n\t\t\t\t\t\"VCCLCompilerTool\": {\n\t\t\t\t\t\t\"ExceptionHandling\": 1,\n\t\t\t\t\t\t\"RuntimeTypeInfo\": \"true\"\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t\"msvs_configuration_attributes\": {\n\t\t\t\t\t\"CharacterSet\": 1\n\t\t\t\t},\n\t\t\t\t\"msbuild_toolset\": \"v141\",\n\t\t\t\t\"msvs_windows_target_platform_version\": \"10.0.16299.0\"\n\t\t\t}\n\t\t},\n\t},\n\n\t\"targets\": [\n\t\t# build asmjit\n\t\t{\n\t\t\t\"target_name\": \"asmjit\",\n\t\t\t\"type\": \"static_library\",\n\t\t\t\"defines\": [\n\t\t\t\t\"ASMJIT_STATIC\",\n\t\t\t\t\"_WINDOWS\",\n\t\t\t\t\"NDEBUG\",\n\t\t\t\t\"BOOST_CONFIG_SUPPRESS_OUTDATED_MESSAGE\"\n\t\t\t],\n\t\t\t\"include_dirs\": [\n\t\t\t\t\"usvfs/asmjit/src/asmjit\"\n\t\t\t],\n\t\t\t\"sources\": [\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/assembler.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/compiler.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/compilercontext.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/constpool.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/containers.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/cpuinfo.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/globals.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/hlstream.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/logger.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/operand.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/podvector.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/runtime.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/utils.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/vmem.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/base/zone.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/x86/x86assembler.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/x86/x86compiler.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/x86/x86compilercontext.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/x86/x86compilerfunc.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/x86/x86inst.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/x86/x86operand.cpp\",\n\t\t\t\t\"usvfs/asmjit/src/asmjit/x86/x86operand_regs.cpp\"\n\t\t\t],\n\t\t},\n\n\t\t# build fmt\n\t\t{\n\t\t\t\"target_name\": \"fmt\",\n\t\t\t\"type\": \"static_library\",\n\t\t\t\"defines\": [\n\t\t\t\t\"_WINDOWS\",\n\t\t\t\t\"NDEBUG\",\n\t\t\t\t\"BOOST_CONFIG_SUPPRESS_OUTDATED_MESSAGE\"\n\t\t\t],\n\t\t\t\"include_dirs\": [\n\t\t\t\t\"usvfs/fmt\"\n\t\t\t],\n\t\t\t\"sources\": [\n\t\t\t\t\"usvfs/fmt/fmt/format.cc\",\n\t\t\t\t\"usvfs/fmt/fmt/ostream.cc\",\n\t\t\t\t\"usvfs/fmt/fmt/posix.cc\",\n\t\t\t\t\"usvfs/fmt/fmt/printf.cc\"\n\t\t\t],\n\t\t},\n\n\t\t# build spdlog\n\t\t{\n\t\t\t\"target_name\": \"spdlog\",\n\t\t\t\"type\": \"static_library\",\n\t\t\t\"defines\": [\n\t\t\t\t\"_WINDOWS\",\n\t\t\t\t\"NDEBUG\",\n\t\t\t\t\"BOOST_CONFIG_SUPPRESS_OUTDATED_MESSAGE\"\n\t\t\t],\n\t\t\t\"include_dirs\": [\n\t\t\t\t\"usvfs/spdlog/include/spdlog\"\n\t\t\t],\n\t\t\t\"sources\": [],\n\t\t},\n\n\t\t# build udis86\n\t\t{\n\t\t\t\"target_name\": \"udis86\",\n\t\t\t\"type\": \"static_library\",\n\t\t\t\"defines\": [\n\t\t\t\t\"_WINDOWS\",\n\t\t\t\t\"NDEBUG\",\n\t\t\t\t\"BOOST_CONFIG_SUPPRESS_OUTDATED_MESSAGE\"\n\t\t\t],\n\t\t\t\"include_dirs\": [\n\t\t\t\t\"usvfs/udis86\"\n\t\t\t],\n\t\t\t\"sources\": [\n\t\t\t\t\"usvfs/udis86/libudis86/decode.c\",\n\t\t\t\t\"usvfs/udis86/libudis86/itab.c\",\n\t\t\t\t\"usvfs/udis86/libudis86/syn-att.c\",\n\t\t\t\t\"usvfs/udis86/libudis86/syn-intel.c\",\n\t\t\t\t\"usvfs/udis86/libudis86/syn.c\",\n\t\t\t\t\"usvfs/udis86/libudis86/udis86.c\"\n\t\t\t],\n\t\t},\n\t]\n}\n" }, { "alpha_fraction": 0.6220282912254333, "alphanum_fraction": 0.6536262631416321, 
"avg_line_length": 32.22999954223633, "blob_id": "0ce725097ee6201ac871691012aa3fd4c45ff0c1", "content_id": "105ae4e9193954a8e767dadcba007c412e139706", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6646, "license_type": "permissive", "max_line_length": 109, "num_lines": 200, "path": "/get_build_deps.js", "repo_name": "rostrage/node_usvfs", "src_encoding": "UTF-8", "text": "const path = require('path');\nconst fs = require('fs');\nconst {spawn} = require('child_process');\nconst os = require('os');\n\nlet args = {};\n\nargs = process.argv.slice(2).reduce((argMap, arg) => {\n\tif (arg.charAt(0) === '-') {\n\t\tconst keyIndex = arg.charAt(1) == '-' ? 2 : 1;\n\t\tconst key = arg.indexOf('=') > -1 ? arg.substring(keyIndex, arg.indexOf('=')) : arg.substring(keyIndex);\n\t\tconst value = arg.indexOf('=') > -1 ? arg.substring(arg.indexOf('=') + 1) : true;\n\n\t\targMap[key] = value;\n\t\treturn argMap;\n\t}\n\n\targMap.unknown.push(arg);\n\treturn argMap;\n}, {\n\tunknown: [],\n});\n\n// clone deps\nconst usvfsPath = path.resolve(__dirname, \"deps\", \"usvfs\");\nconst boostPath = path.resolve(__dirname, \"deps\", \"boost\");\nconst udis86Path = path.resolve(usvfsPath, \"udis86\");\n\nconst boostLibArch = (args.arch || os.arch()) == 'x64' ? '64' : '32';\nconst boostBackupLib = path.resolve(boostPath, 'stage', 'lib');\nconst boostLib = process.env.BOOST_LIB || boostBackupLib;\n\nconsole.log(`Checking if boost is ready to use at ${boostLib}`);\nisBoostReady(boostLib)\n\t.then(isReady => {\n\t\tif (! isReady) {\n\t\t\tconsole.log(`Did not find boost libraries for x${boostLibArch}`);\n\t\t\treturn buildLocalBoost();\n\t\t}\n\t})\n\t.then(() => {\n\t\tconsole.log('boost is ready');\n\t\tconsole.log('Checking if deps/usvfs is cloned');\n\t\treturn isCloned(usvfsPath);\n\t})\n\t.then(usvfsIsCloned => {\n\t\tif (! usvfsIsCloned) {\n\t\t\tconsole.log('Cloning deps/usvfs');\n\t\t\treturn clone('deps/usvfs')\n\t\t}\n\t})\n\t.then(() => {\n\t\tconsole.log('deps/usvfs is cloned');\n\t\tconsole.log('Checking if udis86 is ready');\n\t\treturn allExist([\n\t\t\tpath.resolve(udis86Path, \"libudis86/decode.c\"),\n\t\t\tpath.resolve(udis86Path, \"libudis86/itab.c\"),\n\t\t\tpath.resolve(udis86Path, \"libudis86/syn-att.c\"),\n\t\t\tpath.resolve(udis86Path, \"libudis86/syn-intel.c\"),\n\t\t\tpath.resolve(udis86Path, \"libudis86/syn.c\"),\n\t\t\tpath.resolve(udis86Path, \"libudis86/udis86.c\"),\n\t\t]);\n\t})\n\t.then(udis86IsReady => {\n\t\tif (! udis86IsReady) {\n\t\t\tconsole.log('Building udis86');\n\t\t\treturn spawnCommand('python', [\"scripts/ud_itab.py\", \"docs/x86/optable.xml\", \"libudis86\"], {\n\t\t\t\tcwd: udis86Path,\n\t\t\t\tstdio: 'inherit',\n\t\t\t});\n\t\t}\n\n\t\tconsole.log('udis86 is ready');\n\t})\n\t.then(() => {\n\t\tconsole.log('Build depedencies are ready');\n\t\tprocess.exit(0);\n\t})\n\t.catch((err) => {\n\t\tconsole.error(err);\n\t\tprocess.exit(err.code || 1);\n\t});\n\nfunction buildLocalBoost() {\n\tconsole.log('Checking if deps/boost is cloned');\n\treturn isCloned(boostPath)\n\t\t.then(boostIsCloned => {\n\t\t\tif (! 
boostIsCloned) {\n\t\t\t\tconsole.log('Cloning deps/boost');\n\t\t\t\treturn clone('deps/boost');\n\t\t\t}\n\t\t})\n\t\t.then(() => {\n\t\t\tconsole.log('deps/boost is cloned');\n\t\t\tconsole.log('Building boost');\n\t\t\treturn spawnCommand(\".\\\\bootstrap.bat\", {cwd: boostPath, stdio: 'inherit'})\n\t\t\t\t.then(() => spawnCommand(\".\\\\b2.exe\", [\n\t\t\t\t\t\"--with-date_time\",\n\t\t\t\t\t\"--with-coroutine\",\n\t\t\t\t\t\"--with-filesystem\",\n\t\t\t\t\t\"--with-thread\",\n\t\t\t\t\t\"--with-log\",\n\t\t\t\t\t\"--with-locale\",\n\t\t\t\t\t`address-model=${boostLibArch}`,\n\t\t\t\t\t\"architecture=x86\",\n\t\t\t\t\t\"link=static\",\n\t\t\t\t\t\"runtime-link=static\"\n\t\t\t\t], {cwd: boostPath, stdio: 'inherit'}));\n\t\t});\n}\n\nfunction clone(modulePath) {\n\treturn spawnCommand(\"git\", [\"submodule\", \"update\", \"--init\", \"--recursive\", \"--\", modulePath], {\n\t\tcwd: __dirname,\n\t\tstdio: 'inherit',\n\t});\n}\n\nfunction isBoostReady(lib) {\n\treturn allExist([\n\t\tpath.resolve(boostLib, `libboost_atomic-vc141-mt-s-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_atomic-vc141-mt-sgd-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_chrono-vc141-mt-s-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_chrono-vc141-mt-sgd-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_context-vc141-mt-s-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_context-vc141-mt-sgd-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_coroutine-vc141-mt-s-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_coroutine-vc141-mt-sgd-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_date_time-vc141-mt-s-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_date_time-vc141-mt-sgd-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_filesystem-vc141-mt-s-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_filesystem-vc141-mt-sgd-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_locale-vc141-mt-s-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_locale-vc141-mt-sgd-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_log-vc141-mt-s-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_log-vc141-mt-sgd-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_log_setup-vc141-mt-s-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_log_setup-vc141-mt-sgd-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_regex-vc141-mt-s-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_regex-vc141-mt-sgd-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_system-vc141-mt-s-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_system-vc141-mt-sgd-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_thread-vc141-mt-s-x${boostLibArch}-1_67.lib`),\n\t\tpath.resolve(boostLib, `libboost_thread-vc141-mt-sgd-x${boostLibArch}-1_67.lib`)\n\t]);\n}\n\nfunction isCloned(path) {\n\treturn allExist([path])\n\t\t.then(exists => {\n\t\t\tif (exists) return isEmpty(path).then(empty => ! 
empty);\n\t\t\treturn false;\n\t\t});\n}\n\nfunction spawnCommand(...args) {\n\treturn new Promise((resolve, reject) => {\n\t\tconst p = spawn(...args);\n\n\t\tp.once('close', code => {\n\t\t\tp.removeAllListeners();\n\t\t\tif (code != 0) reject(new Error(`'${args.join(' ')}' exited with a non-zero code`));\n\t\t\telse resolve();\n\t\t});\n\n\t\tp.once('error', err => {\n\t\t\tp.removeAllListeners();\n\t\t\treject(err);\n\t\t});\n\t});\n}\n\nfunction isEmpty(path) {\n\treturn new Promise((resolve, reject) => {\n\t\tfs.stat(path, (err, stats) => {\n\t\t\tif (err) reject(err);\n\t\t\telse if (! stats.isDirectory()) reject(`${path} is not a directory`);\n\t\t\telse fs.readdir(path, (err, files) => {\n\t\t\t\tif (err) reject(err);\n\t\t\t\telse resolve(! files.filter(name => name != \".git\").length);\n\t\t\t});\n\t\t});\n\t});\n}\n\nfunction allExist(files) {\n\tconst proms = files.map(file => new Promise((resolve, reject) => fs.access(file, fs.constants.F_OK, err => {\n\t\tif (err) {\n\t\t\tif (err.code === 'ENOENT') resolve(false);\n\t\t\telse reject(err);\n\t\t}\n\t\telse resolve(true);\n\t})).then(found => {\n\t\tif (found) console.log('\\tfound ' + file);\n\t\telse console.log('\\tnot found ' + file);\n\t\treturn found;\n\t}));\n\n\treturn Promise.all(proms).then(results => results.every(exists => exists));\n}\n" }, { "alpha_fraction": 0.676221489906311, "alphanum_fraction": 0.6843183040618896, "avg_line_length": 25.465517044067383, "blob_id": "336e16d16b5daf01004c01fa85c174fcf761aac0", "content_id": "1974c41da81415fca7cc92d4915d1ff8c74abf29", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 10745, "license_type": "permissive", "max_line_length": 117, "num_lines": 406, "path": "/src/cpp/node_usvfs.cc", "repo_name": "rostrage/node_usvfs", "src_encoding": "UTF-8", "text": "#include <node_usvfs.h>\n#include <iostream>\n#include <exception>\n\n#define NAPI_EXPERIMENTAL\n\nnamespace node_usvfs {\n\n// Utilities\nstruct SpawnOptions {\n\tNapi::String cwd;\n\tNapi::Object env;\n};\n\nLPWSTR stringToLPWSTR(const char* str) {\n\tint n = MultiByteToWideChar(CP_UTF8, 0, str, -1, NULL, 0); // get length\n\n\tLPWSTR result = new TCHAR[n];\n\tMultiByteToWideChar(CP_UTF8, 0, str, -1, result, n);\n\n\treturn result;\n}\n\n// TODO: implment env and cwd options\nbool spawnHookedProcess(Napi::String command, SpawnOptions opts, STARTUPINFO *si, PROCESS_INFORMATION *pi) {\n\t// clean startup and process info\n\tZeroMemory(si, sizeof(*si));\n\t(*si).cb = sizeof(si);\n\n\tZeroMemory(pi, sizeof(*pi));\n\n\t// create process\n\treturn CreateProcessHooked(\n\t\tNULL,\n\t\tstringToLPWSTR(command.Utf8Value().c_str()),\n\t\tNULL,\n\t\tNULL,\n\t\tFALSE,\n\t\tNULL,\n\t\tNULL,\n\t\tNULL,\n\t\tsi,\n\t\tpi\n\t);\n}\n\nSpawnOptions getSpawnOptions(Napi::Env env, Napi::Object opts) {\n\tSpawnOptions options;\n\tNapi::Value tmp;\n\n\tif (opts.Has(\"cwd\")) {\n\t\ttmp = opts.Get(\"cwd\");\n\n\t\tif (! tmp.IsString()) Napi::Error::New(env, \"cwd must be a string\").ThrowAsJavaScriptException();\n\n\t\toptions.cwd = tmp.As<Napi::String>();\n\t}\n\n\tif (opts.Has(\"env\")) {\n\t\ttmp = opts.Get(\"env\");\n\n\t\tif (! 
tmp.IsObject()) Napi::Error::New(env, \"env must be a object\").ThrowAsJavaScriptException();\n\n\t\toptions.env = tmp.As<Napi::Object>();\n\t}\n\n\treturn options;\n}\n\nvoid throwLastError(Napi::Env env, const char* format) {\n\tchar message [200];\n\tint lastError = GetLastError();\n\n\tsprintf(message, format, lastError);\n\tNapi::Error::New(env, message).ThrowAsJavaScriptException();\n}\n\n// Module functions\nNapi::FunctionReference USVFS::constructor;\n\nNapi::Object USVFS::Init(Napi::Env env, Napi::Object exports) {\n\tNapi::HandleScope scope(env);\n\n\tNapi::Function func = DefineClass(env, \"USVFS\", {\n\t\tInstanceMethod(\"ClearMappings\", &USVFS::clearMappings),\n\t\tInstanceMethod(\"LinkFile\", &USVFS::linkFile),\n\t\tInstanceMethod(\"LinkDirectoryStatic\", &USVFS::linkDirectoryStatic),\n\t\tInstanceMethod(\"SpawnSync\", &USVFS::spawnSync),\n\t\tInstanceMethod(\"Spawn\", &USVFS::spawn),\n\t\tInstanceMethod(\"Disconnect\", &USVFS::disconnect),\n\t});\n\n\tconstructor = Napi::Persistent(func);\n\tconstructor.SuppressDestruct();\n\n\texports.Set(\"USVFS\", func);\n\treturn exports;\n}\n\nUSVFS::USVFS(const Napi::CallbackInfo& info) : Napi::ObjectWrap<USVFS>(info) {\n\tNapi::Env env = info.Env();\n\tNapi::HandleScope scope(env);\n\n\tint length = info.Length();\n\n\tif (length <= 0 || (! info[0].IsString() && ! info[0].IsObject())) {\n\t\tNapi::TypeError::New(env, \"USVFS only accepts a instanceName or a parameters object\").ThrowAsJavaScriptException();\n\t}\n\n\tconst char* instanceName;\n\tbool debugMode = false;\n\tLogLevel logLevel = LogLevel::Info;\n\tCrashDumpsType crashDumpsType = CrashDumpsType::None;\n\tconst char* crashDumpsPath = \"\";\n\n\tNapi::Value arg0 = info[0];\n\n\tif (arg0.IsString()) {\n\t\tinstanceName = arg0.As<Napi::String>().Utf8Value().c_str();\n\t} else {\n\t\tNapi::Object params = arg0.As<Napi::Object>();\n\t\tNapi::Value tmp;\n\n\t\t// get instanceName property\n\t\tif (! params.Has(\"instanceName\"))\n\t\t Napi::TypeError::New(env, \"params must contain the instanceName property\").ThrowAsJavaScriptException();\n\n\t\ttmp = params.Get(\"instanceName\");\n\n\t\tif (! tmp.IsString())\n\t\t\tNapi::TypeError::New(env, \"instanceName must be a string\").ThrowAsJavaScriptException();\n\n\t\tinstanceName = tmp.As<Napi::String>().Utf8Value().c_str();\n\n\t\t// get debugMode property\n\t\tif (params.Has(\"debugMode\")) {\n\t\t\ttmp = params.Get(\"debugMode\");\n\n\t\t\tif (! tmp.IsBoolean())\n\t\t\t\tNapi::TypeError::New(env, \"debugMode must be a boolean\").ThrowAsJavaScriptException();\n\n\t\t\tdebugMode = tmp.As<Napi::Boolean>().Value();\n\t\t}\n\n\t\t// get logLevel property\n\t\tif (params.Has(\"logLevel\")) {\n\t\t\ttmp = params.Get(\"logLevel\");\n\n\t\t\tif (! tmp.IsNumber())\n\t\t\t\tNapi::TypeError::New(env, \"logLevel must be a number\").ThrowAsJavaScriptException();\n\n\t\t\tint level = tmp.As<Napi::Number>().Int32Value();\n\n\t\t\tif (level < 0 || level > 3)\n\t\t\t\tNapi::RangeError::New(env, \"logLevel must be between 0 and 3\").ThrowAsJavaScriptException();\n\n\t\t\tswitch (level) {\n\t\t\tcase 0:\n\t\t\t\tlogLevel = LogLevel::Error;\n\t\t\t\tbreak;\n\t\t\tcase 1:\n\t\t\t\tlogLevel = LogLevel::Warning;\n\t\t\t\tbreak;\n\t\t\tcase 2:\n\t\t\t\tlogLevel = LogLevel::Info;\n\t\t\t\tbreak;\n\t\t\tcase 3:\n\t\t\t\tlogLevel = LogLevel::Debug;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\n\t\t// get crashDumpsType and crashDumpsPath properties\n\t\tif (params.Has(\"crashDumpsType\")) {\n\t\t\ttmp = params.Get(\"crashDumpsType\");\n\n\t\t\tif (! 
tmp.IsNumber())\n\t\t\t\tNapi::TypeError::New(env, \"crashDumpsType must be a number\");\n\n\t\t\tint type = tmp.As<Napi::Number>().Int32Value();\n\n\t\t\tif (type < 0 || type > 4)\n\t\t\t\tNapi::RangeError::New(env, \"crashDumpsType must be between 0 and 3\");\n\n\t\t\tswitch (type) {\n\t\t\tcase 0:\n\t\t\t\tcrashDumpsType = CrashDumpsType::None;\n\t\t\t\tbreak;\n\t\t\tcase 1:\n\t\t\t\tcrashDumpsType = CrashDumpsType::Mini;\n\t\t\t\tbreak;\n\t\t\tcase 2:\n\t\t\t\tcrashDumpsType = CrashDumpsType::Data;\n\t\t\t\tbreak;\n\t\t\tcase 3:\n\t\t\t\tcrashDumpsType = CrashDumpsType::Full;\n\t\t\t\tbreak;\n\t\t\t}\n\n\t\t\tif (crashDumpsType != CrashDumpsType::None) {\n\t\t\t\tif (! params.Has(\"crashDumpsPath\"))\n\t\t\t\t\tNapi::Error::New(env, \"params must specifiy a crashDumpsPath when crashDumpsType is not none\")\n\t\t\t\t\t\t.ThrowAsJavaScriptException();\n\n\t\t\t\ttmp = params.Get(\"crashDumpsPath\");\n\n\t\t\t\tif (! tmp.IsString())\n\t\t\t\t\tNapi::TypeError::New(env, \"crashDumpsPath must be a string\");\n\n\t\t\t\tcrashDumpsPath = tmp.As<Napi::String>().Utf8Value().c_str();\n\t\t\t}\n\t\t}\n\t}\n\n\tUSVFSInitParameters(&this->params_, instanceName, debugMode, logLevel, crashDumpsType, crashDumpsPath);\n\n\tbool worked;\n\n\tworked = CreateVFS(&this->params_);\n\n\tif (! worked)\n\t\tNapi::Error::New(env, \"Failed to create new VFS\").ThrowAsJavaScriptException();\n}\n\nNapi::Value USVFS::clearMappings(const Napi::CallbackInfo& info) {\n\tClearVirtualMappings();\n\n\treturn info.This();\n}\n\nNapi::Value USVFS::linkFile(const Napi::CallbackInfo& info) {\n\tNapi::Env env = info.Env();\n\tNapi::HandleScope scope(env);\n\n\tint length = info.Length();\n\n\tif (length < 2)\n\t\tNapi::Error::New(env, \"USVFS#LinkFile requires a source and destination\").ThrowAsJavaScriptException();\n\n\tif (! info[0].IsString())\n\t\tNapi::TypeError::New(env, \"source must be a string\").ThrowAsJavaScriptException();\n\n\tif (! info[1].IsString())\n\t\tNapi::TypeError::New(env, \"dest must be a string\").ThrowAsJavaScriptException();\n\n\tLPWSTR source = stringToLPWSTR(info[0].As<Napi::String>().Utf8Value().c_str());\n\tLPWSTR dest = stringToLPWSTR(info[1].As<Napi::String>().Utf8Value().c_str());\n\tunsigned int flags = NULL;\n\n\tif (length > 2) {\n\t\tif (! info[2].IsNumber())\n\t\t\tNapi::TypeError::New(env, \"flags must be a number\");\n\n\t\tflags = info[2].As<Napi::Number>().Int32Value();\n\t}\n\n\tbool worked = VirtualLinkFile(source, dest, flags);\n\n\treturn Napi::Boolean::New(env, worked);\n}\n\nNapi::Value USVFS::linkDirectoryStatic(const Napi::CallbackInfo& info) {\n\tNapi::Env env = info.Env();\n\tNapi::HandleScope scope(env);\n\n\tint length = info.Length();\n\n\tif (length < 2)\n\t\tNapi::Error::New(env, \"USVFS#LinkDirectoryStatic requires a source and destination\").ThrowAsJavaScriptException();\n\n\tif (! info[0].IsString())\n\t\tNapi::TypeError::New(env, \"source must be a string\").ThrowAsJavaScriptException();\n\n\tif (! info[1].IsString())\n\t\tNapi::TypeError::New(env, \"dest must be a string\").ThrowAsJavaScriptException();\n\n\tLPWSTR source = stringToLPWSTR(info[0].As<Napi::String>().Utf8Value().c_str());\n\tLPWSTR dest = stringToLPWSTR(info[1].As<Napi::String>().Utf8Value().c_str());\n\tunsigned int flags = NULL;\n\n\tif (length > 2) {\n\t\tif (! 
info[2].IsNumber())\n\t\t\tNapi::TypeError::New(env, \"flags must be a number\");\n\n\t\tflags = info[2].As<Napi::Number>().Int32Value();\n\t}\n\n\tbool worked = VirtualLinkDirectoryStatic(source, dest, flags);\n\n\treturn Napi::Boolean::New(env, worked);\n}\n\nNapi::Value USVFS::disconnect(const Napi::CallbackInfo& info) {\n\tDisconnectVFS();\n\n\treturn info.This();\n}\n\nNapi::Value USVFS::spawnSync(const Napi::CallbackInfo& info) {\n\tNapi::Env env = info.Env();\n\tNapi::HandleScope scope(env);\n\tSpawnOptions options;\n\n\t// get arguments\n\tint length = info.Length();\n\n\tif (length < 1)\n\t\tNapi::Error::New(env, \"USVFS#SpawnSync requires a command\").ThrowAsJavaScriptException();\n\n\tif (! info[0].IsString())\n\t\tNapi::TypeError::New(env, \"command must be a string\").ThrowAsJavaScriptException();\n\n\tif (length > 1) {\n\t\tif (info[1].IsObject()) options = getSpawnOptions(env, info[1].As<Napi::Object>());\n\t\telse Napi::Error::New(env, \"SpawnSync only accepts spawn options\").ThrowAsJavaScriptException();\n\t}\n\n\t// spawn process\n\tSTARTUPINFO si;\n\tPROCESS_INFORMATION pi;\n\n\tbool worked = spawnHookedProcess(info[0].As<Napi::String>(), options, &si, &pi);\n\n\tif (! worked)\n\t\tthrowLastError(env, \"USVFS::SpawnSync failed (%d)\");\n\n\t// wait for process to exit\n\tWaitForSingleObject(pi.hProcess, INFINITE);\n\n\t// close handles\n\tCloseHandle(pi.hProcess);\n\tCloseHandle(pi.hThread);\n\n\treturn Napi::Value();\n}\n\nNapi::Value USVFS::spawn(const Napi::CallbackInfo& info) {\n\tNapi::Env env = info.Env();\n\tNapi::HandleScope scope(env);\n\tSpawnOptions options;\n\n\tbool hasCallback = false;\n\tNapi::Function callback;\n\n\tLPWSTR command;\n\n\t// get arguments\n\tint length = info.Length();\n\n\tif (length < 1)\n\t\tNapi::Error::New(env, \"USVFS#Spawn requires a command\").ThrowAsJavaScriptException();\n\n\tif (! info[0].IsString())\n\t\tNapi::TypeError::New(env, \"command must be a string\");\n\n\tif (length > 1) {\n\t\tif (info[1].IsFunction()) {\n\t\t\thasCallback = true;\n\t\t\tcallback = info[1].As<Napi::Function>();\n\t\t} else if(info[1].IsObject()) options = getSpawnOptions(env, info[1].As<Napi::Object>());\n\t\telse Napi::Error::New(env, \"Spawn only accepts spawn options and or a callback\").ThrowAsJavaScriptException();\n\t}\n\n\tif (length > 2) {\n\t\tif (info[2].IsFunction()) {\n\t\t\thasCallback = true;\n\t\t\tcallback = info[2].As<Napi::Function>();\n\t\t} else if(info[2].IsObject()) options = getSpawnOptions(env, info[2].As<Napi::Object>());\n\t\telse Napi::Error::New(env, \"Spawn only accepts spawn options and or a callback\").ThrowAsJavaScriptException();\n\t}\n\n\tcommand = stringToLPWSTR(info[0].As<Napi::String>().Utf8Value().c_str());\n\n\t// spawn process\n\tSTARTUPINFO si;\n\tPROCESS_INFORMATION pi;\n\n\tbool worked = spawnHookedProcess(info[0].As<Napi::String>(), options, &si, &pi);\n\n\tif (! 
worked)\n\t\tthrowLastError(env, \"USVFS::SpawnSync failed (%d)\");\n\n\t// wait for process to exit\n\tif (hasCallback) {\n\t\tHANDLE waitHandle;\n\t\tThreadSafeCallback* ts_cb = new ThreadSafeCallback(callback);\n\n\t\tWAITORTIMERCALLBACK onExit = [](void* vPtr, BOOLEAN timedOut) {\n\t\t\tThreadSafeCallback* ts_cb = static_cast<ThreadSafeCallback*>(vPtr);\n\n\t\t\tts_cb->call();\n\n\t\t\tdelete ts_cb;\n\t\t};\n\n\t\tRegisterWaitForSingleObject(&waitHandle, pi.hProcess, onExit, ts_cb, INFINITE, WT_EXECUTEONLYONCE);\n\t}\n\n\t// close handles\n\tCloseHandle(pi.hProcess);\n\tCloseHandle(pi.hThread);\n\n\treturn info.This();\n}\n\n} // namespace node_usvfs\n" }, { "alpha_fraction": 0.7437499761581421, "alphanum_fraction": 0.7450000047683716, "avg_line_length": 25.66666603088379, "blob_id": "5528a6f5e76000cee67a9442c7c4bcce8c19a27a", "content_id": "fac71a59f29cd9dad57661d2f07a1ab2f215a616", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 800, "license_type": "permissive", "max_line_length": 66, "num_lines": 30, "path": "/src/cpp/node_usvfs.h", "repo_name": "rostrage/node_usvfs", "src_encoding": "UTF-8", "text": "#pragma once\n\n#define NAPI_VERSION 3\n\n#include <napi.h>\n#include <usvfs.h>\n#include <usvfsparameters.h>\n#include \"napi-thread-safe-callback.hpp\"\n\nnamespace node_usvfs {\n\nclass USVFS : public Napi::ObjectWrap<USVFS> {\n\tpublic:\n\t\tstatic Napi::Object Init(Napi::Env env, Napi::Object exports);\n\t\tUSVFS(const Napi::CallbackInfo& info);\n\n\tprivate:\n\t\tstatic Napi::FunctionReference constructor;\n\n\t\tNapi::Value clearMappings(const Napi::CallbackInfo& info);\n\t\tNapi::Value linkFile(const Napi::CallbackInfo& info);\n\t\tNapi::Value linkDirectoryStatic(const Napi::CallbackInfo& info);\n\t\tNapi::Value disconnect(const Napi::CallbackInfo& info);\n\t\tNapi::Value spawnSync(const Napi::CallbackInfo& info);\n\t\tNapi::Value spawn(const Napi::CallbackInfo& info);\n\n\t\tUSVFSParameters params_;\n};\n\n} // namespace node_usvfs\n" } ]
11
Anton-2/Adafruit_CircuitPython_WSGI
https://github.com/Anton-2/Adafruit_CircuitPython_WSGI
624dfcc574e1229c08f664359114c3a56a7051b7
fab6cbe73f43f85f085b91537afc943de31055e9
12ca307494860dc7e8e534f74e7a78a5893c502f
refs/heads/master
2022-11-30T09:15:29.552605
2020-07-08T20:49:05
2020-07-08T20:49:05
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6343395709991455, "alphanum_fraction": 0.6377649307250977, "avg_line_length": 35.20930099487305, "blob_id": "e1c2013b6723ecfa45175ef47fe38123dbe9f407", "content_id": "1747d735de8b9c070559cfc8825724582a4fc161", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4671, "license_type": "permissive", "max_line_length": 92, "num_lines": 129, "path": "/adafruit_wsgi/wsgi_app.py", "repo_name": "Anton-2/Adafruit_CircuitPython_WSGI", "src_encoding": "UTF-8", "text": "# The MIT License (MIT)\n#\n# Copyright (c) 2019 Matthew Costi for Adafruit Industries\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\"\"\"\n`wsgi_app`\n================================================================================\n\nCircuitPython framework for creating WSGI server compatible web applications.\nThis does *not* include server implementation, which is necessary in order\nto create a web application with this library.\n\n* Circuit Python implementation of an WSGI Server for ESP32 devices:\n https://github.com/adafruit/Adafruit_CircuitPython_ESP32SPI.git\n\n\n* Author(s): Matthew Costi\n\nImplementation Notes\n--------------------\n\n**Software and Dependencies:**\n\n* Adafruit CircuitPython firmware for the supported boards:\n https://github.com/adafruit/circuitpython/releases\n\n\"\"\"\n\nimport re\n\nfrom adafruit_wsgi.request import Request\n\n__version__ = \"0.0.0-auto.0\"\n__repo__ = \"https://github.com/adafruit/Adafruit_CircuitPython_WSGI.git\"\n\n\nclass WSGIApp:\n \"\"\"\n The base WSGI Application class.\n \"\"\"\n\n def __init__(self):\n self._routes = []\n self._variable_re = re.compile(\"^<([a-zA-Z]+)>$\")\n\n def __call__(self, environ, start_response):\n \"\"\"\n Called whenever the server gets a request.\n The environ dict has details about the request per wsgi specification.\n Call start_response with the response status string and headers as a list of tuples.\n Return a single item list with the item being your response data string.\n \"\"\"\n\n status = \"\"\n headers = []\n resp_data = []\n\n request = Request(environ)\n\n match = self._match_route(request.path, request.method.upper())\n\n if match:\n args, route = match\n status, headers, resp_data = route[\"func\"](request, *args)\n\n start_response(status, headers)\n return resp_data\n\n def on_request(self, methods, rule, request_handler):\n \"\"\"\n Register a Request Handler for a particular HTTP method and path.\n request_handler will be called 
whenever a matching HTTP request is received.\n\n request_handler should accept the following args:\n (Dict environ)\n request_handler should return a tuple in the shape of:\n (status, header_list, data_iterable)\n\n :param list methods: the methods of the HTTP request to handle\n :param str rule: the path rule of the HTTP request\n :param func request_handler: the function to call\n \"\"\"\n regex = \"^\"\n rule_parts = rule.split(\"/\")\n for part in rule_parts:\n var = self._variable_re.match(part)\n if var:\n # If named capture groups ever become a thing, use this regex instead\n # regex += \"(?P<\" + var.group(\"var\") + r\">[a-zA-Z0-9_-]*)\\/\"\n regex += r\"([a-zA-Z0-9_-]+)\\/\"\n else:\n regex += part + r\"\\/\"\n regex += \"?$\" # make last slash optional and that we only allow full matches\n self._routes.append(\n (re.compile(regex), {\"methods\": methods, \"func\": request_handler})\n )\n\n def route(self, rule, methods=None):\n \"\"\"\n A decorator to register a route rule with an endpoint function.\n if no methods are provided, default to GET\n \"\"\"\n if not methods:\n methods = [\"GET\"]\n return lambda func: self.on_request(methods, rule, func)\n\n def _match_route(self, path, method):\n for matcher, route in self._routes:\n match = matcher.match(path)\n if match and method in route[\"methods\"]:\n return (match.groups(), route)\n return None\n" }, { "alpha_fraction": 0.6088753938674927, "alphanum_fraction": 0.6110804677009583, "avg_line_length": 30.008546829223633, "blob_id": "46f26d6bbf4fe0d773519f6c6cdb1efbbcaed801", "content_id": "c6a22ff121def11925076b3983dccc5ea79a7f50", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3628, "license_type": "permissive", "max_line_length": 87, "num_lines": 117, "path": "/adafruit_wsgi/request.py", "repo_name": "Anton-2/Adafruit_CircuitPython_WSGI", "src_encoding": "UTF-8", "text": "# The MIT License (MIT)\n#\n# Copyright (c) 2019 Matthew Costi for Adafruit Industries\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\n\"\"\"\n`Request`\n================================================================================\n\n\n* Author(s): Matthew Costi\n\"\"\"\nimport re\n\n\nclass Request:\n \"\"\"\n An incoming HTTP request.\n A higher level abstraction of the raw WSGI Environ dictionary.\n \"\"\"\n\n def __init__(self, environ):\n self._method = environ[\"REQUEST_METHOD\"]\n self._path = environ[\"PATH_INFO\"]\n self._query_params = self.__parse_query_params(environ.get(\"QUERY_STRING\", \"\"))\n self._headers = self.__parse_headers(environ)\n self._body = environ[\"wsgi.input\"]\n self._wsgi_environ = environ\n\n @property\n def method(self):\n \"\"\"\n the HTTP Method Type of this request\n \"\"\"\n return self._method\n\n @property\n def path(self):\n \"\"\"\n the path this request was made to\n \"\"\"\n return self._path\n\n @property\n def query_params(self):\n \"\"\"\n Request query parameters, represented as a dictionary of\n param name to param value\n \"\"\"\n return self._query_params\n\n @property\n def headers(self):\n \"\"\"\n Request headers, represented as a dictionary of\n header name to header value\n \"\"\"\n return self._headers\n\n @property\n def body(self):\n \"\"\"\n The Request Body\n \"\"\"\n return self._body\n\n @property\n def wsgi_environ(self):\n \"\"\"\n The raw WSGI Environment dictionary representation of the request\n \"\"\"\n return self._wsgi_environ\n\n @staticmethod\n def __parse_query_params(query_string):\n param_list = query_string.split(\"&\")\n params = {}\n for param in param_list:\n key_val = param.split(\"=\")\n if len(key_val) == 2:\n params[key_val[0]] = key_val[1]\n return params\n\n @staticmethod\n def __parse_headers(environ):\n headers = {}\n\n # Content Type and Content Length headers\n # are stored in environ differently than other headers\n if \"CONTENT_TYPE\" in environ:\n headers[\"content-type\"] = environ[\"CONTENT_TYPE\"]\n if \"CONTENT_LENGTH\" in environ:\n headers[\"content-length\"] = environ[\"CONTENT_LENGTH\"]\n\n env_header_re = re.compile(r\"HTTP_(.+)\")\n for key, val in environ.items():\n header = env_header_re.match(key)\n if header:\n headers[header.group(1).replace(\"_\", \"-\").lower()] = val\n return headers\n" } ]
2
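The two records above make up the whole framework, and the wsgi_app.py docstrings spell out the handler contract: a handler receives the Request wrapper plus any captured path variables and must return a (status, header_list, data_iterable) tuple. A minimal usage sketch under those assumptions — the route, handler name, and query parameter below are illustrative, and the socket/server wiring that actually invokes the app is outside this record:

    from adafruit_wsgi.wsgi_app import WSGIApp

    web_app = WSGIApp()

    # "<state>" is captured by the rule-matching regex and passed positionally
    @web_app.route("/led/<state>", methods=["GET", "POST"])
    def led_handler(request, state):
        # e.g. GET /led/on?brightness=50 -> state == "on"
        level = request.query_params.get("brightness", "100")
        body = "led {} at {}%".format(state, level)
        return ("200 OK", [("Content-Type", "text/plain")], [body])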
Quanquanzhao/python
https://github.com/Quanquanzhao/python
0806da722034e72ca894e26e05fe8c04e68feac0
1955762857617a36189f6836ab4a11292146fb75
d37877141562f9c739eaccfb0db39e6c7be2ae2e
refs/heads/master
2020-09-04T21:58:02.897816
2018-02-23T12:39:45
2018-02-23T12:39:45
94,412,724
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.3656998872756958, "alphanum_fraction": 0.37452712655067444, "avg_line_length": 34.04545593261719, "blob_id": "fa9b4e1751b923c498aa96f29b2a908d475f038a", "content_id": "846f61049cbc0544c67c495591d1687346bc227c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 793, "license_type": "no_license", "max_line_length": 75, "num_lines": 22, "path": "/matchfile.py", "repo_name": "Quanquanzhao/python", "src_encoding": "UTF-8", "text": "# -*- coding:utf-8 -*-\r\n\r\nimport os\r\n\r\nif __name__ == '__main__':\r\n fname = \"C:\\code\\\\1.txt\"\r\n fnamesrc = \"C:\\code\\\\app.txt\"\r\n fdst = \"C:\\code\\\\2.txt\"\r\n with open(fname, 'r') as fobj:\r\n with open(fdst, 'a+') as ft:\r\n for eachline in fobj:\r\n flag = 0\r\n eachline = eachline.lower().rstrip().lstrip()\r\n with open(fnamesrc, 'r') as fsrc:\r\n for src in fsrc:\r\n if src.lower().find(eachline) != -1:\r\n newline = eachline + '\\t' + src.rstrip() + '\\n'\r\n ft.write(newline)\r\n flag = 1\r\n break\r\n if flag == 0:\r\n ft.write(eachline + '\\n')\r\n" } ]
1
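A side note on matchfile.py above: it reopens app.txt once for every line of 1.txt. A sketch of an equivalent pass that loads the lookup file a single time, keeping the same hypothetical C:\code file layout and the same case-insensitive substring test as the original:

    # -*- coding: utf-8 -*-
    fname = "C:\\code\\1.txt"
    fnamesrc = "C:\\code\\app.txt"
    fdst = "C:\\code\\2.txt"

    with open(fnamesrc, 'r') as fsrc:
        sources = [src.rstrip() for src in fsrc]  # read app.txt once up front

    with open(fname, 'r') as fobj, open(fdst, 'a+') as ft:
        for eachline in fobj:
            key = eachline.strip().lower()
            match = next((s for s in sources if key in s.lower()), None)
            if match:
                ft.write(key + '\t' + match + '\n')
            else:
                ft.write(key + '\n')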
tanveerkn/threeway-handshake-protocol-implementation
https://github.com/tanveerkn/threeway-handshake-protocol-implementation
02ab8367313550a93b3f60219d7df7dbd35f43d8
a197ba866e7ff9cf70b57cc815bf3dc454b17eda
68fecec68161cf7d878f6357ffc25cdc1166d987
refs/heads/master
2021-09-01T01:33:58.603815
2017-12-24T05:42:42
2017-12-24T05:42:42
114,562,929
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6323529481887817, "alphanum_fraction": 0.6617646813392639, "avg_line_length": 20.66666603088379, "blob_id": "7dd098c71347cd0a368cbeef6a798f4fbf87a966", "content_id": "44e92455524f5470216ed34bd3297b922cc7b8cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 136, "license_type": "no_license", "max_line_length": 45, "num_lines": 6, "path": "/test.py", "repo_name": "tanveerkn/threeway-handshake-protocol-implementation", "src_encoding": "UTF-8", "text": "from Voter import Voter\r\nimport random\r\n\r\nfor no in range(10):\r\n new_voter = Voter()\r\n new_voter.cast_vote(random.randint(0, 1))\r\n" }, { "alpha_fraction": 0.5248756408691406, "alphanum_fraction": 0.5298507213592529, "avg_line_length": 24.799999237060547, "blob_id": "0cfa7f0031e36e49bd4e7b1eede25a9566ec8b3a", "content_id": "72b2d14e549ac28c78737b32a55a8aad0e8484fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 402, "license_type": "no_license", "max_line_length": 48, "num_lines": 15, "path": "/Voter.py", "repo_name": "tanveerkn/threeway-handshake-protocol-implementation", "src_encoding": "UTF-8", "text": "from Verifier import Verifier\r\nfrom Counter import Counter\r\n\r\nclass Voter():\r\n __v_server = Verifier()\r\n __c_server = Counter()\r\n\r\n def cast_vote(self, vote):\r\n yes, no = self.__v_server.genrate_vote()\r\n if vote == 1:\r\n self.__c_server.add_vote(yes)\r\n elif vote == 0:\r\n self.__c_server.add_vote(no)\r\n else:\r\n print \"Invalid Input\"\r\n" }, { "alpha_fraction": 0.5553538799285889, "alphanum_fraction": 0.5862069129943848, "avg_line_length": 32.4375, "blob_id": "bccec22eb038dd688cb48f8f7298318b01192c6c", "content_id": "a5200485443ea74e30f21fc7196e531444c1b31b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1102, "license_type": "no_license", "max_line_length": 97, "num_lines": 32, "path": "/Verifier.py", "repo_name": "tanveerkn/threeway-handshake-protocol-implementation", "src_encoding": "UTF-8", "text": "import random\r\nfrom bson import objectid\r\nimport pickle\r\nfrom passlib.hash import sha512_crypt as CryptContext\r\nfrom demo_gmpy2 import test_encrypt\r\n\r\nclass Verifier:\r\n __verifiers = []\r\n\r\n def __init__(self):\r\n self.__verifiers = pickle.load(open(\"verfierData.pickle\", \"rb\"))\r\n\r\n def __add__voter(self, vote):\r\n self.__verifiers.append(vote)\r\n pickle.dump(self.__verifiers, open(\"verfierData.pickle\", \"wb\"))\r\n\r\n def genrate_vote(self):\r\n a_1 = 0\r\n a_2 = 1\r\n share_common = -(random.randint(1, 1000))\r\n share_0 = -(share_common)\r\n share_1 = share_0 + 1\r\n uid = ObjectId()\r\n env1 = CryptContext.encrypt(str(enc(0)) + str(enc(share_0)) + str(uid))\r\n env2 = CryptContext.encrypt(str(enc(1)) + str(enc(share_1)) + str(uid))\r\n self.__add__voter((env1, 0))\r\n self.__add__voter((env1, 1))\r\n return env1, env2\r\n\r\n def verifyVote(self, verifyVote):\r\n vote = [vote[1] for vote in self.__verifiers if CryptContext.verify(vote[0], verifyVote)]\r\n return vote[0] if len(vote) > 0 else -1\r\n" }, { "alpha_fraction": 0.55524080991745, "alphanum_fraction": 0.5892351269721985, "avg_line_length": 26.115385055541992, "blob_id": "ba554dafc20451cd9e0e5385d3209b3b07a33037", "content_id": "4313c6215608c0d87562ffd5cb6ad82887435795", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 706, "license_type": "no_license", 
"max_line_length": 57, "num_lines": 26, "path": "/PollingStations/Verifier.py", "repo_name": "tanveerkn/threeway-handshake-protocol-implementation", "src_encoding": "UTF-8", "text": "import random\nfrom bson import objectid\nimport pickle\nfrom passlib.hash import sha512_crypt as CryptContext\n\nclass Verifier():\n def attr_gen(self):\n a_1 = 0\n a_2 = 1\n share_common = -(random.randint(1, 1000))\n share_0 = -(share_common)\n share_1 = share_0 + 1\n x=objectid()\n def encryption(self):\n uid = ObjectId()\n env1 = str(enc(0)) + str(enc(share_0)) + str(uid)\n env2 = str(enc(1)) + str(enc(share_1)) + str(uid)\n def sig_gen(self):\n CryptContext.encrypt(env1)\n CryptContext.encrypt(env2)\n def sig_ver(self):\n CryptContext.verify(password, 'password')\n def decryption(self):\n\n\n def attr_ver(self):\n\n" }, { "alpha_fraction": 0.4974533021450043, "alphanum_fraction": 0.5042445063591003, "avg_line_length": 28.6842098236084, "blob_id": "7e7a0a8cc00a4fa29e89db228f48971c60146149", "content_id": "e5d82135bfc10dca6c329b17b24afeaa6333743e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 589, "license_type": "no_license", "max_line_length": 63, "num_lines": 19, "path": "/Counter.py", "repo_name": "tanveerkn/threeway-handshake-protocol-implementation", "src_encoding": "UTF-8", "text": "from Verifier import Verifier\r\nimport pickle\r\n\r\n\r\nclass Counter:\r\n verify_server = Verifier()\r\n\r\n def add_vote(self, vote):\r\n yes, no = pickle.load(open(\"votesCount.pickle\", \"rb\"))\r\n new = self.verify_server.verifyVote(verifyVote=vote)\r\n if new == 0:\r\n no += 1\r\n print \"Yes count : \" + yes + \"No count : \" + no\r\n elif new == 1:\r\n yes += 1\r\n print \"Yes count : \" + yes + \"No count : \" + no\r\n else:\r\n print \"inavalid vote\"\r\n pickle.dump((yes, no), open(\"votesCount.pickle\", \"wb\"))\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.5916666388511658, "alphanum_fraction": 0.6166666746139526, "avg_line_length": 14.125, "blob_id": "02a76de9b4afdca8ce1460393be023d969b77a2c", "content_id": "4ad7d0a3264f6158d0eff9611e41dbc4c1222320", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 120, "license_type": "no_license", "max_line_length": 34, "num_lines": 8, "path": "/PollingStations/Voter.py", "repo_name": "tanveerkn/threeway-handshake-protocol-implementation", "src_encoding": "UTF-8", "text": "import Verifier\n\nvotersList = []\n for voter in range(10):\n votersList.append(Voter())\n\nclass Voter():\n x_1=" }, { "alpha_fraction": 0.7419354915618896, "alphanum_fraction": 0.7419354915618896, "avg_line_length": 9.333333015441895, "blob_id": "8821bde70b1d429e2dd1a807fa45bd01395ee12c", "content_id": "bde8121006cafcf91ef9937de9d4b06581c4f6ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31, "license_type": "no_license", "max_line_length": 16, "num_lines": 3, "path": "/PollingStations/Counter.py", "repo_name": "tanveerkn/threeway-handshake-protocol-implementation", "src_encoding": "UTF-8", "text": "import Voter\n\nclass Counter():\n" } ]
7
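One caveat worth flagging for the repository above: passlib's simple-hash API takes the plaintext first. A sanity sketch of the sha512_crypt calls that Verifier.py builds on (encrypt() is the legacy alias of hash() in recent passlib releases; the payload string here is made up):

    from passlib.hash import sha512_crypt as CryptContext

    envelope = CryptContext.encrypt("vote-payload")        # hash of the secret
    assert CryptContext.verify("vote-payload", envelope)   # verify(secret, hash)
    assert not CryptContext.verify("tampered-payload", envelope)

Measured against that verify(secret, hash) signature, verifyVote() above passes the stored hash as the first argument, so its two arguments appear swapped.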
cwj214228/wangyiyunMusic
https://github.com/cwj214228/wangyiyunMusic
14adf2b4fe4b069c1da8c9cfcaedc1b21d44be02
d0331e3bc8b20385b48fba034cb87548a3d9d474
73b40870f22e356a8ccd7f47c4e5e3b6bb5b19cd
refs/heads/master
2020-04-30T07:00:57.104274
2019-03-22T15:53:56
2019-03-22T15:53:56
176,671,664
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.60628741979599, "alphanum_fraction": 0.6077844500541687, "avg_line_length": 23.740739822387695, "blob_id": "d567f51419d1875d3c918c5fae90b294d80b74bc", "content_id": "4779d4b1dab58d1f9787e3a5bd4b4caafd430d65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 676, "license_type": "no_license", "max_line_length": 65, "num_lines": 27, "path": "/wangyiyunMusic/pipelines.py", "repo_name": "cwj214228/wangyiyunMusic", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport xlsxwriter\nimport time\nfrom twisted.enterprise import adbapi\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nclass WangyiyunmusicPipeline(object):\n def open_spider(self, spider):\n pass\n\n def process_item(self, item, spider):\n self.f = open(\"D:/爬虫数据/\"+str(item['name'])+'.txt', 'a')\n try:\n line = item['content'] + '\\n'\n self.f.write(line)\n except:\n pass\n return item\n\n\n def close_spider(self, spider):\n self.f.close()\n pass\n" }, { "alpha_fraction": 0.5265247225761414, "alphanum_fraction": 0.5467340350151062, "avg_line_length": 36.43243408203125, "blob_id": "327d6fffeedb6eb7f340d7e1b2f5d0dc4b183ae1", "content_id": "b925a70899e81931ca6530a81ffa8cadc9e22ebc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3073, "license_type": "no_license", "max_line_length": 99, "num_lines": 74, "path": "/wangyiyunMusic/spiders/comment.py", "repo_name": "cwj214228/wangyiyunMusic", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport random\nimport pymysql\nfrom wangyiyunMusic.items import WangyiyunmusicItem, musiclistItem\n\n\nclass CommentSpider(scrapy.Spider):\n name = 'comment'\n allowed_domains = ['musicapi.leanapp.cn']\n start_urls = ['http://musicapi.leanapp.cn/top/list?idx=1']\n conn = pymysql.connect('localhost', 'root', '5201314', 'leecx',\n charset='utf8') # 有中文要存入数据库的话要加charset='utf8'\n # 创建游标\n cursor = conn.cursor()\n\n def parse(self, response):\n # 解析json数据,获得所有热歌的id\n text = response.body\n jsondata = json.loads(text.decode('utf-8'))\n musiclist = jsondata['playlist']['tracks']\n musicItem=musiclistItem()\n # 自制ip连接池,把有用的ip和它对应的端口放在队列中,一会方便随机调用\n IPPOOL = [\n {\"ipaddr\": \"139.196.90.80:80\"},\n {\"ipaddr\": \"117.191.11.107:80\"},\n {\"ipaddr\": \"59.49.72.138:80\"}\n ]\n\n # sql语句\n # 执行插入数据到数据库操作\n sql = \"delete from musiclist\"\n self.cursor.execute(sql)\n self.conn.commit()\n # 根据歌曲的id,爬取评论,每首歌爬取10页评论\n for music in musiclist:\n musicItem['id'] = music['id']\n musicItem['name'] = music['name']\n thisip = random.choice(IPPOOL)\n\n self.cursor.execute(\"insert into musiclist(id,name) values (%s,%s)\",\n (musicItem['id'], musicItem['name']))\n # 提交,不进行提交无法保存到数据库\n self.conn.commit()\n for offset in range(0,100):\n limit = '20'\n offset = offset + 1\n url = 'http://musicapi.leanapp.cn/comment/music?' 
+ \\\n 'id=' + str(musicItem['id']) + '&limit=' + limit + '&offset=' + str(offset)+\\\n '&proxy=http://'+thisip['ipaddr']+'/proxy.pac'\n yield scrapy.Request(url, callback=self.parse_getComment)\n\n # 这个方法用于解析评论的json数据,把解析好的数据打包发给pipeline。py进一步处理\n def parse_getComment(self, response):\n text=response.body\n x=response.request.url.split('&')[0]\n musicid=x.split('=')[1]\n sql = \"select name from musiclist where id=\" +musicid\n self.cursor.execute(sql)\n result = self.cursor.fetchone()\n jsondata = json.loads(text.decode('utf-8'))\n All_comments = jsondata['comments']\n item=WangyiyunmusicItem()\n for All_comment in All_comments:\n item['id']=musicid\n item['name'] = result\n # 用户名\n item['nickname'] = All_comment['user']['nickname'].replace(',', ',')\n # 用户ID\n item['userId'] = str(All_comment['user']['userId'])\n # 评论内容\n item['content'] = All_comment['content'].strip().replace('\\n', '').replace(',', ',')\n yield item\n\n" } ]
2
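The spider in the record above assumes a musiclist table already exists in the leecx database: it deletes from it and inserts into it before crawling. A minimal bootstrap sketch reusing the connection parameters the spider hard-codes; the column types are guesses inferred from the insert statement, not taken from the repository:

    import pymysql

    conn = pymysql.connect('localhost', 'root', '5201314', 'leecx', charset='utf8')
    cursor = conn.cursor()
    # id/name mirror the "insert into musiclist(id,name)" issued by the spider
    cursor.execute(
        "CREATE TABLE IF NOT EXISTS musiclist ("
        " id BIGINT PRIMARY KEY,"
        " name VARCHAR(255)"
        ") DEFAULT CHARSET=utf8"
    )
    conn.commit()
    conn.close()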
andrew-lundgren/pycbc_ml_working
https://github.com/andrew-lundgren/pycbc_ml_working
02b37f558e2a009d3b1b27eb8eb672426c2743a7
94707dc83f174933e72c88a5965b833de1b3705c
51ef69838ed01a920136a005350ece25d59d95a5
refs/heads/master
2021-01-11T19:18:27.982043
2017-05-23T13:43:05
2017-05-23T13:43:05
79,350,302
0
0
null
2017-01-18T14:54:55
2017-01-09T17:15:21
2017-01-18T14:29:32
null
[ { "alpha_fraction": 0.6036974787712097, "alphanum_fraction": 0.6238709092140198, "avg_line_length": 33.09856414794922, "blob_id": "4b5969e9a963a1871795feb89129626da5cc4c8e", "content_id": "52742799438de8579b62727e3e5584ad31aa99cc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16606, "license_type": "no_license", "max_line_length": 136, "num_lines": 487, "path": "/pycbc_qtransform/q-transform.py", "repo_name": "andrew-lundgren/pycbc_ml_working", "src_encoding": "UTF-8", "text": "# Copyright (C) 2017 Hunter A. Gabbard\n#\n# This program is free software; you can redistribute it and/or modify it\n# under the terms of the GNU General Public License as published by the\n# Free Software Foundation; either version 3 of the License, or (at your\n# option) any later version.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General\n# Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along\n# with this program; if not, write to the Free Software Foundation, Inc.,\n# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n\n#\n# =============================================================================\n#\n# Preamble\n#\n# =============================================================================\n#\n\n\"\"\"\nThis module retrives a timeseries and then calculates the q-transform of that time series\n\"\"\"\n\nfrom math import pi, ceil, log, exp\nimport numpy as np\nfrom pycbc.types.timeseries import FrequencySeries, TimeSeries\nimport os, sys\nfrom pycbc.frame import read_frame\nfrom pycbc.filter import highpass_fir, matched_filter\nfrom pycbc.waveform import get_fd_waveform\nfrom pycbc.psd import welch, interpolate\nfrom pycbc.fft import ifft\nimport urllib\nimport datetime\nfrom scipy.interpolate import (interp2d, InterpolatedUnivariateSpline)\nfrom numpy import fft as npfft\nimport argparse\nimport datetime\nimport scipy\n\nfrom matplotlib import use\nuse('Agg')\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import BoundaryNorm\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib.pyplot import specgram\n\n__author__ = 'Hunter Gabbard <[email protected]>'\n__credits__ = 'Duncan Macleod <[email protected]>'\n\n\ndef plotter_test(qplane, out_dir, now, frange, h1, sampling):\n \"\"\"\n Parameters\n \"\"\"\n\n # plot a spectrogram of the q-plane with the loudest normalized tile energy\n\n dx = 0.001 #time resolution \n dy = 1 #frequency resolution\n dur = int(len(h1)) / sampling #duration of analysis period in seconds\n\n # generate 2 2d grids for the x & y bounds\n y, x = np.mgrid[slice(int(frange[0]), int(frange[1]), dy), # Should replace zero/dur with start/end times\n slice(0, dur, dx)]\n z = qplane\n\n # x and y are bounds, so z should be the value *inside* those bounds.\n # Therefore, remove the last value from the z array.\n levels = MaxNLocator(nbins=15).tick_values(z.min(), z.max())\n\n # pick the desired colormap, sensible levels, and define a normalization\n # instance which takes data values and translates those into levels.\n cmap = plt.get_cmap('PiYG')\n norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\n\n fig, ax0 = plt.subplots()\n\n im = ax0.pcolormesh(x, y, z, cmap=cmap, norm=norm)\n fig.colorbar(im, ax=ax0)\n ax0.set_title('pcolormesh with levels')\n\n # adjust spacing 
between subplots so `ax1` title and `ax0` tick labels\n # don't overlap\n fig.tight_layout()\n\n plt.savefig('%s/run_%s/spec.png' % (out_dir,now))\n\n\ndef plotter(qplane, out_dir, now, frange, h1, sampling):\n \"\"\"\n Parameters\n \"\"\"\n\n # plot a spectrogram of the q-plane with the loudest normalized tile energy\n\n dx = 0.001 #time resolution \n dy = 0.1 #frequency resolution\n dur = int(len(h1)) / sampling #duration of analysis period in seconds\n\n # generate 2 2d grids for the x & y bounds\n y, x = np.mgrid[slice(int(frange[0]), int(frange[1]), dy), # Should replace zero/dur with start/end times\n slice(-dur / 2, dur / 2, dx)]\n z = qplane\n\n # x and y are bounds, so z should be the value *inside* those bounds.\n # Therefore, remove the last value from the z array.\n levels = MaxNLocator(nbins=15).tick_values(z.min(), z.max())\n\n # pick the desired colormap, sensible levels, and define a normalization\n # instance which takes data values and translates those into levels.\n cmap = plt.get_cmap('plasma') #PiYG\n norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\n\n fig, ax0 = plt.subplots()\n\n im = ax0.pcolormesh(x, y, z, cmap=cmap, norm=norm)\n fig.colorbar(im, ax=ax0)\n ax0.set_title('pcolormesh with levels')\n\n # adjust spacing between subplots so `ax1` title and `ax0` tick labels\n # don't overlap\n fig.tight_layout()\n\n plt.savefig('%s/run_%s/spec.png' % (out_dir,now))\n\ndef Qplane(qplane_tile_dict, h1, sampling, normalized, out_dir, now, frange):\n \"\"\"\n Parameters\n \"\"\"\n\n # perform q-transform on each tile for each q-plane and pick out the tile that has the largest normalized energy \n # store q-transforms of each tile in a dict\n qplane_qtrans_dict = {}\n tres=.001\n fres = 0.1\n dur = int(len(h1)) / sampling\n\n max_norm_energy = [] \n for i, key in enumerate(qplane_tile_dict):\n print key\n norm_energies_lst=[]\n for tile in qplane_tile_dict[key]:\n norm_energies = qtransform(h1, tile[1], tile[0], sampling, normalized)\n norm_energies_lst.append(norm_energies)\n if i == 0:\n max_norm_energy.append(max(norm_energies))\n max_norm_energy.append(tile)\n max_norm_energy.append(key)\n elif max(norm_energies) > max_norm_energy[0]:\n max_norm_energy[0] = max(norm_energies)\n max_norm_energy[1] = tile\n max_norm_energy[2] = key\n max_norm_energy[3] = norm_energies\n qplane_qtrans_dict[key] = np.array(norm_energies_lst)\n\n # build regular Spectrogram from peak-Q data by interpolating each\n # if you get lost, refer to https://github.com/gwpy/gwpy/blob/44d8d6381d2d03fb5d0b7d5484885512f9b841b1/gwpy/timeseries/timeseries.py\n # line 1780\n # (Q, frequency) `TimeSeries` to have the same time resolution\n\n\n # record peak q calculate above and q-transform output for peak q\n peakq = max_norm_energy[1][1]\n norm = qplane_qtrans_dict[max_norm_energy[2]] \n \n #create time array\n #time_array = np.zeros(int(dur / tres))\n #for idx, i in enumerate(time_array): \n # time_array[idx] = idx\n\n # interpolate rows for better time resolution\n interp_norm = []\n for i, row in enumerate(norm):\n #row_arry = np.zeros(len(row))\n #for idx, j in enumerate(row_arry): \n # row_arry[idx] = idx\n time_array = np.linspace(0,len(row),int(dur / tres))\n row_arry = np.linspace(0,len(row),len(row))\n interp = InterpolatedUnivariateSpline(row_arry, row) #Originally used this function: InterpolatedUnivariateSpline\n interp_norm.append(interp(time_array))\n #plt.figure(i)\n #plt.plot(interp(time_array))\n #plt.savefig('test/plot_%s.png' % (i))\n #plt.close()\n\n # then interpolate the 
spectrogram to increase the frequency resolution\n if fres is None: # unless user tells us not to\n return inter_norm\n else:\n # initialize some variables\n #time_array = np.zeros(int(dur / tres))\n #for idx, i in enumerate(time_array): \n # time_array[idx] = idx\n time_array = np.linspace(-int(dur / tres),int(dur / tres),int(dur / tres))\n time_null_array = np.zeros(int(dur / tres))\n frequencies = []\n for idx, i in enumerate(qplane_tile_dict[max_norm_energy[2]]):\n frequencies.append(i[0])\n\n # 2-D interpolation\n time_array = np.linspace(0,int(dur / tres),int(dur / tres))\n interp = interp2d(time_array, frequencies, interp_norm,\n kind='cubic')\n f2 = np.arange(int(frange[0]), int(frange[1]), fres)\n\n # this is the last part you need to fix\n out = interp(time_array, f2)\n \n return out \n\ndef qtiling(h1, qrange, frange, sampling, normalized, mismatch):\n \"\"\"\n Parameters\n \"\"\"\n\n deltam = deltam_f(mismatch)\n qrange = (float(qrange[0]), float(qrange[1]))\n frange = [float(frange[0]), float(frange[1])]\n dur = int(len(h1)) / sampling # length of your data chunk in seconds ... self.duration\n qplane_tile_dict = {}\n\n qs = list(_iter_qs(qrange, deltam))\n if frange[0] == 0: # set non-zero lower frequency\n frange[0] = 50 * max(qs) / (2 * pi * dur)\n if np.isinf(frange[1]): # set non-infinite upper frequency\n frange[1] = sampling / 2 / (1 + 11**(1/2.) / min(qs))\n\n #lets now define the whole tiling (e.g. choosing all tiling in planes)\n for q in qs:\n qtilefreq = np.array(list(_iter_frequencies(q, frange, mismatch, dur)))\n qlst = np.empty(len(qtilefreq), dtype=float)\n qlst.fill(q)\n qtiles_array = np.vstack((qtilefreq,qlst)).T\n qplane_tiles_list = list(map(tuple,qtiles_array))\n qplane_tile_dict[q] = qplane_tiles_list \n\n return qplane_tile_dict, frange\n\ndef deltam_f(mismatch):\n \"\"\"Fractional mismatch between neighbouring tiles\n :type: `float`\n \"\"\"\n return 2 * (mismatch / 3.) ** (1/2.)\n\n\ndef _iter_qs(qrange, deltam):\n \"\"\"Iterate over the Q values\n \"\"\"\n\n # work out how many Qs we need\n cumum = log(qrange[1] / qrange[0]) / 2**(1/2.)\n nplanes = int(max(ceil(cumum / deltam), 1))\n dq = cumum / nplanes\n for i in xrange(nplanes):\n yield qrange[0] * exp(2**(1/2.) * dq * (i + .5))\n raise StopIteration()\n\ndef _iter_frequencies(q, frange, mismatch, dur):\n \"\"\"Iterate over the frequencies of this `QPlane`\n \"\"\"\n # work out how many frequencies we need\n minf, maxf = frange\n fcum_mismatch = log(maxf / minf) * (2 + q**2)**(1/2.) / 2.\n nfreq = int(max(1, ceil(fcum_mismatch / deltam_f(mismatch))))\n fstep = fcum_mismatch / nfreq\n fstepmin = 1 / dur\n # for each frequency, yield a QTile\n for i in xrange(nfreq):\n yield (minf *\n exp(2 / (2 + q**2)**(1/2.) * (i + .5) * fstep) //\n fstepmin * fstepmin)\n raise StopIteration()\n\ndef qtransform(data, Q, f0, sampling, normalized):\n\n \"\"\"\n Parameters\n ----------\n data : `LIGO gwf frame file`\n raw time-series data set\n normalized : `bool`, optional\n normalize the energy of the output, if `False` the output\n is the complex `~numpy.fft.ifft` output of the Q-tranform\n f0 :\n central frequency\n sampling :\n sampling frequency of channel\n normalized:\n normalize output tile energies? \n \"\"\"\n\n #Q-transform data for each (Q, frequency) tile\n\n #Initialize parameters\n qprime = Q / 11**(1/2.) # ... self.qprime\n dur = int(len(data)) / sampling # length of your data chunk in seconds ... 
self.duration\n fseries = TimeSeries.to_frequencyseries(data)\n \n #Window fft\n window_size = 2 * int(f0 / qprime * dur) + 1\n \n #Get indices\n indices = _get_indices(window_size)\n\n #Apply window to fft\n windowed = fseries[get_data_indices(dur, f0, indices)] * get_window(dur, indices, f0, qprime, Q, sampling)\n\n # pad data, move negative frequencies to the end, and IFFT\n padded = np.pad(windowed, padding(window_size, dur, f0, Q), mode='constant')\n wenergy = npfft.ifftshift(padded)\n\n # return a `TimeSeries`\n wenergy = FrequencySeries(wenergy, delta_f=sampling)\n tdenergy = FrequencySeries.to_timeseries(wenergy)\n cenergy = TimeSeries(tdenergy,\n delta_t=1, copy=False) # Normally delta_t is dur/tdenergy.size ... must figure out better way of doing this\n if normalized:\n energy = type(cenergy)(\n cenergy.real() ** 2. + cenergy.imag() ** 2.,\n delta_t=1, copy=False)\n meanenergy = energy.numpy().mean()\n result = energy / meanenergy\n else:\n result = cenergy\n \n return result\n\ndef padding(window_size, dur, f0, Q):\n \"\"\"The `(left, right)` padding required for the IFFT\n :type: `tuple` of `int`\n \"\"\"\n pad = n_tiles(dur,f0,Q) - window_size\n return (int((pad - 5)/2.), int((pad + 5)/2.))\n\ndef get_data_indices(dur, f0, indices):\n \"\"\"Returns the index array of interesting frequencies for this row\n \"\"\"\n return np.round(indices + 1 +\n f0 * dur).astype(int)\n\ndef _get_indices(window_size):\n\n half = int((int(window_size) - 1.) / 2.) \n return np.arange(-half, half + 1)\n\ndef get_window(dur, indices, f0, qprime, Q, sampling):\n \"\"\"Generate the bi-square window for this row\n Returns\n -------\n window : `numpy.ndarray`\n \"\"\"\n # real frequencies\n wfrequencies = indices / dur\n\n # dimensionless frequencies\n xfrequencies = wfrequencies * qprime / f0\n\n # normalize and generate bi-square window\n norm = n_tiles(dur,f0,Q) / (dur * sampling) * (\n 315 * qprime / (128 * f0)) ** (1/2.)\n return (1 - xfrequencies ** 2) ** 2 * norm\n\ndef n_tiles(dur,f0,Q):\n \"\"\"The number of tiles in this row \n\n :type: 'int'\n \"\"\"\n \n\n tcum_mismatch = dur * 2 * pi * f0 / Q \n return next_power_of_two(tcum_mismatch / deltam())\n\ndef next_power_of_two(x):\n \"\"\"Return the smallest power of two greater than or equal to `x`\n \"\"\"\n return 2**(ceil(log(x, 2)))\n\ndef deltam():\n \"\"\"Fractional mismatch between neighbouring tiles\n :type: `float`\n \"\"\"\n mismatch = 0.2\n return 2 * (mismatch / 3.) 
** (1/2.)\n\ndef main():\n #Get Current time\n cur_time = datetime.datetime.now()\n\n #construct the argument parse and parse the arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-u\", \"--usertag\", required=False, default=cur_time,\n help=\"label for given run\")\n ap.add_argument(\"-o\", \"--output-dir\", required=False,\n help=\"path to output directory\")\n ap.add_argument(\"-n\", \"--normalize\", required=False, default=True,\n help=\"normalize the energy of the output\")\n ap.add_argument(\"-s\", \"--samp-freq\", required=True, type=float,\n help=\"Sampling frequency of channel\")\n ap.add_argument(\"-f\", \"--freq-res\", required=False, type=float,\n default=0.1, help=\"frequency resolution\")\n ap.add_argument(\"-t\", \"--t-res\", required=False, type=float,\n default=0.001, help=\"time resolution\")\n\n args = ap.parse_args()\n\n\n #Initialize parameters\n out_dir = args.output_dir\n now = args.usertag\n os.makedirs('%s/run_%s' % (out_dir,now)) # Fail early if the dir already exists\n normalized = args.normalize # Set this as needed\n sampling = args.samp_freq #sampling frequency\n mismatch=.2\n qrange=(4,64)\n frange=(0,np.inf)\n\n # Read data and remove low frequency content\n fname = 'H-H1_LOSC_4_V2-1126259446-32.gwf'\n url = \"https://losc.ligo.org/s/events/GW150914/\" + fname\n urllib.urlretrieve(url, filename=fname)\n h1 = read_frame('H-H1_LOSC_4_V2-1126259446-32.gwf', 'H1:LOSC-STRAIN')\n #h1 = TimeSeries(np.random.normal(size=64*4096), delta_t = 1. / sampling)\n h1 = highpass_fir(h1, 15, 8)\n\n # Calculate the noise spectrum\n psd = interpolate(welch(h1), 1.0 / 32)\n\n # Diagnostic q-transform test\n #import scipy\n #h1 = TimeSeries(scipy.signal.gausspulse(np.linspace(-8.,8.,16*sampling)), delta_t = 1. / sampling)\n\n \"\"\"\n #h1 = qtransform(h1, 20, 50, sampling, normalized)\n t = np.linspace(0,64,64)\n f = np.linspace(0,64,64)\n dt = 1\n df = 1\n y, x = np.mgrid[slice(f.min(), f.max(), df), slice(t.min(), t.max(), dt)]\n def e_func(t, f):\n return np.exp(-0.1*(t-32)**2 - 0.1*(f-32)**2)\n z = e_func(y, x)\n\n # x and y are bounds, so z should be the value *inside* those bounds.\n # Therefore, remove the last value from the z array.\n levels = MaxNLocator(nbins=15).tick_values(z.min(), z.max())\n\n # pick the desired colormap, sensible levels, and define a normalization\n # instance which takes data values and translates those into levels.\n cmap = plt.get_cmap('PiYG')\n norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)\n\n fig, ax0 = plt.subplots()\n\n im = ax0.pcolormesh(x, y, z, cmap=cmap, norm=norm)\n fig.colorbar(im, ax=ax0)\n ax0.set_title('pcolormesh with levels')\n\n # adjust spacing between subplots so `ax1` title and `ax0` tick labels\n # don't overlap\n #fig.tight_layout()\n\n\n plt.savefig('/Users/hugabb/pycbc_detection_statistic/pycbc_qtransform/plotter_test.png')\n plt.close()\n sys.exit()\n \"\"\"\n\n #perform Q-tiling\n Qbase, frange = qtiling(h1, qrange, frange, sampling, normalized, mismatch)\n\n #Choose Q-plane and plot\n qplane = Qplane(Qbase, h1, sampling, normalized, out_dir, now, frange)\n\n #Plot spectrogram\n plotter(qplane, out_dir, now, frange, h1, sampling)\n\n print 'Done!'\n\nif __name__ == '__main__':\n main()\n" } ]
1
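To exercise the q-transform script above without downloading the GWOSC frame file, the commented-out diagnostic in main() points at a synthetic route. A sketch along those lines, assuming the file's own functions (qtiling, Qplane, plotter) have been imported or pasted into the session; the output directory and usertag are arbitrary, and the argument order follows the definitions in the record:

    import os
    import numpy as np
    import scipy.signal
    from pycbc.types.timeseries import TimeSeries

    sampling = 4096.
    pulse = scipy.signal.gausspulse(np.linspace(-8., 8., int(16 * sampling)), fc=64)
    h1 = TimeSeries(pulse, delta_t=1. / sampling)

    os.makedirs('out/run_demo')  # plotter() saves into <out_dir>/run_<usertag>/
    tiles, frange = qtiling(h1, (4, 64), (0, np.inf), sampling, True, 0.2)
    qplane = Qplane(tiles, h1, sampling, True, 'out', 'demo', frange)
    plotter(qplane, 'out', 'demo', frange, h1, sampling)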
sebdumancic/knorf_aaai21
https://github.com/sebdumancic/knorf_aaai21
eac4fae00d09893eac6a234e25e3eb98f74021b0
3c72a5a788b0676316362a89eea5489a60c4dac0
bdbc9afe9fe4ae9f24ca027215de3bd394311760
refs/heads/main
2023-03-07T16:27:36.353999
2021-02-26T16:18:52
2021-02-26T16:18:52
342,615,905
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.5218045115470886, "alphanum_fraction": 0.5218045115470886, "avg_line_length": 13.777777671813965, "blob_id": "7b3055f2453a7fc302d8915fa78b1847971f567f", "content_id": "2cc4c27d2c85ffb5aca86a540f22c9342f62c4c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "no_license", "max_line_length": 36, "num_lines": 45, "path": "/loreleai/language/lp/__init__.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "from .lp import ClausalTheory, parse\nfrom ..commons import (\n Term,\n Constant,\n Variable,\n Structure,\n Predicate,\n Type,\n Not,\n Type,\n Theory,\n c_pred,\n c_const,\n c_id_to_const,\n c_var,\n c_literal,\n Literal,\n Clause,\n Recursion,\n Disjunction,\n are_variables_connected\n)\n\n__all__ = [\n \"Term\",\n \"Constant\",\n \"Variable\",\n \"Structure\",\n \"Predicate\",\n \"Type\",\n \"Not\",\n \"Type\",\n \"Theory\",\n \"ClausalTheory\",\n \"c_pred\",\n \"c_const\",\n \"c_id_to_const\",\n \"c_var\",\n \"c_literal\",\n \"Clause\",\n \"Literal\",\n 'Recursion',\n 'Disjunction',\n \"are_variables_connected\"\n]\n" }, { "alpha_fraction": 0.5470016002655029, "alphanum_fraction": 0.5559157133102417, "avg_line_length": 27.045454025268555, "blob_id": "dd72abc2a3ca9816974b4fc7dc5641c872b67764", "content_id": "3dd9f9a7476ac40672e092a46e0c4ae6c9529313", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3702, "license_type": "no_license", "max_line_length": 82, "num_lines": 132, "path": "/tests/restructuring_test.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "import os\n\nfrom loreleai.language.lp import parse, ClausalTheory\nfrom loreleai.learning.restructuring import Restructor\n\n\nclass RestructuringMethods:\n def candidate_generation(self):\n clauses = [\n \"t1(X,Y) :- a(X,Y), b(X,Y), c(X)\",\n \"t2(X,Y) :- a(X,Y), b(Y,Z), c(Z)\",\n \"t3(X,Y) :- b(X,Z), d(X, Y), c(X)\",\n ]\n\n clauses = [parse(x) for x in clauses]\n theory = ClausalTheory(clauses)\n\n restruct = Restructor(max_literals=2)\n\n cands = restruct._get_candidates(theory)\n\n assert len(cands) == 4\n\n all_cands = set()\n\n for p in cands:\n all_cands = all_cands.union(cands[p])\n\n assert len(all_cands) == 7\n\n def simple_encoding_restructuring_space(self):\n clauses = [\"t1(X,Y) :- a(X,Y), b(X,Y), c(X)\"]\n clauses = [parse(x) for x in clauses]\n theory = ClausalTheory(clauses)\n\n restruct = Restructor(max_literals=2)\n all_cands = restruct._get_candidates(theory)\n cands = restruct._encode_theory(theory, all_cands)\n\n assert len(cands) > 0\n\n assert str(list(cands.keys())[0]) == str(clauses[0])\n\n assert len(cands[clauses[0]]) == 3\n\n def restructuring_encoding_theory(self):\n clauses = [\n \"t1(X,Y) :- a(X,Y), b(X,Y), c(X)\",\n \"t2(X,Y) :- a(X,Y), b(Y,Z), c(Z)\",\n \"t3(X,Y) :- b(X,Z), d(X,Y), c(X)\",\n ]\n clauses = [parse(x) for x in clauses]\n theory = ClausalTheory(clauses)\n\n restruct = Restructor(max_literals=2)\n all_cands = restruct._get_candidates(theory)\n cands = restruct._encode_theory(theory, all_cands)\n\n assert len(cands) > 0\n\n assert len(cands) == 3\n\n distinct_candidates = set()\n\n for p in all_cands:\n distinct_candidates = distinct_candidates.union(all_cands[p])\n\n assert len(distinct_candidates) == 7\n\n def restructuring_no_redundancy(self):\n clauses = [\n \"t1(X,Y) :- a(X,Y), b(X,Y), c(X)\",\n \"t2(X,Y) :- a(X,Y), b(Y,Z), c(Z)\",\n \"t3(X,Y) :- b(X,Z), d(X,Y), c(X)\",\n 
]\n\n clauses = [parse(x) for x in clauses]\n theory = ClausalTheory(clauses)\n\n restruct = Restructor(max_literals=2)\n all_cands = restruct._get_candidates(theory)\n cands = restruct._encode_theory(theory, all_cands)\n\n redunds, _ = restruct._find_redundancies(cands)\n\n assert len(redunds) == 0\n\n def restructuring_redundancy(self):\n clauses = [\n \"t1(X,Y) :- a(X,Y), b(X,Y), c(X)\",\n \"t2(X,Y) :- a(X,Y), b(Y,Z), c(Z)\",\n \"t3(X,Y) :- a(X,Y), b(X,Y), d(X,Z), c(X)\",\n ]\n\n clauses = [parse(x) for x in clauses]\n theory = ClausalTheory(clauses)\n\n restruct = Restructor(max_literals=2)\n\n all_cands = restruct._get_candidates(theory)\n cands = restruct._encode_theory(theory, all_cands)\n\n redunds, _ = restruct._find_redundancies(cands)\n\n # for k, v in redunds.items():\n # print(k, v)\n\n assert len(redunds) == 4\n\n def restructuring_unfold(self):\n theory_file = (\n os.path.dirname(__file__) + \"/../data/restructuring/robots_example.pl\"\n )\n theory = ClausalTheory(read_from_file=theory_file)\n\n unfolded_theory = theory.unfold()\n\n assert len(unfolded_theory) == 170\n\n\ndef test_restructuring():\n test = RestructuringMethods()\n\n test.candidate_generation()\n test.simple_encoding_restructuring_space()\n test.restructuring_encoding_theory()\n test.restructuring_no_redundancy()\n test.restructuring_redundancy()\n test.restructuring_unfold()\n\n\n# test_restructuring()\n" }, { "alpha_fraction": 0.7166666388511658, "alphanum_fraction": 0.7166666388511658, "avg_line_length": 19.33333396911621, "blob_id": "23703072948930a5b15fa45da5951134285de01e", "content_id": "a2a30ce4dc25a9e4f671d2eb9122edf9f681006f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 60, "license_type": "no_license", "max_line_length": 34, "num_lines": 3, "path": "/loreleai/reasoning/lp/kanren/__init__.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "from .minikanren import MiniKanren\n\n__all__ = ['MiniKanren']" }, { "alpha_fraction": 0.751937985420227, "alphanum_fraction": 0.751937985420227, "avg_line_length": 31.25, "blob_id": "77e100452d3aed484d71407f10efb9d61ee4630d", "content_id": "40414e5a8f3e9f09588978c0ec411c5449b8b430", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 50, "num_lines": 4, "path": "/loreleai/reasoning/lp/datalog/__init__.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "#from loreleai.reasoning.lp.datalog.muz import MuZ\nfrom .datalogsolver import DatalogSolver\n\n#__all__ = ['MuZ', 'DatalogSolver']\n" }, { "alpha_fraction": 0.7118155360221863, "alphanum_fraction": 0.7238232493400574, "avg_line_length": 31.53125, "blob_id": "52a17ffd7b55496f62dac49f412fbc7074bfb16d", "content_id": "506cb60da70c1ec763b77d026b4f4d613aa30a0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2082, "license_type": "no_license", "max_line_length": 206, "num_lines": 64, "path": "/README.md", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "This repository contains the experiments for the paper:\n\nS. Dumancic, T. Guns, and A. Cropper: [Knowledge Refactoring for Inductive Program Synthesis](https://arxiv.org/abs/2004.09931). 
AAAI '21\n\n\nFor any questions, contact [Sebastijan](https://sebdumancic.github.io)\n\n\n\n\n\n### Reproducing results\n\n\n\n\nTo reproduce the experiments, you need to perform several steps:\n 1. **Generate and learn play tasks**\n \n Go to the `playgol_experiments` folder and follow the instructions. This is a slight modification of [Playgol](http://andrewcropper.com/pubs/ijcai19-playgol.pdf), with clause deduplication disabled.\n \n **NOTE: this step is not needed to reproduce the experiments. This step essentially produces data; you should use the same data as in the paper.**\n \n 2. **Refactor programs**\n \n Go to the `experiments/knorf` folder.\n\n Run `python refactor_programs.py strings pncorrect \"thr:8\"` to refactor string transformation programs (`thr:8` means \"use 8 threads\"). To refactor `lego` programs, replace `strings` with `lego`.\n\n 3. **Learn programs**\n\n Go to the `experiments/knorf/runners` folder. For string transformation experiments, go further to the `strings` folder; for Lego tasks, go to the `lego` folder.\n\n Run `python runner.py learn-build-p \"\"` to solve the build/target tasks.\n\n 4. **Test the programs**\n\n Run `python runner.py test \"\"` in the same folder.\n\n 5. **Obtain results**\n\n To get results in JSON format, run `python runner.py results \"\"`.\n\n To get program sizes, replace `results` with `size`. To get the runtimes, replace `results` with `runtime-trial`.\n\n\n\n\n\n\n### Results with redundancies removed in a naive way\n\n\nTo replicate the results with naive removal of redundancies (i.e., introduce a new predicate for each redundancy in the program), follow these steps:\n\n 1. **Remove redundancies**\n\n Go to the `experiments/knorf` folder.\n\n Run `python compress_redundancies.py strings pncorrect \"\" ` (use `lego` for Lego experiments).\n\n 2. 
**Repeat steps 3-5 from the procedure above**\n\n Use `runner_nonredundant.py` instead of `runner.py`\n" }, { "alpha_fraction": 0.5731860995292664, "alphanum_fraction": 0.5796530246734619, "avg_line_length": 36.964073181152344, "blob_id": "f49ddcf4f84013a8c81c2b56ed06ca357808a293", "content_id": "1d9246e7f9c1aebdb9ac7f7b40ca5fb98237bcab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6340, "license_type": "no_license", "max_line_length": 233, "num_lines": 167, "path": "/experiments/knorf/compress_redundancies.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "import datetime\nimport os\nimport sys\nfrom itertools import combinations\nfrom functools import reduce\n\nfrom loreleai.language.lp import ClausalTheory\nfrom loreleai.learning.restructuring import Restructor\nfrom loreleai.language.commons import are_variables_connected, Literal, Predicate, Clause\n\nROOT_FOLDER = os.path.abspath(os.path.dirname(__file__))\nTRIAL = [1]\nNUMBER_PLAY_TASKS = list(range(200, 4200, 200)) #[200, 400, 600, 800, 1000]\nTHREADS = 4\nMAX_TIME_S = 1 * 2 * 60 # hour * min * sec\n\n\ndef remove_redundancies(input_theory_file, output_theory_file):\n theory = ClausalTheory(read_from_file=input_theory_file)\n\n redundancy_counts = {}\n\n for cl in theory.get_formulas():\n enc = cl.get_atoms()\n for l in range(2, len(enc) + 1):\n for env_cmb in combinations(enc, l):\n if not are_variables_connected(env_cmb):\n continue\n\n #env_cmb = sorted(env_cmb, key=lambda x: x.get_predicate().get_name())\n\n # order variables\n var_indices = {}\n for atm in env_cmb:\n for v in atm.get_variables():\n if v not in var_indices:\n var_indices[v] = len(var_indices)\n\n # create informative key (not depending on variable names)\n signature = tuple([\n f'{x.get_predicate().get_name()}({\",\".join([str(var_indices[y]) for y in x.get_variables()])})'\n for x in env_cmb])\n\n if signature not in redundancy_counts:\n redundancy_counts[signature] = {}\n\n if cl not in redundancy_counts[signature]:\n redundancy_counts[signature][cl] = []\n\n redundancy_counts[signature][cl].append(env_cmb)\n\n # filter to redundancies that recur\n redundancy_counts = dict([(k, v) for k, v in redundancy_counts.items() if len(v) > 1])\n\n clauses_representing_redundancy = {}\n redundant_theory = []\n new_clause_construction_index = 1\n\n clauses_to_change = reduce(lambda x, y: x.union(y), {tuple(v.keys()) for k, v in redundancy_counts.items()}, set())\n clause_to_redundancy_sign = {}\n for red_sig in redundancy_counts:\n for incl in redundancy_counts[red_sig]:\n if incl not in clause_to_redundancy_sign:\n clause_to_redundancy_sign[incl] = set()\n clause_to_redundancy_sign[incl].add(red_sig)\n\n new_theory = []\n for cl in theory.get_formulas():\n if cl in clauses_to_change:\n # go over all redundancy signatures\n for red_sig in clause_to_redundancy_sign[cl]:\n # replace all redundancies\n # explicit assumption: take the first and the last variable\n all_redundancies = redundancy_counts[red_sig][cl]\n new_cl = cl\n\n for redundancy in all_redundancies:\n var_appearance = {}\n for ind, lit in enumerate(redundancy):\n for v in lit.get_variables():\n if v not in var_appearance:\n var_appearance[v] = set()\n var_appearance[v].add(ind)\n\n if len(var_appearance) == len(cl.get_head().get_terms()):\n var_appearance = cl.get_head().get_terms()\n else:\n var_appearance = dict([(k,v) for k, v in var_appearance.items() if len(v) == 1])\n var_appearance = 
sorted(list(var_appearance.keys()), key=lambda x: var_appearance[x])\n\n if red_sig not in clauses_representing_redundancy:\n new_head_predicate = Predicate(f\"redpred_{new_clause_construction_index}\", len(var_appearance))\n new_clause_construction_index += 1\n new_head_literal = Literal(new_head_predicate, var_appearance)\n rcl = Clause(new_head_literal, redundancy)\n clauses_representing_redundancy[red_sig] = rcl\n redundant_theory.append(rcl)\n\n literal_to_use = clauses_representing_redundancy[red_sig].get_head()\n new_cl = new_cl.substitute_atoms(redundancy, literal_to_use, dict(zip(literal_to_use.get_terms(), var_appearance)))\n\n new_theory.append(new_cl)\n else:\n new_theory.append(cl)\n\n new_theory = redundant_theory + new_theory\n\n out_th = open(output_theory_file, 'w')\n for cl in new_theory:\n out_th.write(str(cl) + \"\\n\")\n out_th.close()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nDOMAIN = sys.argv[1]\nsetup = sys.argv[2]\n\nother_setup = sys.argv[3] # expected to be in the form parameter:value, separated by whitespace\nother_setup = other_setup.strip().split()\nother_setup = dict([tuple(c.split(\":\")) for c in other_setup])\n\nif 't' in other_setup:\n if ',' not in other_setup['t']:\n TRIAL = [int(other_setup['t'])]\n else:\n TRIAL = [int(x) for x in other_setup['t'].split(',')]\n\nif 'pt' in other_setup:\n if ',' not in other_setup['pt']:\n NUMBER_PLAY_TASKS = [int(other_setup['pt'])]\n else:\n NUMBER_PLAY_TASKS = [int(x) for x in other_setup['pt'].split(\",\")]\n\n\nPROGRAM_FOLDER = ROOT_FOLDER + f\"/programs_{setup}\"\nOUTPUT_FOLDER = ROOT_FOLDER + f\"/nonredundant_programs\"\n# SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n\nif not os.path.exists(OUTPUT_FOLDER):\n os.mkdir(OUTPUT_FOLDER)\n\nif not os.path.exists(f'{OUTPUT_FOLDER}/{DOMAIN}'):\n os.mkdir(f'{OUTPUT_FOLDER}/{DOMAIN}')\n\nfor num_plays in NUMBER_PLAY_TASKS:\n for trial in TRIAL:\n file_name = f'programs-{num_plays}-{trial}.pl'\n refactored_fn = f'refactored_{file_name}'\n\n print(f\"[{datetime.datetime.now()}] working on {file_name}\")\n print(f\" refactoring to {refactored_fn}\")\n remove_redundancies(f'{PROGRAM_FOLDER}/{DOMAIN}/{file_name}', f'{OUTPUT_FOLDER}/{DOMAIN}/{refactored_fn}')\n" }, { "alpha_fraction": 0.5698348879814148, "alphanum_fraction": 0.5723539590835571, "avg_line_length": 50.24897384643555, "blob_id": "559258414371ed5b3e7fd96e6fe03b298efdeadd", "content_id": "2c6004b931df039809bac5f7369f63606ed82933", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 50018, "license_type": "no_license", "max_line_length": 208, "num_lines": 976, "path": "/loreleai/learning/restructuring.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "import logging\nimport sys\nfrom functools import reduce\nfrom itertools import combinations\nfrom typing import Set, Dict, List, Tuple, Iterator, Union, Sequence\n\nfrom ortools.sat.python import cp_model\n\nfrom loreleai.language.commons import c_pred, c_literal, Literal, Clause, are_variables_connected, \\\n _are_two_set_of_literals_identical, _create_term_signatures\nfrom loreleai.language.lp import Predicate, Term, ClausalTheory, Variable\n\nNUM_PREDICATES = 1\nNUM_LITERALS = 2\n\n\nclass Restructor:\n \"\"\"\n 
Implements the theory restructuring functionality\n\n Args:\n max_literals (int): maximal number of literals to use in the restructuring clauses\n min_literals (int, optional): minimal number of literals to use in the restructuring clauses\n head_variable_selection (int, optional): how to select variables for the head of the latent predicate\n 1 - take all\n 2 - take max_arity vars\n max_arity (int, optional): the number of variables to take in the head of the latent predicates when\n head_variable_selection = 2\n \"\"\"\n\n def __init__(self, max_literals: int, min_literals: int = 2, head_variable_selection: int = 2, max_arity: int = 2,\n prevent_redundancies=False, minimise_redundancy=False, exact_redundancy=False, exclude_redundant_cands=False,\n exclude_alternatives=False, reject_singleton=True, allow_no_refactoring=False,\n objective_type=NUM_PREDICATES,\n logl=logging.INFO, logfile: str = None, logger=None):\n self.max_literals = max_literals\n self.min_literals = min_literals\n self._objective_type = objective_type\n self.aux_candidate_counter = 0\n self.head_variable_selection_strategy = head_variable_selection\n self.max_arity = max_arity\n self.enumerated_bodies = {}\n self.enumerated_body_signatures = {}\n self.candidate_usage_count = {}\n self.minimise_redundancy = minimise_redundancy\n self.prevent_redundancies = prevent_redundancies\n self.minimise_redundancy_absolute_count = exact_redundancy\n self.reject_singletons = reject_singleton\n self.allow_no_refactoring = allow_no_refactoring\n self._candidate_exclusion = []\n self.exclude_alternatives = exclude_alternatives\n self.exclude_redundant_candidates = exclude_redundant_cands\n self.redundant_candidates = []\n self.count_candidates = 0\n self.equals_zero = None\n self.log_level = logl\n\n # logging setup\n self._logger = logger if logger else logging.getLogger(logfile if logfile else '')\n\n if logfile is not None:\n log_file = logging.FileHandler(logfile)\n log_file.setLevel(logl)\n log_file.setFormatter(logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s'))\n self._logger.addHandler(log_file)\n else:\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logl)\n console_handler.setFormatter(logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s'))\n self._logger.addHandler(console_handler)\n\n self._logger.setLevel(logl)\n\n def _get_candidate_index(self):\n \"\"\"\n Generates a unique index for a new latent head\n\n Returns:\n integer\n \"\"\"\n self.aux_candidate_counter += 1\n return self.aux_candidate_counter\n\n def __create_latent_clause(self, literals: List[Literal], variable_strategy: int = 1, max_arity: int = 2) -> List[Clause]:\n if not are_variables_connected(literals):\n # if the variables are not connected in a graph, that makes it an invalid candidate\n return []\n\n head_name = f'latent{self._get_candidate_index()}'\n available_vars = {}\n for lit in literals:\n for v in lit.get_variables():\n if v not in available_vars:\n available_vars[v] = len(available_vars)\n available_vars = sorted(available_vars, key=lambda x: available_vars[x])\n\n if variable_strategy == 1 or len(available_vars) == max_arity:\n # take all variables or number of variables is equal to max arity\n head_pred = c_pred(head_name, len(available_vars), [x.get_type() for x in available_vars])\n # Predicate(head_name, len(available_vars), [x.get_type() for x in available_vars])\n atom = c_literal(head_pred, available_vars) # Atom(head_pred, available_vars)\n cl = Clause(atom, literals)\n\n if 
self.reject_singletons and cl.has_singleton_var():\n return []\n else:\n self.count_candidates += 1\n return [cl]\n elif variable_strategy == 2:\n # need to select a subset of variables\n clauses = []\n\n for ind, var_cmb in enumerate(combinations(available_vars, max_arity)):\n # head_pred = Predicate(f'{head_name}_{ind + 1}', len(var_cmb), [x.get_type() for x in var_cmb])\n head_pred = c_pred(f'{head_name}_{ind + 1}', len(var_cmb), [x.get_type() for x in var_cmb])\n atom = c_literal(head_pred, list(var_cmb)) # Atom(head_pred, list(var_cmb))\n cl = Clause(atom, literals)\n\n if self.reject_singletons and cl.has_singleton_var():\n pass\n else:\n self.count_candidates += 1\n clauses.append(cl)\n\n # remember the alternatives, and add constraint that only one of these can be taken\n # assumes that all candidates have unique heads\n if self.exclude_alternatives and len(clauses) > 1:\n self._candidate_exclusion.append([x.get_head().get_predicate().get_name() for x in clauses])\n\n return clauses\n else:\n raise Exception(f'Unknown head variable selection strategy {variable_strategy}')\n\n def __process_candidates(self, accumulator: Dict[Predicate, Set[Clause]], clause: Clause) -> Dict[Predicate, Set[Clause]]:\n for length in range(self.min_literals, self.max_literals + 1):\n for cmb in combinations(clause.get_atoms(), length):\n cmb = list(cmb)\n # predicate_sig = set([x.get_predicate() for x in cmb])\n predicate_sig = tuple(sorted([x.get_predicate() for x in cmb], key=lambda x: x.get_name()))\n\n potential_matches = [x for x in self.enumerated_bodies.get(predicate_sig, []) if len(x) == len(cmb)]\n\n if not any([_are_two_set_of_literals_identical(cmb, self.enumerated_body_signatures[x]) for x in potential_matches]):\n\n if predicate_sig not in self.enumerated_bodies:\n self.enumerated_bodies[predicate_sig] = set()\n\n self.enumerated_bodies[predicate_sig].add(tuple(cmb))\n self.enumerated_body_signatures[tuple(cmb)] = _create_term_signatures(cmb)\n\n clauses = self.__create_latent_clause(list(cmb), self.head_variable_selection_strategy, self.max_arity)\n for cl in clauses:\n for p in predicate_sig:\n if p not in accumulator:\n accumulator[p] = set()\n accumulator[p].add(cl)\n\n return accumulator\n\n def _get_candidates(self, clauses: Union[ClausalTheory, Sequence[Clause]]) -> Dict[Predicate, Set[Clause]]:\n \"\"\"\n Extracts candidates for restructuring from the clauses in the theory\n\n Args:\n clauses: theory, a set of clauses to restructure\n\n Returns:\n a set of candidates represented as a dictionary:\n -- key: predicate\n -- value: all clauses having that predicate in the body\n \"\"\"\n self._logger.info(\"Enumerating candidates...\")\n\n return reduce(self.__process_candidates,\n clauses.get_formulas() if isinstance(clauses, ClausalTheory) else clauses,\n {})\n\n def __encode(self, atoms_to_cover: Sequence[Literal],\n atoms_covered: Set[Literal],\n atom_covering: Dict[Literal, Dict[Clause, Set[Tuple[List[Literal], Dict[Term, Term]]]]],\n target_clause_head_vars: Set[Variable],\n prefix=\" \",\n allow_partial=False) -> Tuple[Set[Set[Literal]], Set[Clause]]:\n \"\"\"\n Encoding of a set of atoms\n :param atoms_to_cover:\n :param atoms_covered:\n :param atom_covering:\n :param prefix:\n :return:\n \"\"\"\n\n if len(atoms_to_cover) == 0:\n return set(), set()\n\n focus_atom = atoms_to_cover[0]\n # self._logger.debug(f'{prefix}| focusing on {focus_atom}')\n\n matching_clauses = atom_covering[focus_atom].keys()\n used_clauses = set()\n # print(f'{prefix}| found matching clauses 
{matching_clauses}')\n encodings = set()\n\n for cl in matching_clauses:\n for match in atom_covering[focus_atom][cl]:\n # self._logger.debug(f'{prefix}| processing clause {cl} with match {match}')\n atms, sbs = match # subs: key - variables in cl, value -- variables to use as the substitutions (from )\n new_atoms_to_cover = [x for x in atoms_to_cover if x not in atms and x != focus_atom]\n new_atoms_covered = atoms_covered.union(atms)\n\n # make sure that none of the variables that would be kicked out are needed in the rest of the body\n retained_variables = set([sbs[x] for x in cl.get_head().get_variables()])\n kicked_out_variables = reduce((lambda x, y: x + y), [x.get_variables() for x in atms], [])\n kicked_out_variables = [x for x in kicked_out_variables if x not in retained_variables]\n\n if len(new_atoms_to_cover):\n variables_in_the_rest_of_the_body = reduce((lambda x, y: x.union(y)), [x.get_variables() for x in new_atoms_to_cover], set()).union(target_clause_head_vars)\n else:\n variables_in_the_rest_of_the_body = target_clause_head_vars\n\n if any([x in variables_in_the_rest_of_the_body for x in kicked_out_variables]):\n if allow_partial:\n encodings.add({focus_atom})\n else:\n continue\n else:\n used_clauses.add(cl)\n # self._logger.debug(f'{prefix}| atoms covered: {new_atoms_covered}; atoms to cover: {new_atoms_to_cover}')\n encoding_rest, inner_used = self.__encode(new_atoms_to_cover, new_atoms_covered, atom_covering, target_clause_head_vars.union(retained_variables), prefix=prefix * 2)\n used_clauses = used_clauses.union(inner_used)\n # self._logger.debug(f'{prefix}| encodings of the rest: {encoding_rest}')\n\n if len(encoding_rest) == 0 and len(new_atoms_to_cover) == 0:\n encodings.add(frozenset({cl.get_head().substitute(sbs)}))\n else:\n for enc_rest in encoding_rest:\n encodings.add(enc_rest.union([cl.get_head().substitute(sbs)]))\n\n return encodings, used_clauses\n\n def _encode_clause(self, clause: Clause,\n candidates: Dict[Predicate, Set[Clause]],\n originating_clause: Clause,\n allow_partial=False) -> List[Clause]:\n \"\"\"\n Finds all possible encodings of the given clause by means of candidates\n\n Args:\n clause (Clause): a clause to encode\n candidates (Dict[Predicate, Set[Clause]]): candidates to use to encode the provided clause\n \"\"\"\n self._logger.warning(f'\\tencoding clause {clause}')\n\n clause_predicates = clause.get_predicates()\n filtered_candidates = dict([(k, v) for (k, v) in candidates.items() if k in clause_predicates])\n partial_rew_preds = reduce((lambda x, y: x.union(y)), filtered_candidates.values())\n partial_rew_preds = set([x.get_head().get_predicate() for x in partial_rew_preds if len(x) == 1])\n\n # create index structure so that it is easy to get to the candidates that cover different atoms\n atom_to_covering_clause_index = {}\n for p in filtered_candidates:\n for cand in filtered_candidates[p]:\n for answer in cand.is_part_of(clause):\n atms, sbs = answer\n if len(sbs) == 0:\n continue\n for atm in atms:\n if atm not in atom_to_covering_clause_index:\n atom_to_covering_clause_index[atm] = {}\n if cand not in atom_to_covering_clause_index[atm]:\n atom_to_covering_clause_index[atm][cand] = []\n if answer not in atom_to_covering_clause_index[atm][cand]:\n atom_to_covering_clause_index[atm][cand].append(answer)\n\n encoding, used_clauses = self.__encode(clause.get_atoms(), set(), atom_to_covering_clause_index, set(clause.get_head().get_variables()), allow_partial=allow_partial)\n # if refactoring does not reduce the length of the clause, 
reject it\n # reject also if the refactored clause mostly consists of unary mappings (partial rewrites)\n encoding = [Clause(clause.get_head(), list(x)) for x in encoding if len(x) < len(clause)]\n\n filtered_1 = [x for x in encoding if len(partial_rew_preds.intersection(x.get_predicates())) < int(len(clause)/2)]\n # if len(filtered_1) == 0:\n # filtered_1 = [x for x in encoding if len(partial_rew_preds.intersection(x.get_predicates())) <= int(len(clause)/2)]\n encoding = [x for x in filtered_1]\n\n for cl in encoding:\n cl.add_property(\"parent\", originating_clause if originating_clause else clause)\n\n # update candidate counts\n for cl in used_clauses:\n if cl not in self.candidate_usage_count:\n self.candidate_usage_count[cl] = 0\n self.candidate_usage_count[cl] += 1\n\n return list(encoding)\n\n def _prune_candidate_set(self, candidates: Dict[Predicate, Set[Clause]]) -> Tuple[Dict[Predicate, Set[Clause]], Set[Clause]]:\n \"\"\"\n Prunes the set of candidates; removes all candidates for which\n length(candidate) * usage(candidate) < length(candidate) + usage(candidate)\n\n Returns:\n in set: candidates to keep\n out set: pruned candidates\n \"\"\"\n\n return dict([(k, set([x for x in v if len(x) * self.candidate_usage_count.get(x, 0) > len(x) + self.candidate_usage_count.get(x, 0)])) for k, v in candidates.items()]), \\\n reduce((lambda x, y: x.union(y)), [set([x for x in v if len(x) * self.candidate_usage_count.get(x, 0) <= len(x) + self.candidate_usage_count.get(x, 0)]) for k, v in candidates.items()], set())\n\n def _encode_theory(self, theory: Union[ClausalTheory, Sequence[Clause]],\n candidates: Dict[Predicate, Set[Clause]],\n originating_clause: Clause = None) -> Dict[Clause, Sequence[Clause]]:\n \"\"\"\n Encodes the entire theory with the provided candidates\n\n Args:\n theory (Theory): a set of clauses to encode\n candidates (Dict[Predicate, Set[Clause]]): clauses to use for encoding the theory\n\n \"\"\"\n self._logger.info(f'Encoding theory...')\n return dict([(x, self._encode_clause(x, candidates, originating_clause if originating_clause else x)) for x in (theory.get_formulas() if isinstance(theory, ClausalTheory) else theory)])\n\n def _find_redundancies(self, encoded_clauses: Dict[Clause, Sequence[Clause]]) -> Tuple[Dict[Sequence[str], Sequence[Clause]], Sequence[Sequence[str]]]:\n \"\"\"\n Identifies all redundancies in possible encodings\n\n Args:\n encoded_clauses (Dict[Clause, Sequence[Clause]]): encoded clauses\n \"\"\"\n self._logger.info(f'Finding redundancies...')\n\n redundancy_counts = {}\n cooccurrence_counts = {}\n\n for cl in encoded_clauses:\n inner_counts = {}\n\n for enc_cl in encoded_clauses[cl]:\n enc = enc_cl.get_atoms()\n for l in range(2, len(enc) + 1):\n for env_cmb in combinations(enc, l):\n if not are_variables_connected(env_cmb):\n continue\n\n env_cmb = sorted(env_cmb, key=lambda x: x.get_predicate().get_name())\n\n # order variables\n var_indices = {}\n for atm in env_cmb:\n for v in atm.get_variables():\n if v not in var_indices:\n var_indices[v] = len(var_indices)\n\n # count co-occurrences of latent predicates\n pred_tuple = tuple([x.get_predicate().get_name() for x in env_cmb])\n if pred_tuple not in cooccurrence_counts:\n cooccurrence_counts[pred_tuple] = 0\n cooccurrence_counts[pred_tuple] += 1\n\n # create informative key (not depending on variable names)\n env_cmb = tuple([f'{x.get_predicate().get_name()}({\",\".join([str(var_indices[y]) for y in x.get_variables()])})' for x in env_cmb])\n\n if env_cmb not in inner_counts:\n 
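# first time this pattern appears for this clause: start a fresh bucket of encoded clauses\n 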
inner_counts[env_cmb] = []\n inner_counts[env_cmb].append(enc_cl)\n\n for t in inner_counts:\n if t not in redundancy_counts:\n redundancy_counts[t] = []\n redundancy_counts[t] += inner_counts[t]\n\n return dict([(k, v) for k, v in redundancy_counts.items() if len(v) > 1]), [k for k, v in cooccurrence_counts.items() if v > 1]\n\n def _find_candidate_redundancies(self, candidates: Dict[Predicate, Set[Clause]]):\n \"\"\"\n Finds all redundancies among refactoring candidates and records them in self.redundant_candidates\n\n \"\"\"\n if self.min_literals == self.max_literals:\n pass\n else:\n all_predicates = candidates.keys()\n for length in range(2, self.max_literals):\n for cmb in combinations(all_predicates, length):\n cands = reduce((lambda x, y: x.union(y)), [candidates[p] for p in cmb])\n cands = [x for x in cands if all([p in cmb for p in x.get_predicates()])]\n cands_exact_length = [x for x in cands if len(x) == length]\n cands_more_length = [x for x in cands if len(x) > length]\n\n redundancies = [[x.get_head().get_predicate()] + [y.get_head().get_predicate() for y in cands_more_length if len(x.is_part_of(y))] for x in cands_exact_length]\n redundancies = [x for x in redundancies if len(x) > 1]\n if len(redundancies):\n self.redundant_candidates += [tuple([p.get_name() for p in x]) for x in redundancies]\n\n def __create_var_map(self, model: cp_model.CpModel,\n candidates: Set[Clause],\n co_occurrences: Sequence[Sequence[str]],\n clause_dependencies: Dict[str, Sequence[str]]):\n \"\"\"\n Creates a CP-SAT variable for (1) each candidate clause and (2) an auxiliary variable for each combination of\n candidate clauses that appear in the encodings of clauses\n\n Also creates the equivalences between aux variables and the original ones\n\n Args:\n model (CpModel): an instance of a CP-SAT model\n candidates (Set[Clause]): a set of clauses defining latent predicates\n co_occurrences (List[Iterator[str]]): list of latent predicates that co-occur in many encodings\n each co-occurrence should be represented as a tuple of strings (names of predicates)\n\n Returns:\n Dict[Union[predicate_name, Tuple[predicate_names]], cp-sat variable]\n \"\"\"\n variable_map = {}\n\n aux_var_index = 1\n\n for cand in candidates:\n # TODO: use entire clause as the variable key\n variable_map[cand.get_head().get_predicate().get_name()] = model.NewBoolVar(cand.get_head().get_predicate().get_name())\n\n for co in co_occurrences:\n variable_map[co] = model.NewBoolVar(f'aux{aux_var_index}')\n # create the equality relating the aux variable to the conjunction of its member variables\n model.AddBoolAnd([variable_map[x] for x in co]).OnlyEnforceIf(variable_map[co])\n #model.AddMultiplicationEquality(variable_map[co], [variable_map[x] for x in co])\n\n # increase the index for the next aux var created\n aux_var_index += 1\n\n # add clause dependencies over different levels\n for cl in clause_dependencies:\n # add new var that will be true if all predicates from the body are selected\n b = model.NewBoolVar(f'aux_cldep_{self._get_candidate_index()}')\n model.AddBoolAnd([variable_map[c] for c in clause_dependencies[cl]]).OnlyEnforceIf(b)\n # selection of level+1 predicate implies selecting the predicates it depends on\n model.AddImplication(variable_map[cl], b)\n\n return variable_map\n\n def __impose_encoding_constraints(self, model: cp_model.CpModel,\n encodings: Dict[Clause, Sequence[ClausalTheory]],\n variable_map: Dict[Union[str, Iterator[str]], cp_model.IntVar]) -> Tuple[Dict[Clause, Sequence[cp_model.IntVar]], Dict[Clause, cp_model.IntVar]]:\n\n 
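# one selection Boolean per refactoring depth of each clause; index 0 stands for keeping the clause unrefactored\n 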
clause_level_selection_vars = {}\n encoding_clauses_to_vars = {}\n\n for clind, cl in enumerate(encodings):\n encs = encodings[cl]\n encs = [x.get_formulas() for x in encs] # each item is the encoding level, as list of formulas\n\n clause_level_selection_vars[cl] = [model.NewBoolVar(f'aux_level_{x+1}_{self._get_candidate_index()}') for x in range(len(encs) + 1)] # + 1 to allow 'choose no refactoring'\n level_components = []\n\n # individual_encodings = []\n\n for l_ind, level in enumerate(encs):\n individual_encodings_at_current_level = []\n\n for eind, en in enumerate(level):\n plain_vars = sorted([a.get_predicate().get_name() for a in en.get_atoms()])\n\n # find all sub-components that can be substituted with an aux variable from co-occurrences\n combs = []\n for l in range(2, len(plain_vars)):\n combs += list(combinations(plain_vars, l))\n\n combs = [tuple(x) for x in combs if tuple(x) in variable_map]\n\n # remove variables that are handled through the auxiliary variables\n all_in_aux = set()\n for aux in combs:\n all_in_aux = all_in_aux.union(aux)\n\n plain_vars = [x for x in plain_vars if x not in all_in_aux]\n\n # add product to individual encodings\n plain_vars = [variable_map[x] for x in plain_vars]\n combs = [variable_map[x] for x in combs]\n\n # ___ new encoding\n tmp_var = model.NewBoolVar(f'ind_enc_{clind}_{l_ind+1}_{eind}')\n\n # encoding with And/Or\n # encodes one possible refactoring and adds them all together in individual_encodings_at_current_level\n model.AddBoolAnd(plain_vars + combs).OnlyEnforceIf(tmp_var)\n model.AddBoolOr([x.Not() for x in (plain_vars + combs)]).OnlyEnforceIf(tmp_var.Not())\n\n individual_encodings_at_current_level.append(tmp_var)\n\n # add the var to the corresponding clause\n encoding_clauses_to_vars[encs[l_ind][eind]] = tmp_var\n\n # ENCODING WITH THE CURRENT LEVEL ONLY\n # takes individual encodings at the current level and makes an OR of all of them\n encoding_exists_var = model.NewBoolVar(f'aux_enc_exists_{self._get_candidate_index()}')\n model.AddBoolOr(individual_encodings_at_current_level).OnlyEnforceIf(encoding_exists_var)\n\n # And(level, Or(individual encodings)), enforced via entire_level_component (indicates that something from that level should be selected)\n level = clause_level_selection_vars[cl][l_ind+1] # l_ind + 1 because index 0 is 'no refactoring'\n entire_level_component = model.NewBoolVar(f'aux_levcom_{self._get_candidate_index()}')\n model.AddBoolAnd([level, encoding_exists_var]).OnlyEnforceIf(entire_level_component)\n level_components.append(entire_level_component)\n\n # at least one encoding has to be selected (or no encoding at all)\n if self.allow_no_refactoring:\n level_components += [clause_level_selection_vars[cl][0]]\n else:\n model.Add(clause_level_selection_vars[cl][0] == 0)\n\n if len(level_components):\n model.AddBoolOr(level_components)\n\n # exactly one encoding level has to be selected\n model.Add(sum(clause_level_selection_vars[cl]) == 1)\n\n return clause_level_selection_vars, encoding_clauses_to_vars\n\n def __eliminate_redundancy_in_solutions(self, model: cp_model.CpModel,\n redundancies: Dict[int, Dict[Sequence[str], Sequence[Clause]]],\n variable_map: Dict[Union[str, Iterator[str]], cp_model.IntVar],\n encoded_clause_vars: Dict[Clause, cp_model.IntVar],\n encoding_level_vars: Dict[Clause, Sequence[cp_model.IntVar]],\n reify: bool = False):\n\n to_return = []\n for level in redundancies:\n for redundancy_pattern in redundancies[level]:\n all_with_pattern = []\n for cl in redundancies[level][redundancy_pattern]:\n # + 
1 because level=0 is no refactoring\n corresponding_level_var = encoding_level_vars[cl.get_property(\"parent\")][level + 1]\n b = model.NewBoolVar(f'aux_red_{self._get_candidate_index()}')\n model.AddBoolAnd([corresponding_level_var, encoded_clause_vars[cl]]).OnlyEnforceIf(b)\n model.AddBoolOr([corresponding_level_var.Not(), encoded_clause_vars[cl].Not()]).OnlyEnforceIf(b.Not())\n all_with_pattern.append(b)\n\n if reify:\n if self.minimise_redundancy_absolute_count:\n if self.equals_zero is None:\n self.equals_zero = model.NewBoolVar('equals_zero')\n model.Add(self.equals_zero == 0)\n\n out_b = model.NewIntVar(-1, len(all_with_pattern), f'aux_red_sum_{self._get_candidate_index()}')\n model.Add((sum(\n all_with_pattern) - 1) == out_b) # -1 allows using 1 of the potentially redundant clauses; if only 1 is used there is no redundancy\n b_max = model.NewBoolVar(f'aux_redmax_{self._get_candidate_index()}')\n model.AddMaxEquality(b_max, [self.equals_zero, out_b])\n to_return.append(b_max)\n else:\n out_b = model.NewBoolVar(f'aux_red_sum_{self._get_candidate_index()}')\n model.Add(sum(all_with_pattern) <= 1).OnlyEnforceIf(\n out_b.Not()) # reasoning inverted because out_b=0 means no redundancy\n model.Add(sum(all_with_pattern) > 1).OnlyEnforceIf(out_b)\n to_return.append(out_b)\n else:\n model.Add(sum(all_with_pattern) <= 1)\n\n return to_return\n\n def __eliminate_candidate_alternatives(self, model: cp_model.CpModel,\n variable_map: Dict[Union[str, Iterator[str]], cp_model.IntVar]):\n \"\"\"\n Eliminates candidate alternatives:\n when multiple clauses have the same body but different head variables,\n imposes the constraint that at most one of them can be selected\n\n\n Args:\n model (cp_model.CpModel): model\n variable_map (Dict[Union[str, Iterator[str]], cp_model.IntVar]): mapping from clauses to cp_model.vars\n \"\"\"\n for alt in self._candidate_exclusion:\n model.Add(sum([variable_map[x] for x in alt]) <= 1)\n\n def __eliminate_redundant_candidates(self, model: cp_model.CpModel,\n variable_map: Dict[Union[str, Iterator[str]], cp_model.IntVar],\n reify: bool = False):\n \"\"\"\n Eliminates redundant candidates in the solution (imposes the same constraint as __eliminate_candidate_alternatives)\n\n \"\"\"\n to_return = []\n for alt in self.redundant_candidates:\n # model.AddBoolOr([variable_map[x].Not() for x in alt])\n if reify:\n b = model.NewBoolVar(f'aux_cr_{self._get_candidate_index()}')\n model.Add(sum([variable_map[x] for x in alt]) <= 1).OnlyEnforceIf(b.Not())\n model.Add(sum([variable_map[x] for x in alt]) > 1).OnlyEnforceIf(b)\n to_return.append(b)\n else:\n model.Add(sum([variable_map[x] for x in alt]) <= 1)\n\n return to_return\n\n def __set_objective(self, model: cp_model.CpModel,\n candidates: Set[Clause],\n variable_map: Dict[Union[str, Iterator[str]], cp_model.IntVar],\n redundancies: Dict[int, Dict[Sequence[str], Sequence[Clause]]],\n encoded_clause_vars: Dict[Clause, cp_model.IntVar],\n encoding_level_vars: Dict[Clause, Sequence[cp_model.IntVar]],\n encodings: Dict[Clause, Sequence[ClausalTheory]]):\n\n individual_redunds = []\n if self.minimise_redundancy and self.prevent_redundancies:\n individual_redunds += self.__eliminate_redundancy_in_solutions(model, redundancies, variable_map, encoded_clause_vars, encoding_level_vars, reify=True)\n\n if self.minimise_redundancy and self.exclude_redundant_candidates:\n individual_redunds += self.__eliminate_redundant_candidates(model, variable_map, reify=True)\n\n if self._objective_type == NUM_PREDICATES:\n vars_to_use = 
[x.get_head().get_predicate().get_name() for x in candidates]\n vars_to_use = [variable_map[x] for x in vars_to_use]\n\n if self.minimise_redundancy:\n model.Minimize(reduce((lambda x, y: x + y), vars_to_use + individual_redunds))\n else:\n model.Minimize(reduce((lambda x, y: x + y), vars_to_use))\n\n elif self._objective_type == NUM_LITERALS:\n all_weighted_clauses = []\n # lengths of selected clauses\n for cl in encodings:\n for ind, eth in enumerate(encodings[cl]):\n wcl = [(x, len(x) + 1) for x in eth.get_formulas()] # + 1 to include the head predicate\n\n level = encoding_level_vars[cl][ind+1]\n for f, cost in wcl:\n b = model.NewBoolVar(f'aux_and_{self._get_candidate_index()}')\n model.AddBoolAnd([level, encoded_clause_vars[f]]).OnlyEnforceIf(b)\n model.AddBoolOr([level.Not(), encoded_clause_vars[f].Not()]).OnlyEnforceIf(b.Not())\n all_weighted_clauses.append(b*cost)\n\n # add no refactoring cost, encoding/refactoring level = 0\n if self.allow_no_refactoring:\n all_weighted_clauses.append(encoding_level_vars[cl][0]*(len(cl) + 1))\n\n # lengths of selected candidates\n # exclude 1-length candidates because they help with partial refactoring:\n # the heads can simply be replaced by the body\n candidate_lengths = [(x.get_head().get_predicate().get_name(), len(x) + 1) for x in candidates if len(x) > 1]\n candidate_lengths = [variable_map[k]*v for k, v in candidate_lengths]\n\n if self.minimise_redundancy:\n model.Minimize(reduce((lambda x, y: x + y), all_weighted_clauses + candidate_lengths + individual_redunds))\n else:\n model.Minimize(\n reduce((lambda x, y: x + y), all_weighted_clauses + candidate_lengths))\n\n else:\n raise Exception(f'unknown objective function {self._objective_type}')\n\n def _map_to_solver_and_solve(self, candidates: Set[Clause],\n encodings: Dict[Clause, Sequence[ClausalTheory]],\n redundancies: Dict[int, Dict[Sequence[str], Sequence[Clause]]],\n cooccurrences: Sequence[Sequence[str]],\n clause_dependencies: Dict[str, Sequence[str]],\n max_predicates,\n num_threads,\n max_time_s):\n \"\"\"\n Maps the refactoring problem to CP-SAT and solves it\n\n Args:\n candidates (Set[Clause]): refactoring candidates\n encodings (Dict[Clause, Sequence[ClausalTheory]]): possible encodings of each clause, per level\n redundancies (Dict[int, Dict[Sequence[str], Sequence[Clause]]]): redundancies per level\n Dict[key: encoding level\n value: Dict[key: redundancy pattern (sequence of predicate names)\n val: Sequence[Clauses] with the redundancy]\n\n cooccurrences (Sequence[Sequence[str]]):\n tuples of predicate variables that often co-occur and can be replaced with a single variable\n clause_dependencies (Dict[str, Sequence[str]]): dependence of predicates over different encoding levels\n\n \"\"\"\n\n self._logger.info(f'Mapping to CP and solving')\n\n model = cp_model.CpModel()\n variable_map = self.__create_var_map(model, candidates, cooccurrences, clause_dependencies)\n cls_level_indicators, encoded_cls_var = self.__impose_encoding_constraints(model, encodings, variable_map)\n\n if self.exclude_alternatives:\n self.__eliminate_candidate_alternatives(model, variable_map)\n\n if self.exclude_redundant_candidates and not self.minimise_redundancy:\n self.__eliminate_redundant_candidates(model, variable_map)\n\n if self.prevent_redundancies and not self.minimise_redundancy:\n # hard-constrain redundancies away when they are not minimised as part of the objective\n self.__eliminate_redundancy_in_solutions(model, redundancies, variable_map, encoded_cls_var, cls_level_indicators)\n 
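# the objective minimises either the number of latent predicates or the total literal count, plus reified redundancy penalties when minimise_redundancy is set\n 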
self.__set_objective(model, candidates, variable_map, redundancies if self.minimise_redundancy else (), encoded_cls_var, cls_level_indicators, encodings)\n\n if max_predicates:\n model.Add(reduce((lambda x, y: x + y), [variable_map[x.get_head().get_predicate().get_name()] for x in candidates]) <= max_predicates)\n\n solver = cp_model.CpSolver()\n solver.parameters.num_search_workers = num_threads\n if max_time_s:\n solver.parameters.max_time_in_seconds = max_time_s\n\n solution_callback = VarArraySolutionPrinter([variable_map[x.get_head().get_predicate().get_name()] for x in candidates], self._logger)\n self._logger.info(\"Started solving\")\n status = solver.SolveWithSolutionCallback(model, solution_callback) # solver.Solve(model)\n self._logger.info(f\"Solving done; status: {status}\")\n\n if status in (cp_model.OPTIMAL, cp_model.FEASIBLE, cp_model.UNKNOWN):\n selected_clauses = set([k for k, v in variable_map.items() if isinstance(k, str) and solver.Value(v) == 1])\n selected_clauses = [x for x in candidates if x.get_head().get_predicate().get_name() in selected_clauses]\n refactoring_steps_per_clause = {}\n for cl in cls_level_indicators:\n tmp = [solver.Value(x) for x in cls_level_indicators[cl]]\n refactoring_steps_per_clause[cl] = tmp.index(1) # no + 1 needed: index 0 means no refactoring, so the index equals the number of refactoring steps\n return selected_clauses, refactoring_steps_per_clause\n else:\n raise Exception('Could not find a satisfiable solution!')\n\n def _prepare_final_theory(self, clauses: ClausalTheory,\n refactoring_predicates: Dict[Predicate, Set[Clause]],\n refactoring_steps: Dict[Clause, int]) -> ClausalTheory:\n \"\"\"\n Produces the final refactored theory\n\n Args:\n clauses (ClausalTheory): theory to refactor\n refactoring_predicates (Dict[Predicate, Set[Clause]]): encoding predicates to use\n refactoring_steps (Dict[Clause, int]): number of refactoring steps for each clause in the theory\n\n Returns:\n refactored theory\n\n \"\"\"\n self._logger.setLevel(logging.CRITICAL)\n final_theory = list(reduce((lambda x, y: x.union(y)), [[p for p in x if len(p) > 1] for x in refactoring_predicates.values()], set()))\n\n # resolve single literal clauses\n single_body_cands = list(reduce((lambda x, y: x.union(y)), [[p for p in c if len(p) == 1] for c in refactoring_predicates.values()], set()))\n single_mapping = dict([(x.get_head().get_predicate(), list(x.get_predicates())[0]) for x in single_body_cands])\n while any([x in single_mapping for x in single_mapping.values()]):\n for item in single_mapping:\n if single_mapping[item] in single_mapping:\n single_mapping[item] = single_mapping[single_mapping[item]]\n\n for cl in clauses.get_formulas():\n steps = refactoring_steps[cl]\n tmp_frm = cl\n\n while steps > 0:\n if not isinstance(tmp_frm, list):\n tmp_frm = [tmp_frm]\n re_frm = []\n if len(tmp_frm) > 1:\n for itm in tmp_frm:\n try:\n re_frm += self._encode_theory([itm], refactoring_predicates).values()\n except Exception:\n pass\n else:\n re_frm = self._encode_theory(tmp_frm, refactoring_predicates)\n re_frm = [x for x in re_frm.values()]\n re_frm = reduce((lambda x, y: x + y), re_frm)\n\n tmp_frm = [x for x in re_frm]\n steps -= 1\n\n if not isinstance(tmp_frm, list):\n tmp_frm = [tmp_frm]\n\n final_theory += tmp_frm\n\n # if any atoms refer to single-literal clauses\n new_frms = []\n for frm_itm in final_theory:\n if any([x in single_mapping for x in frm_itm.get_predicates()]):\n tmp_head = frm_itm.get_head()\n tmp_body = []\n for atm in frm_itm.get_atoms():\n if atm.get_predicate() in 
single_mapping:\n tmp_body.append(c_literal(single_mapping[atm.get_predicate()], atm.get_terms()))\n else:\n tmp_body.append(atm)\n\n new_frms.append(Clause(tmp_head, tmp_body))\n else:\n new_frms.append(frm_itm)\n\n self._logger.setLevel(self.log_level)\n\n return ClausalTheory(new_frms)\n\n def restructure(self, clauses: ClausalTheory, max_layers=None, max_predicate=None, num_threads=1, max_time_s=None,\n prune_candidates=False):\n \"\"\"\n Starts the restructuring process\n\n Args:\n clauses: a theory to restructure\n\n Return:\n a new restructured theory\n \"\"\"\n self.aux_candidate_counter = 0\n self.count_candidates = 0\n candidatesPruned = set()\n\n # 4 -- optimal 2 -- feasible 0 -- unknown 3 -- infeasible\n # print(cp_model.OPTIMAL, cp_model.FEASIBLE, cp_model.UNKNOWN, cp_model.INFEASIBLE)\n\n encodings_space: Dict[Clause, List[ClausalTheory]] = dict([(f, []) for f in clauses.get_formulas()])\n all_refactoring_candidates: Dict[Predicate, Set[Clause]] = {}\n all_redundancies = {}\n # ^- Dict[ key: encoding level, to be used to select the appropriate encoding level var )\n # val: Dict[ key: redundancy string (tuple)\n # val: encoded clauses Sequence[Clause] ] ]\n all_cooccurences = []\n clause_dependencies = {}\n\n something_to_refactor: bool = True\n iteration_counter = 0\n\n while something_to_refactor:\n self.candidate_usage_count = {}\n self.enumerated_body_signatures = {}\n self._logger.info(f\"\\tStarting iteration: {iteration_counter}\")\n # collect clauses to focus on\n if iteration_counter == 0:\n focus_clauses = list(encodings_space.keys())\n else:\n focus_clauses = reduce((lambda x, y: x + y),\n [encodings_space[x][iteration_counter-1].get_formulas() for x in encodings_space if len(encodings_space[x]) == iteration_counter], [])\n focus_clauses = [x for x in focus_clauses if len(x) >= self.min_literals]\n\n iteration_candidates = self._get_candidates(focus_clauses)\n\n #self._find_candidate_redundancies(iteration_candidates)\n # save the current candidates to the global collection\n # all_refactoring_candidates.update(iteration_candidates)\n\n if iteration_counter > 0:\n for p in iteration_candidates:\n for cl in iteration_candidates[p]:\n head_p = cl.get_head().get_predicate().get_name()\n dep_ps = [x.get_predicate().get_name() for x in cl.get_atoms()]\n if head_p not in clause_dependencies:\n clause_dependencies[head_p] = dep_ps\n\n self._logger.info(f\"\\t\\tfound {self.count_candidates} candidates\")\n\n iteration_formulas = {}\n\n # extend the encoding of each original clause with the new layer of encodings\n if iteration_counter == 0:\n encoded_clauses = self._encode_theory(clauses, iteration_candidates)\n #iteration_formulas.update(encoded_clauses)\n for cl in focus_clauses:\n frms_to_add = [v for v in encoded_clauses[cl]]\n if len(frms_to_add):\n encodings_space[cl].append(ClausalTheory(frms_to_add))\n else:\n for cl in encodings_space:\n if len(encodings_space[cl]) == iteration_counter:\n encoded_clauses = self._encode_theory([x for x in encodings_space[cl][iteration_counter-1].get_formulas() if len(x) >= self.min_literals], iteration_candidates, originating_clause=cl)\n if len(encoded_clauses) == 0:\n continue\n #iteration_formulas.update(encoded_clauses)\n # add encodings of all of the clauses\n frms_to_add = reduce((lambda x, y: x + y), [v for k, v in encoded_clauses.items()], [])\n if len(frms_to_add):\n encodings_space[cl].append(ClausalTheory(frms_to_add))\n else:\n pass\n\n # if pruning is required\n # has to be done after the encoding as that is where the 
counts happen\n if prune_candidates:\n iteration_candidates, rejectedCandidates = self._prune_candidate_set(iteration_candidates)\n rejectedPredicates = set(sorted([x.get_head().get_predicate() for x in rejectedCandidates], key=lambda x: str(x)))\n false_exclusions = set()\n for cl in encodings_space:\n if len(encodings_space[cl]) > iteration_counter:\n formulas_to_remove = encodings_space[cl][-1].get_formulas(rejectedPredicates)\n\n # if no refactoring is left after removing rejected predicates,\n # retain the rejected predicates that were used\n if len(formulas_to_remove) == len(encodings_space[cl][-1]):\n used_preds = reduce((lambda x, y: x.union(y)), [x.get_predicates() for x in formulas_to_remove], set())\n false_exclusions = false_exclusions.union(rejectedPredicates.intersection(used_preds))\n\n # add false rejections to the iteration's candidates\n matching_falsely_rejected_candidates = [x for x in rejectedCandidates if x.get_head().get_predicate() in false_exclusions]\n for item in matching_falsely_rejected_candidates:\n for p in item.get_predicates():\n iteration_candidates[p].add(item)\n\n rejectedPredicates = rejectedPredicates.difference(false_exclusions)\n candidatesPruned = candidatesPruned.union(rejectedCandidates.difference(matching_falsely_rejected_candidates))\n all_refactoring_candidates.update(iteration_candidates)\n\n # clean the refactored theories\n for cl in encodings_space:\n if len(encodings_space[cl]) > iteration_counter:\n encodings_space[cl][-1].remove_formulas_with_predicates(rejectedPredicates)\n iteration_formulas[cl] = encodings_space[cl][-1].get_formulas()\n\n # clear the clause dependencies\n for rej_can in rejectedPredicates:\n if rej_can.get_name() in clause_dependencies:\n del clause_dependencies[rej_can.get_name()]\n\n # clear alternatives\n rejectedPredicates = set([x.get_name() for x in rejectedPredicates])\n self._candidate_exclusion = [x for x in self._candidate_exclusion if not any([p in rejectedPredicates for p in x])]\n else:\n all_refactoring_candidates.update(iteration_candidates)\n for cl in encodings_space:\n if len(encodings_space[cl]) > iteration_counter:\n iteration_formulas[cl] = encodings_space[cl][-1].get_formulas()\n\n # find candidate redundancies\n if self.exclude_redundant_candidates:\n self._logger.info(\"\\t Finding redundancies amongst candidates\")\n self._find_candidate_redundancies(iteration_candidates)\n self._logger.info(\"\\t\\t\\t done!\")\n\n if self.prevent_redundancies:\n # detect all redundancies and co-occurrences\n tmp_redundancies, tmp_cooccurrences = self._find_redundancies(iteration_formulas)\n # all_cooccurences += tmp_cooccurrences\n if len(tmp_redundancies):\n all_redundancies[iteration_counter] = tmp_redundancies\n\n iteration_counter += 1\n\n if iteration_counter == max_layers or len(focus_clauses) == 0:\n something_to_refactor = False\n\n distinct_candidates = set()\n for p in all_refactoring_candidates:\n distinct_candidates = distinct_candidates.union(all_refactoring_candidates[p])\n\n self._logger.info(f\"Found {self.count_candidates} candidates in total; pruned {len(candidatesPruned)}\")\n\n selected_clauses, refactoring_steps = self._map_to_solver_and_solve(distinct_candidates, encodings_space,\n all_redundancies, all_cooccurences, clause_dependencies,\n max_predicate, num_threads, max_time_s)\n\n # create clause index\n cl_ind = {}\n for cl in selected_clauses:\n pps = cl.get_predicates()\n for p in pps:\n if p not in cl_ind:\n cl_ind[p] = set()\n cl_ind[p].add(cl)\n\n final_theory = 
self._prepare_final_theory(clauses, cl_ind, refactoring_steps)\n\n return selected_clauses, final_theory\n\n\nclass VarArraySolutionPrinter(cp_model.CpSolverSolutionCallback):\n \"\"\"Print intermediate solutions.\"\"\"\n\n def __init__(self, variables, logger):\n cp_model.CpSolverSolutionCallback.__init__(self)\n self.__variables = variables\n self.__solution_count = 0\n self._logger = logger\n\n def on_solution_callback(self):\n self.__solution_count += 1\n self._logger.info(f\"\\tIteration {self.__solution_count}: objective {self.ObjectiveValue()}, selected {sum([1 for x in self.__variables if self.Value(x) == 1])}\")\n\n def solution_count(self):\n return self.__solution_count" }, { "alpha_fraction": 0.5758097171783447, "alphanum_fraction": 0.5920826196670532, "avg_line_length": 26.860261917114258, "blob_id": "4018658168b2f9929efed11132f59a36c73088ee", "content_id": "a7289e2dc9b288dc847d7b285b744f585e1c0303", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6391, "license_type": "no_license", "max_line_length": 157, "num_lines": 229, "path": "/tests/simpletest.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "from pyswip import Prolog\nfrom loreleai.language.lp import parse, Theory, ClausalTheory\nfrom loreleai.learning.restructuring import Restructor, NUM_PREDICATES, NUM_LITERALS\nimport time\nimport os\nimport logging\nimport networkx as nx\nimport cProfile\n\n\ndef knorf_test():\n\n dataset = \"strings-600-5\"\n\n theory_file = os.path.dirname(__file__) + f\"/../data/restructuring/{dataset}.pl\"\n theory = ClausalTheory(read_from_file=theory_file)\n\n unfolded_theory = theory.unfold()\n unfolded_theory.remove_duplicates()\n\n frms = unfolded_theory.get_formulas()\n # frms = frms[2:]\n # frms = [frms[0]] + frms[2:]\n # frms = frms[:8] + frms[9:]\n #frms = frms[10:30]\n frms_rec = [x for x in frms if x.get_head().get_predicate() in unfolded_theory._recursive_predicates]\n\n frms_single = [x for x in frms if len(x) == 1]\n\n unfolded_theory = ClausalTheory([x for x in frms if len(x) > 1 or len(x) == 1 and x.get_head().get_predicate() in unfolded_theory._recursive_predicates])\n\n restructurer = Restructor(max_literals=3, min_literals=2,\n max_arity=2, head_variable_selection=2, reject_singleton=True, prevent_redundancies=True,\n logl=logging.INFO, minimise_redundancy=True, exclude_redundant_cands=False, allow_no_refactoring=False,\n exclude_alternatives=False, exact_redundancy=False, objective_type=NUM_LITERALS)\n\n #cProfile.run(\"restructurer.restructure(unfolded_theory, max_layers=None, max_predicate=19, num_threads=4, prune_candidates=True)\")\n cls, thr = restructurer.restructure(unfolded_theory, max_layers=None, max_predicate=None, num_threads=4, prune_candidates=True)\n\n print(f\"selected clauses ({len(cls)}): \\n\" + \"\\n\".join([str(x) for x in sorted(cls, key=lambda x: str(x))]))\n print(\"\")\n\n # print(f\"original theory ({len(theory.get_number_of_predicates())}):\\n{theory}\")\n #\n # print(\"\")\n # print(f\"unfolded theory ({len(unfolded_theory)}):\\n{unfolded_theory}\")\n #\n # print(\"\")\n # print(f\"Encoded theory ({len(theory.get_number_of_predicates())}):\\n{thr}\")\n\n # print(\"refactored theory\")\n # for f in thr.get_formulas():\n # print(f\"\\t{f}\")\n\n # thr.visualize(f\"refactored_{dataset}.pdf\", only_numbers=True)\n # theory.visualize(f\"original_{dataset}.pdf\", only_numbers=False)\n\n print(f\"Original theory has {theory.num_literals()} literals, while encoded 
theory has {thr.num_literals()}\")\n print(f\"Original theory has {len(theory.get_predicates())} predicates, while refactored theory has {len(thr.get_predicates())}\")\n\n\ndef z3_test():\n from z3 import Fixedpoint, BitVecSort, Function, BoolSort, Const, BitVecVal\n fp = Fixedpoint()\n fp.set(engine='datalog')\n\n #print(fp.help())\n\n s = BitVecSort(3)\n edge = Function('edge', s, s, BoolSort())\n path = Function('path', s, s, BoolSort())\n\n a = Const('a', s)\n b = Const('b', s)\n c = Const('c', s)\n\n fp.register_relation(path, edge)\n fp.declare_var(a, b, c)\n fp.rule(path(a, b), edge(a, b))\n fp.rule(path(a, c), [edge(a, b), path(b, c)])\n\n v1 = BitVecVal(1, s)\n v2 = BitVecVal(2, s)\n v3 = BitVecVal(3, s)\n v4 = BitVecVal(4, s)\n\n f1 = edge(v1, v2)\n fp.fact(f1)\n fp.fact(edge(v1, v3))\n fp.fact(edge(v2, v4))\n\n #fp.help()\n\n print(\"current set of rules\", fp)\n\n first_answer = fp.query(path(b, a))\n #print(fp.query(path(b, a)), \"yes we can reach v4 from v1\")\n ans = fp.get_answer()\n print(ans)\n # ans = ans.children()\n # print(ans, type(ans))\n #\n # second_answer = fp.query(path(v3, v4))\n # print(fp.query(path(v3, v4)), \"no we cannot reach v4 from v3\")\n\n\ndef z3_granparent():\n from z3 import Fixedpoint, BitVecSort, Function, BoolSort, Const, BitVecVal,\\\n FiniteDomainSort, StringSort, DeclareSort\n fp = Fixedpoint()\n fp.set(engine='datalog')\n\n s = BitVecSort(2)\n\n parent = Function(\"parent\", s, s, BoolSort())\n granparent = Function(\"grandparent\", s, s, BoolSort())\n fp.register_relation(parent)\n fp.register_relation(granparent)\n\n p1 = BitVecVal(1, s)\n p2 = BitVecVal(2, s)\n p3 = BitVecVal(3, s)\n\n v1 = Const(\"v1\", s)\n v2 = Const(\"v2\", s)\n v3 = Const(\"v3\", s)\n\n fp.declare_var(v1)\n fp.declare_var(v2)\n fp.declare_var(v3)\n\n fp.rule(granparent(v1, v2), [parent(v1, v3), parent(v3, v2)])\n\n fp.fact(parent(p1, p2))\n fp.fact(parent(p2, p3))\n\n print(fp)\n\n print(fp.query(parent(v1, v2), parent(v2, v3)))\n print(fp.get_answer())\n\n\ndef minikanren_test():\n from kanren import Relation, facts, run, var, conde\n\n parent = Relation()\n facts(parent, (\"Homer\", \"Bart\"),\n (\"Homer\", \"Lisa\"),\n (\"Abe\", \"Homer\"))\n\n x = var()\n y = var()\n z = var()\n\n def grandparent(*args):\n y = var()\n return conde((parent(args[0], y), parent(y, args[1])))\n\n #grandparent = lambda *x: conde([parent(x[0], z), parent(z, x[1])])\n\n print(run(0, [x, y], grandparent(x, y)))\n\n\ndef minikanren_test_2():\n from kanren import Relation, facts, run, var, conde, reify, lany\n from kanren.core import Zzz\n\n\n parent = Relation()\n facts(parent, (\"Homer\", \"Bart\"),\n (\"Homer\", \"Lisa\"),\n (\"Abe\", \"Homer\"))\n\n x = var()\n y = var()\n z = var()\n\n def ancestor(x, y):\n z = var()\n return conde([parent(x, y)], [parent(x, z), Zzz(ancestor, z, y)])\n\n #grandparent = lambda *x: conde([parent(x[0], z), parent(z, x[1])])\n\n print(run(0, [y, x], ancestor(y, x)))\n\n\ndef kanren_graph():\n from kanren import Relation, facts, run, var, conde, reify, lany\n from kanren.core import Zzz\n\n edge = Relation()\n facts(edge, (\"v1\", \"v2\"),\n (\"v1\", \"v3\"),\n (\"v2\", \"v4\"))\n\n def path(x, y):\n z = var()\n return conde([edge(x, y)], [edge(x, z), Zzz(path, z, y)])\n\n x = var()\n y = var()\n print(run(0, [x, y], path(x, y)))\n\n\ndef python_test():\n\n def create_inner_func_with_args():\n a = 7\n ad = {\"this\": \"is available\"}\n\n def sum_it_up(*args, add=a, sadd=ad):\n print(\"this\", sadd[\"this\"])\n return sum(args) + add\n\n return 
sum_it_up\n\n ff = create_inner_func_with_args()\n\n print(ff(1))\n\n\n#z3_test()\nknorf_test()\n#z3_granparent()\n#minikanren_test()\n#minikanren_test_2()\n#kanren_graph()\n\n#python_test()\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.48993057012557983, "alphanum_fraction": 0.520312488079071, "avg_line_length": 34.88785171508789, "blob_id": "c15c2d9261dd056a6b1b346e7632e2e5785d1440", "content_id": "7f74357c6105cef45312fb7a98e356a51aca47a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11520, "license_type": "no_license", "max_line_length": 719, "num_lines": 321, "path": "/playgol_experiments/e2-strings/runner.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "import sys\nimport math\nimport subprocess\nimport numpy as np\nimport scipy.stats as stats\nimport random\nimport string\nimport datetime\nimport os\n\nnum_trials=10\nplaytimes = list(range(200,4200,200))\ntrials = list(range(1,num_trials+1))\nsystems=['playgol'] #,'nopi']\nmax_string_size = 20\n\ndef call_prolog(action,load_files,output):\n cmd = \"load_files(['experiment',{}],[silent(true)]). \".format(','.join(load_files))\n cmd += '{}.'.format(action)\n print(cmd)\n with open(output, 'w') as outf:\n p = subprocess.Popen(['swipl','-q','-G8g','-T8g','-L8g'], stdin=subprocess.PIPE, stdout=outf)\n p.stdin.write(cmd.encode())\n (output, err) = p.communicate()\n\ndef gen_syn_data(playtime,k):\n call_prolog('b({})'.format(playtime),[\"'gen-exs'\"],'data/play-{}-{}.pl'.format(playtime,k))\n\ndef load_probs(nowrites=True):\n dic = {}\n with open('probs.txt', 'r') as f:\n for line in f:\n if line.startswith('# -*- coding: utf-8 -*-'):\n continue\n xs=line.strip().split('=>')\n if len(xs)==1 and len(xs[0])>0:\n try:\n problem='b'+xs[0][2:]\n except:\n continue\n elif len(xs)>1:\n if problem not in dic:\n dic[problem] = []\n dic[problem].append(xs)\n return dic\n\ndef gen_data(dic,tasks):\n random.shuffle(tasks)\n print_train=[]\n print_test=[]\n for problem,examples in dic.items():\n if len(examples) < 10:\n continue\n random.shuffle(examples)\n train = examples[:5]\n test = examples[5:10]\n for x in train:\n print_train.append((problem,list(x[0].strip()),list(x[1].strip())))\n for x in test:\n print_test.append((problem,list(x[0].strip()),list(x[1].strip())))\n\n for problem,a,b in print_train:\n yield 'build_pos({},{},{}).\\n'.format(problem,a,b)\n for problem,a,b in print_test:\n yield 'test_pos({},{},{}).\\n'.format(problem,a,b)\n yield 'tasks({}).\\n'.format(tasks)\n\n\ndef gen_real_data():\n dic=load_probs()\n tasks = list(dic.keys())\n for x in gen_data(dic,tasks):\n yield x\n\ndef do_gen_data():\n for k in trials:\n for playtime in playtimes:\n gen_syn_data(playtime,k)\n with open('data/build-{}.pl'.format(k),'w') as f:\n for x in gen_real_data():\n f.write( x + '\\n')\n\ndef play_and_buid():\n for system in systems:\n for k in trials:\n for p in playtimes:\n playf=\"'data/play-{}-{}'\".format(p,k)\n buildf=\"'data/build-{}'\".format(k)\n programf=f\"programs/{system}/{p}-{k}.pl\"\n call_prolog('a',[playf,buildf],programf)\n\ndef test():\n for system in systems:\n for k in trials:\n for p in playtimes:\n buildf=\"'data/build-{}'\".format(k)\n programf=f\"'programs/{system}/{p}-{k}.pl'\"\n resultsf=f'results/{system}/{p}-{k}.pl'\n call_prolog('do_test',[buildf,programf],resultsf)\n\n# def read_file(p,k):\n# num_solved=0\n# acc=[]\n# fname = f'results/{system}/{p}-{k}.pl'\n# with open(,'r') as f:\n# for line in f:\n# line=line.strip()\n# 
xs=line.split(',')\n# if len(xs) <2:\n# continue\n# if line.startswith('%solved'):\n# num_solved+=int(xs[2])\n# else:\n# # k_instances+=1\n# acc+=[int(xs[1])]\n# return num_solved,acc\n\ndef get_acc(system,p):\n tasks=['b36', 'b132', 'b246', 'b167', 'b87', 'b304', 'b47', 'b94', 'b284', 'b116', 'b157', 'b239', 'b224', 'b285', 'b215', 'b179', 'b92', 'b227', 'b111', 'b99', 'b35', 'b38', 'b307', 'b91', 'b151', 'b83', 'b61', 'b247', 'b298', 'b67', 'b120', 'b325', 'b63', 'b7', 'b48', 'b33', 'b27', 'b108', 'b78', 'b252', 'b133', 'b1', 'b80', 'b139', 'b100', 'b308', 'b30', 'b136', 'b109', 'b3', 'b103', 'b149', 'b323', 'b29', 'b34', 'b189', 'b293', 'b134', 'b43', 'b314', 'b326', 'b324', 'b188', 'b123', 'b137', 'b98', 'b4', 'b283', 'b300', 'b249', 'b162', 'b24', 'b56', 'b292', 'b241', 'b327', 'b23', 'b6', 'b238', 'b186', 'b81', 'b156', 'b73', 'b102', 'b153', 'b113', 'b37', 'b76', 'b196', 'b5', 'b309', 'b25', 'b184', 'b181']\n\n all_accs=[]\n accs=[]\n for k in trials:\n k_acc=[]\n fname = f'results/{system}/{p}-{k}.pl'\n with open(fname,'r') as f:\n data=f.read()\n probs=data.split('%')\n for prob in probs:\n xs=prob.split('\\n')\n if len(xs) == 0:\n continue\n if xs[0].startswith('solved'):\n (_,t,solved) = xs[0].split(',')\n if t not in tasks:\n continue\n for vs in xs[1:]:\n vs = vs.split(',')\n if len(vs) != 2:\n continue\n all_accs.append(int(vs[1]))\n k_acc.append(int(vs[1]))\n print({'system': 'metagol', 'playtask': p, 'trial': k, 'accuracy': np.mean(k_acc)})\n accs.append(np.mean(k_acc))\n return (np.mean(accs),stats.sem(accs),all_accs)\n\ndef results():\n system_accs = {}\n for system in systems:\n print(system)\n system_accs[system]=[]\n for p in playtimes:\n #print(f\"play: {p}\")\n (acc,sem,all_accs) = get_acc(system,p)\n system_accs[system].extend(all_accs)\n #print('({},{}) +- (0,{})'.format(p,round(acc*100,2),round(sem*100,2)))\n\n\ndef parse_runtime(setup, p, k):\n programf = f\"./programs/{setup}/{p}-{k}.pl\"\n start_time = None\n end_time = None\n fil = open(programf)\n\n for line in fil.readlines():\n if '% started solving build tasks' in line:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n start_time = datetime.datetime(day=int(tmp[6]), month=int(tmp[7]), year=int(tmp[8]), hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n if '% finished solving build tasks' in line:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n end_time = datetime.datetime(day=int(tmp[6]), month=int(tmp[7]), year=int(tmp[8]), hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n\n if end_time is None:\n end_time = datetime.datetime.fromtimestamp(os.path.getmtime(programf))\n return (end_time - start_time).total_seconds()\n\ndef parse_runtime_new(setup, p, k):\n programf = f\"./programs/{setup}/{p}-{k}.pl\"\n start_time = None\n end_time = None\n fil = open(programf)\n times = []\n\n for line in fil.readlines():\n if '% started solving build tasks' in line:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n start_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n elif '% finished solving build tasks' in line:\n tmp = line.strip().split()\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n time = tmp[-1].split(\":\")\n end_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n elif 'timeout' in line:\n times.append({'type': 
'No refactoring', 'playtasks': p, 'trial': k, 'runtime': 60})\n start_time=None\n\n if start_time is not None and end_time is not None:\n times.append({'type': 'No refactoring', 'playtasks': p, 'trial': k, 'runtime': (end_time - start_time).total_seconds()})\n start_time=None\n end_time=None\n\n return times\n\ndef parse_runtime_trial(setup, p, k):\n programf = f\"./programs/{setup}/{p}-{k}.pl\"\n start_time = None\n end_time = None\n fil = open(programf)\n times = []\n last_timeout_count = 0\n\n for line in fil.readlines():\n if '% started solving build tasks' in line and start_time == None:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n start_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n elif '% finished solving build tasks' in line:\n tmp = line.strip().split()\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n time = tmp[-1].split(\":\")\n last_timeout_count = 0\n end_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n elif 'timeout' in line:\n last_timeout_count += 1\n\n return {'type': 'No refactoring', 'playtasks': p, 'trial': k, 'runtime': (end_time - start_time).total_seconds() + last_timeout_count*60}\n\ndef runtimes_new():\n runtimes = []\n for p in playtimes:\n for t in trials:\n seconds = parse_runtime_new('playgol', p, t)\n runtimes += seconds\n for item in seconds:\n print(item)\n #runtimes.append({'type': 'originl', 'playtasks': p, 'trial': t, 'runtime': seconds})\n #print({\"system\": \"metagol\", \"playtasks\": p, \"trial\": t, \"runtime\": seconds})\n\n return runtimes\n\ndef runtimes_trial():\n runtimes = []\n for p in playtimes:\n for t in trials:\n seconds = parse_runtime_trial('playgol', p, t)\n print(str(seconds)+ \",\")\n #runtimes.append({'type': 'originl', 'playtasks': p, 'trial': t, 'runtime': seconds})\n #print({\"system\": \"metagol\", \"playtasks\": p, \"trial\": t, \"runtime\": seconds})\n\n return runtimes\n\n\ndef runtimes():\n runtimes = []\n for p in playtimes:\n for t in trials:\n seconds = parse_runtime('playgol', p, t)\n runtimes.append({'type': 'originl', 'playtasks': p, 'trial': t, 'runtime': seconds})\n print({\"system\": \"metagol\", \"playtasks\": p, \"trial\": t, \"runtime\": seconds})\n\n return runtimes\n\ndef get_build_program_size(setup, p, k):\n programf = f\"./programs/{setup}/{p}-{k}.pl\"\n count = 0\n fil = open(programf)\n\n for line in fil.readlines():\n if line.startswith('%') or len(line) < 3 or 'true' in line:\n continue\n elif line.startswith('p'):\n head, body = line.strip().split(':-')\n body = body.replace('.', ',')\n body = body.split('),')\n count += 1 + len(body)\n else:\n pass\n\n return count\n\ndef build_program_size():\n SETUP = f'playgol'\n runtimes = []\n for p in playtimes:\n for t in trials:\n seconds = get_build_program_size(SETUP, p, t)\n runtimes.append({'type': 'refactored', 'playtasks': p, 'trial': t, 'runtime': seconds})\n print({\"system\": \"metagol\", \"playtask\": p, \"trial\": t, \"program_size\": seconds})\n\n return runtimes\n\ncmd = sys.argv[1]\ntrials = [int(x) for x in sys.argv[2].split(',')]\n\nif cmd == 'gen':\n do_gen_data()\nif cmd == 'learn':\n play_and_buid()\nif cmd == 'test':\n test()\nif cmd == 'results':\n results()\nif cmd == 'runtime':\n runtimes_new()\nif cmd == 'runtime_trial':\n runtimes_trial()\nif cmd == 'program-size':\n build_program_size()\n" 
}, { "alpha_fraction": 0.5284656882286072, "alphanum_fraction": 0.5605887174606323, "avg_line_length": 34.07573318481445, "blob_id": "5477fe7f1f79be74464b587035a17c82141e7444", "content_id": "5d5ad9cecda66984ebbd5fb56b9daf31a11f8de1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22694, "license_type": "no_license", "max_line_length": 719, "num_lines": 647, "path": "/experiments/knorf/runners/strings/runner_nonredundant.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "import datetime\nimport os\nimport random\nimport subprocess\nimport sys\n\nimport numpy as np\nimport scipy.stats as stats\n\nnum_trials=10\nplaytimes = list(range(200, 4200, 200)) #[200, 400, 600, 800, 1000] #, 400, 600, 800, 1000] # list(range(0,2200,200))\ntrials = [1,2,3,4,5,6,7,8,9,10]\nsystems=['playgol','nopi']\nmax_string_size = 20\nROOT_FILE = os.path.dirname(os.path.abspath(__file__)) + \"/../..\"\nMIN_LITERALS = 2\nMAX_LITERALS = 3\nMAX_LAYERS = None\nMAX_TIME_S = 5400\nPRUNE = True\nEXCLUDE_ALTERNATIVES = False\nEXCLUDE_REDUNDANT_CANDS = True\nEXCLUDE_REDUNDANCIES = True\nMINIMISE_REDUNDANCIES = True\nREJECT_SINGLETONS = True\n\nexperiment_files = \\\n\"\"\"\n:-['../playgol'].\n:-['string-bk'].\n:-['../metagol'].\n\"\"\"\n\nexperiment_setup = \\\n\"\"\"\n\n:- use_module(library(time)).\n\nplay_time_interval(2).\nmax_build_time(60).\nmax_play_depth(4).\nmax_build_depth(5).\ncpus(4).\n\nmetagol:functional.\n%% metagol:max_clauses(10).\n\nfunc_test([P,s(In,Out1),s(_,[])],PS,G):-\n \\+ (metagol :prove_deduce([[P,s(In,Out2),s(_,[])]],PS,G),Out1\\= Out2).\n\"\"\"\n\nexperiment_primitives = \\\n\"\"\"\n%% tell metagol to use the BK\nprim(is_empty/1).\nprim(not_empty/1). % maybe?\n\nprim(is_space/1).\nprim(not_space/1). 
% maybe?\n\nprim(is_uppercase/1).\nprim(is_lowercase/1).\n\nprim(is_letter/1).\nprim(not_letter/1).\n\nprim(is_number/1).\nprim(not_number/1).\n\nprim(copy1/2).\nprim(skip1/2).\nprim(mk_uppercase/2).\nprim(mk_lowercase/2).\n%% prim(write1/3).\n\"\"\"\n\n\nexperiment_rest = \\\n\"\"\"\nmetarule(precon,[P/2,Q/1,R/2],([P,A,B]:-[[Q,A],[R,A,B]])).\nmetarule(postcon,[P/2,Q/2,R/1],([P,A,B]:-[[Q,A,B],[R,B]])).\nmetarule(chain,[P/2,Q/2,R/2],([P,A,B]:-[[Q,A,C],[R,C,B]])).\nmetarule(tailrec,[P/2,Q/2],([P,A,B]:-[[Q,A,C],[P,C,B]])).\n%% metarule(curry3,[P/2,Q/3,C/0],([P,A,B]:-[[Q,A,B,C]])).\n\n \na:-\n cpus(CPU_COUNT),\n set_prolog_flag(cpu_count,CPU_COUNT),\n games(Games),\n playgol(Games),\n b,\n halt.\n\nb:-\n cpus(CPU_COUNT),\n set_prolog_flag(cpu_count,CPU_COUNT),\n get_time(T),\n stamp_date_time(T, date(DY,DM,DD,TH,TM,TS,_,_,_), 'UTC'),\n format('% started solving build tasks at ~w ~w ~w ~w:~w:~w\\\\n', [DD, DM, DY, TH, TM, TS]),\n max_build_depth(BuildDepth),\n playgol:update_depth(BuildDepth),\n max_build_time(BuildTime),\n retractall(max_time(_)),\n assert(max_time(BuildTime)),\n tasks(Tasks),\n learn_tasks(Tasks,Progs),\n length(Progs,N),\n format('% num solved ~w\\\\n',[N]),\n get_time(T2),\n stamp_date_time(T2, date(DY2,DM2,DD2,TH2,TM2,TS2,_,_,_), 'UTC'),\n format('% finished solving build tasks at ~w ~w ~w ~w:~w:~w\\\\n', [DD2, DM2, DY2, TH2, TM2, TS2]),\n halt.\n\nlearn_tasks(Tasks,Progs):-\n concurrent_maplist(learn_aux,Tasks,Xs),\n findall(true,member(true,Xs),Progs).\n\nlearn_aux(T,true):-\n train_examples(T,Pos,Neg),\n max_time(MaxTime),\n catch(call_with_time_limit(MaxTime,learn(Pos,Neg,Prog)),time_limit_exceeded,(writeln('%timeout'),false)),!,\n pprint(Prog).\nlearn_aux(_,false).\n\ndo_test:-\n tasks(Tasks),\n maplist(do_test,Tasks),\n halt.\n\ndo_test(Task):-\n test_examples(Task,Pos),\n (current_predicate(Task/2) ->\n (\n format('%solved,~w,~w\\\\n',[Task,1]),\n forall(member(X,Pos),(call(X) -> format('~w,~w\\\\n',[1,1]); format('~w,~w\\\\n',[1,0])))\n );\n (\n format('%solved,~w,~w\\\\n',[Task,0]),\n forall(member(_,Pos),format('~w,~w\\\\n',[0,0]))\n )).\n\ngen_e(Task,Input,Output,Out):-\n Out=..[Task,s(Input,Output),s(_,[])].\n\nplay_examples(Task,Pos,[]):-\n findall(X,(play_pos(Task,A,B),gen_e(Task,A,B,X)),Pos1),\n sort_examples(Pos1,Pos).\n\ntrain_examples(Task,Pos,[]):-\n findall(X,(build_pos(Task,A,B),gen_e(Task,A,B,X)),Pos1),\n sort_examples(Pos1,Pos).\n\ntest_examples(Task,Pos):-\n findall(X,(test_pos(Task,A,B),gen_e(Task,A,B,X)),Pos).\n\nadd_len(Atom,Len-Atom):-\n Atom=..[_Task,s(Input,_Output),s(_,[])],\n length(Input,Len).\nremove_len(_-Atom,Atom).\n\nsort_examples(L1,L2):-\n maplist(add_len,L1,L3),\n keysort(L3,L4),\n maplist(remove_len,L4,L2).\n\"\"\"\n\ndef call_prolog(action,load_files,output):\n cmd = \"load_files(['experiment',{}],[silent(true)]). 
\".format(','.join(load_files))\n cmd += '{}.'.format(action)\n print(cmd)\n with open(output, 'w') as outf:\n p = subprocess.Popen(['swipl','-q','-G8g','-T8g','-L8g'], stdin=subprocess.PIPE, stdout=outf)\n p.stdin.write(cmd.encode())\n (output, err) = p.communicate()\n\ndef gen_syn_data(playtime,k):\n call_prolog('b({})'.format(playtime),[\"'gen-exs'\"],'data/play-{}-{}.pl'.format(playtime,k))\n\ndef load_probs(nowrites=True):\n dic = {}\n with open('probs.txt', 'r') as f:\n for line in f:\n if line.startswith('# -*- coding: utf-8 -*-'):\n continue\n xs=line.strip().split('=>')\n if len(xs)==1 and len(xs[0])>0:\n try:\n problem='b'+xs[0][2:]\n except:\n continue\n elif len(xs)>1:\n if problem not in dic:\n dic[problem] = []\n dic[problem].append(xs)\n return dic\n\ndef gen_data(dic,tasks):\n random.shuffle(tasks)\n print_train=[]\n print_test=[]\n for problem,examples in dic.items():\n if len(examples) < 10:\n continue\n random.shuffle(examples)\n train = examples[:5]\n test = examples[5:10]\n for x in train:\n print_train.append((problem,list(x[0].strip()),list(x[1].strip())))\n for x in test:\n print_test.append((problem,list(x[0].strip()),list(x[1].strip())))\n\n for problem,a,b in print_train:\n yield 'build_pos({},{},{}).\\n'.format(problem,a,b)\n for problem,a,b in print_test:\n yield 'test_pos({},{},{}).\\n'.format(problem,a,b)\n yield 'tasks({}).\\n'.format(tasks)\n\n\ndef gen_real_data():\n dic=load_probs()\n tasks = list(dic.keys())\n for x in gen_data(dic,tasks):\n yield x\n\ndef do_gen_data():\n for k in trials:\n for playtime in playtimes:\n gen_syn_data(playtime,k)\n with open('data/build-{}.pl'.format(k),'w') as f:\n for x in gen_real_data():\n f.write( x + '\\n')\n\ndef play_and_buid():\n for system in systems:\n for k in trials:\n for p in playtimes:\n playf=\"'data/play-{}-{}'\".format(p,k)\n buildf=\"'data/build-{}'\".format(k)\n programf=f\"programs/{system}/{p}-{k}.pl\"\n call_prolog('a',[playf,buildf],programf)\n\n\ndef call_prolog_new(action,load_files,output):\n cmd = \"load_files([{}],[silent(true)]). 
\".format(','.join(load_files))\n cmd += '{}.'.format(action)\n with open(output, 'w') as outf:\n p = subprocess.Popen(['swipl','-q','-G8g','-T8g','-L8g'], stdin=subprocess.PIPE, stdout=outf)\n p.stdin.write(cmd.encode())\n print(cmd)\n (output, err) = p.communicate()\n\n\ndef prepare_play_as_background(refactored_play_file, output_file):\n f = open(refactored_play_file)\n outf = open(output_file, 'w')\n\n primitives = []\n for line in f.readlines():\n if len(line) > 3:\n head, body = line.strip().split(\":-\")\n outf.write(f\"{head.replace('-', '_')} :- {body.replace('-', '_')}.\\n\")\n head = head.strip().replace(')', '(').split('(')\n head, args = head[0], head[1]\n primitives.append(f\"{head}/{len(args.split(','))}\")\n\n outf.close()\n f.close()\n\n return primitives\n\ndef generate_experiment_file(exp_file, refactored_primitives, primitives_to_add):\n f = open(exp_file, 'w')\n\n f.write(experiment_files + \"\\n\")\n f.write(f\":-['{refactored_primitives}'].\\n\")\n f.write(\"\\n\")\n\n f.write(experiment_setup + \"\\n\\n\")\n f.write(experiment_primitives + \"\\n\")\n\n for prim in primitives_to_add:\n f.write(f\"prim({prim}).\\n\")\n\n f.write(experiment_rest + \"\\n\")\n\n f.close()\n\n\ndef build_p():\n print(f\"running build_p; trials\")\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n if not os.path.exists(\"./programs_p\"):\n os.mkdir(\"./programs_nonredundant\")\n\n if not os.path.exists(f\"./programs_nonredundant\"):\n os.mkdir(f\"./programs_nonredundant\")\n\n for k in trials:\n for p in playtimes:\n print(f\"trial {k} play {p}\")\n playf = f\"{ROOT_FILE}/nonredundant_programs/strings/refactored_programs-{p}-{k}.pl\" # \"'data/play-{}-{}'\".format(p,k)\n buildf = f\"'{ROOT_FILE}/build/strings/build-{k}.pl'\" # \"'data/build-{}'\".format(k)\n programf = f\"./programs_nonredundant/programs-{p}-{k}.pl\" # f\"programs/{s}/programs-{p}-{k}.pl\"\n refprimf = f\"./programs_nonredundant/refactored_primitives-{p}-{k}.pl\" # f\"programs/{s}/programs-{p}-{k}.pl\"\n expf = f\"./programs_nonredundant/experiment-{p}-{k}.pl\" # f\"programs/{s}/programs-{p}-{k}.pl\"\n\n prims = prepare_play_as_background(playf, refprimf)\n generate_experiment_file(expf, refprimf, prims)\n\n files_to_load = [f\"'{expf}'\", f\"'{refprimf}'\", buildf]\n\n call_prolog_new('b', files_to_load, programf)\n\ndef test():\n for system in systems:\n for k in trials:\n for p in playtimes:\n buildf=\"'data/build-{}'\".format(k)\n programf=f\"'programs/{system}/{p}-{k}.pl'\"\n resultsf=f'results/{system}/{p}-{k}.pl'\n call_prolog('do_test',[buildf,programf],resultsf)\n\n# def read_file(p,k):\n# num_solved=0\n# acc=[]\n# fname = f'results/{system}/{p}-{k}.pl'\n# with open(,'r') as f:\n# for line in f:\n# line=line.strip()\n# xs=line.split(',')\n# if len(xs) <2:\n# continue\n# if line.startswith('%solved'):\n# num_solved+=int(xs[2])\n# else:\n# # k_instances+=1\n# acc+=[int(xs[1])]\n# return num_solved,acc\n\n\ndef test_new():\n # SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_singl{REJECT_SINGLETONS}'\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n 
if not os.path.exists(\"./results_nonredundant\"):\n os.mkdir(\"./results_nonredundant\")\n\n if not os.path.exists(f\"./results_nonredundant\"):\n os.mkdir(f\"./results_nonredundant\")\n\n for k in trials:\n for p in playtimes:\n buildf = f\"'{ROOT_FILE}/build/strings/build-{k}.pl'\"\n programf = f\"'./programs_nonredundant/programs-{p}-{k}.pl'\"\n resultsf = f'./results_nonredundant/programs-{p}-{k}.pl'\n expf = f\"'./programs_nonredundant/experiment-{p}-{k}.pl'\" # f\"programs/{s}/programs-{p}-{k}.pl\"\n #call_prolog('do_test',[buildf,programf],resultsf)\n\n call_prolog_new('do_test', [expf, buildf, programf], resultsf)\n\n\ndef get_acc(system,p):\n tasks=['b36', 'b132', 'b246', 'b167', 'b87', 'b304', 'b47', 'b94', 'b284', 'b116', 'b157', 'b239', 'b224', 'b285', 'b215', 'b179', 'b92', 'b227', 'b111', 'b99', 'b35', 'b38', 'b307', 'b91', 'b151', 'b83', 'b61', 'b247', 'b298', 'b67', 'b120', 'b325', 'b63', 'b7', 'b48', 'b33', 'b27', 'b108', 'b78', 'b252', 'b133', 'b1', 'b80', 'b139', 'b100', 'b308', 'b30', 'b136', 'b109', 'b3', 'b103', 'b149', 'b323', 'b29', 'b34', 'b189', 'b293', 'b134', 'b43', 'b314', 'b326', 'b324', 'b188', 'b123', 'b137', 'b98', 'b4', 'b283', 'b300', 'b249', 'b162', 'b24', 'b56', 'b292', 'b241', 'b327', 'b23', 'b6', 'b238', 'b186', 'b81', 'b156', 'b73', 'b102', 'b153', 'b113', 'b37', 'b76', 'b196', 'b5', 'b309', 'b25', 'b184', 'b181']\n\n all_accs=[]\n accs=[]\n for k in trials:\n k_acc=[]\n fname = f'results/{system}/{p}-{k}.pl'\n with open(fname,'r') as f:\n data=f.read()\n probs=data.split('%')\n for prob in probs:\n xs=prob.split('\\n')\n if len(xs) == 0:\n continue\n if xs[0].startswith('solved'):\n (_,t,solved) = xs[0].split(',')\n if t not in tasks:\n continue\n for vs in xs[1:]:\n vs = vs.split(',')\n if len(vs) != 2:\n continue\n all_accs.append(int(vs[1]))\n k_acc.append(int(vs[1]))\n accs.append(np.mean(k_acc))\n return (np.mean(accs),stats.sem(accs),all_accs)\n\n\ndef get_acc_new(setup,p):\n tasks=['b36', 'b132', 'b246', 'b167', 'b87', 'b304', 'b47', 'b94', 'b284', 'b116', 'b157', 'b239', 'b224', 'b285', 'b215', 'b179', 'b92', 'b227', 'b111', 'b99', 'b35', 'b38', 'b307', 'b91', 'b151', 'b83', 'b61', 'b247', 'b298', 'b67', 'b120', 'b325', 'b63', 'b7', 'b48', 'b33', 'b27', 'b108', 'b78', 'b252', 'b133', 'b1', 'b80', 'b139', 'b100', 'b308', 'b30', 'b136', 'b109', 'b3', 'b103', 'b149', 'b323', 'b29', 'b34', 'b189', 'b293', 'b134', 'b43', 'b314', 'b326', 'b324', 'b188', 'b123', 'b137', 'b98', 'b4', 'b283', 'b300', 'b249', 'b162', 'b24', 'b56', 'b292', 'b241', 'b327', 'b23', 'b6', 'b238', 'b186', 'b81', 'b156', 'b73', 'b102', 'b153', 'b113', 'b37', 'b76', 'b196', 'b5', 'b309', 'b25', 'b184', 'b181']\n\n all_accs=[]\n accs=[]\n for k in trials:\n k_acc=[]\n fname = f'results_nonredundant/programs-{p}-{k}.pl'\n with open(fname,'r') as f:\n data=f.read()\n probs=data.split('%')\n for prob in probs:\n xs=prob.split('\\n')\n if len(xs) == 0:\n continue\n if xs[0].startswith('solved'):\n (_,t,solved) = xs[0].split(',')\n if t not in tasks:\n continue\n for vs in xs[1:]:\n vs = vs.split(',')\n if len(vs) != 2:\n continue\n all_accs.append(int(vs[1]))\n k_acc.append(int(vs[1]))\n print({\"system\": \"No redundancy\", \"playtask\": p, \"trial\": k, \"accuracy\": np.mean(k_acc)})\n accs.append(np.mean(k_acc))\n return (np.mean(accs),stats.sem(accs),all_accs)\n\n\ndef results():\n system_accs = {}\n for system in systems:\n print(system)\n system_accs[system]=[]\n for p in playtimes:\n (acc,sem,all_accs) = get_acc(system,p)\n system_accs[system].extend(all_accs)\n 
print('({},{}) +- (0,{})'.format(p,round(acc*100,2),round(sem*100,2)))\n\n\ndef results_new():\n system_accs = {}\n for system in ['playgol']:\n print(system)\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n system_accs[system]=[]\n for p in playtimes:\n (acc,sem,all_accs) = get_acc_new(SETUP,p)\n system_accs[system].extend(all_accs)\n #print('({},{}) +- (0,{})'.format(p,round(acc*100,2),round(sem*100,2)))\n\n\ndef parse_size(setup, p, k):\n playf = f\"{ROOT_FILE}/refactored_programs_p/strings/{setup}/refactored_programs-{p}-{k}.pl.log\"\n last_non_empty_line = \"\"\n fil = open(playf)\n for line in fil.readlines():\n if len(line) > 3:\n last_non_empty_line = line.strip()\n ori_size, refactor_size = int(last_non_empty_line.split()[6]), int(last_non_empty_line.split()[-1])\n fil.close()\n\n return ori_size, refactor_size\n\n\ndef theory_sizes():\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n sizes = []\n for p in playtimes:\n for t in trials:\n ori, ref = parse_size(SETUP, p, t)\n sizes.append({'playtasks': p, 'trial': t, 'original': ori, 'refactored': ref})\n print({\"playtasks\": p, \"trial\": t, \"system\": \"knorf\", \"theory_size\": ref})\n print({\"playtasks\": p, \"trial\": t, \"system\": \"metagol\", \"theory_size\": ori})\n\n return sizes\n\n\ndef parse_runtime(setup, p, k):\n programf = f\"./programs_p/{setup}/programs-{p}-{k}.pl\"\n start_time = None\n end_time = None\n fil = open(programf)\n\n for line in fil.readlines():\n if '% started solving build tasks' in line:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n start_time = datetime.datetime(day=int(tmp[6]), month=int(tmp[7]), year=int(tmp[8]), hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n if '% finished solving build tasks' in line:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n end_time = datetime.datetime(day=int(tmp[6]), month=int(tmp[7]), year=int(tmp[8]), hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n\n if end_time is None:\n end_time = datetime.datetime.fromtimestamp(os.path.getmtime(programf))\n\n return (end_time - start_time).total_seconds()\n\ndef parse_runtime_trial(setup, p, k):\n programf = f\"./programs_nonredundant/programs-{p}-{k}.pl\"\n start_time = None\n end_time = None\n try:\n fil = open(programf)\n except Exception:\n return {}\n times = []\n last_timeout_count = 0\n\n for line in fil.readlines():\n if '% started solving build task' in line and start_time == None:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n start_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n elif 'finished solving build tasks at' in line:\n tmp = line.strip().split()\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n time = tmp[-1].split(\":\")\n end_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n last_timeout_count = 0\n elif 'timeout' in line:\n last_timeout_count += 1\n\n return {'type': 'No redundancy', 'playtasks': p, 'trial': k, 'runtime': 
(end_time - start_time).total_seconds() + last_timeout_count + 1}\n\n\n\n\ndef runtimes():\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n runtimes = []\n for p in playtimes:\n for t in trials:\n seconds = parse_runtime(SETUP, p, t)\n runtimes.append({'type': 'refactored', 'playtasks': p, 'trial': t, 'runtime': seconds})\n print({\"system\": \"knorf\", \"playtask\": p, \"trial\": t, \"runtime\": seconds})\n\n return runtimes\n\ndef runtimes_trial():\n runtimes = []\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n for p in playtimes:\n for t in trials:\n seconds = parse_runtime_trial(SETUP, p, t)\n if len(seconds):\n print(str(seconds) + \",\")\n #runtimes.append({'type': 'originl', 'playtasks': p, 'trial': t, 'runtime': seconds})\n #print({\"system\": \"metagol\", \"playtasks\": p, \"trial\": t, \"runtime\": seconds})\n\n return runtimes\n\ndef get_build_program_size(setup, p, k):\n programf = f\"./programs_p/{setup}/programs-{p}-{k}.pl\"\n count = 0\n fil = open(programf)\n\n for line in fil.readlines():\n if line.startswith('%') or len(line) < 3 or 'true' in line:\n continue\n elif line.startswith('b'):\n head, body = line.strip().split(':-')\n body = body.replace('.', ',')\n body = body.split('),')\n count += 1 + len(body)\n else:\n pass\n\n return count\n\n\ndef build_program_size():\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n runtimes = []\n for p in playtimes:\n for t in trials:\n seconds = get_build_program_size(SETUP, p, t)\n runtimes.append({'type': 'refactored', 'playtasks': p, 'trial': t, 'runtime': seconds})\n print({\"system\": \"knorf\", \"playtask\": p, \"trial\": t, \"program_size\": seconds})\n\n return runtimes\n\n\ncmd = sys.argv[1]\nstp = sys.argv[2]\n\nother_setup = stp # expected to be in form parameter:value separated with white-space\nother_setup = other_setup.strip().split()\nother_setup = dict([tuple(c.split(\":\")) for c in other_setup])\n\nif 'maxl' in other_setup:\n MAX_LITERALS = int(other_setup['maxl'])\n\nif 'minl' in other_setup:\n MIN_LITERALS = int(other_setup['minl'])\n\nif 't' in other_setup:\n if ',' not in other_setup['t']:\n trials = [int(other_setup['t'])]\n else:\n trials = [int(x) for x in other_setup['t'].split(',')]\n\nif 'mlay' in other_setup:\n MAX_LAYERS = int(other_setup['mlay'])\n\nif 'pt' in other_setup:\n if ',' not in other_setup['pt']:\n playtimes = [int(other_setup['pt'])]\n else:\n playtimes = [int(x) for x in other_setup['pt'].split(\",\")]\n\nif 'mt' in other_setup:\n MAX_TIME_S = int(other_setup['mt'])\n\nif 'p' in other_setup:\n PRUNE = bool(other_setup['p'])\n\nif 'a' in other_setup:\n EXCLUDE_ALTERNATIVES = True if other_setup['a'] == 'true' else False\n\nif 'rc' in other_setup:\n EXCLUDE_REDUNDANT_CANDS = bool(other_setup['rc'])\n\nif 'rs' in other_setup:\n REJECT_SINGLETONS = bool(other_setup['rs'])\n\nif 'rr' in other_setup:\n EXCLUDE_REDUNDANCIES = bool(other_setup['rr'])\n\nif 'mr' in other_setup:\n MINIMISE_REDUNDANCIES = bool(other_setup['mr'])\n\nif cmd == 'gen':\n 
do_gen_data()\nif cmd == 'learn':\n play_and_buid()\nif cmd == 'learn-build-p':\n build_p()\nif cmd == 'test':\n test_new()\nif cmd == 'results':\n results_new()\nif cmd == 'size':\n theory_sizes()\nif cmd == 'runtime':\n runtimes()\nif cmd == 'runtime-trial':\n runtimes_trial()\nif cmd == 'program-size':\n build_program_size()\n" }, { "alpha_fraction": 0.5059076547622681, "alphanum_fraction": 0.5273898839950562, "avg_line_length": 33.48147964477539, "blob_id": "4665683c094fb1cac191a5aaf13af2c3ab696392", "content_id": "d90b20a2b83bb5c7bec0e160b2f4c385b6f0f4b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 931, "license_type": "no_license", "max_line_length": 163, "num_lines": 27, "path": "/experiments/knorf/refactored_programs_pncorrect/lego/literals2-3_layerNone_time5400s_pruneTrue_altFalse_rcandsTrue_rrTrue_mrTrue_singlTrue/get_prog_size_stats.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "trials = list(range(1,11))\nplaytasks = list(range(200,4200,200))\n\nfor t in trials:\n for pt in playtasks:\n fname = f\"refactored_programs-{pt}-{t}.pl.log\"\n try:\n ff = open(fname)\n lines = ff.readlines()\n ff.close()\n\n lines = lines[-2:]\n predicate_line = lines[0].split()\n literal_line = lines[1].split()\n\n old_preds = predicate_line[6]\n new_preds = predicate_line[-1]\n\n old_lits = literal_line[6]\n new_lits = literal_line[-1]\n \n print({\"type\": \"predicate\", \"old\": int(old_preds), \"new\": int(new_preds), \"reduction\": float(new_preds)/float(old_preds), \"trial\": t, \"playtasks\": pt})\n\n print({\"type\": \"literal\", \"old\": int(old_lits), \"new\": int(new_lits), \"reduction\": float(new_lits)/float(old_lits), \"trial\": t, \"playtasks\": pt})\n\n except Exception:\n pass\n" }, { "alpha_fraction": 0.7591241002082825, "alphanum_fraction": 0.7591241002082825, "avg_line_length": 33.25, "blob_id": "77698acfe44ccfc0cd1c2e4cdd323ac5e67405b6", "content_id": "508c767167015e19b9aaf9942774cb417195aed0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 50, "num_lines": 4, "path": "/loreleai/reasoning/lp/__init__.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "#from loreleai.reasoning.lp.datalog.muz import MuZ\nfrom .datalog.datalogsolver import DatalogSolver\n\n#__all__ = ['MuZ', 'DatalogSolver']\n" }, { "alpha_fraction": 0.5638092160224915, "alphanum_fraction": 0.572583794593811, "avg_line_length": 34.17246627807617, "blob_id": "9826dedd00a19223c9a4bf0a61bbd1a1b4c6bfe5", "content_id": "09f08477cc4acd46762a2642f4f7eb948d34e507", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23249, "license_type": "no_license", "max_line_length": 235, "num_lines": 661, "path": "/experiments/knorf/runners/lego/runner.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "import sys\nimport subprocess\nimport numpy as np\nimport scipy.stats as stats\nimport math\nimport random\nimport os\nimport datetime\n\nnum_trials = 10\nnum_tasks = 1000\nplaytimes = list(range(200,4200, 200)) \ntrials = list(range(1,num_trials+1))\nsystems = ['playgol','nopi']\nmax_right = 5\nmax_forwards = 5\nROOT_FILE = os.path.dirname(os.path.abspath(__file__)) + \"/../..\"\nMIN_LITERALS = 2\nMAX_LITERALS = 3\nMAX_LAYERS = None\nMAX_TIME_S = 5400\nPRUNE = True\nEXCLUDE_ALTERNATIVES = False\nEXCLUDE_REDUNDANT_CANDS = 
True\nEXCLUDE_REDUNDANCIES = True\nMINIMISE_REDUNDANCIES = True\nREJECT_SINGLETONS = True\n\n\nexperiment_files = \\\n\"\"\"\n:-['../playgol'].\n:-['lego-bk'].\n:-['../metagol'].\n\"\"\"\n\nexperiment_setup = \\\n\"\"\"\n\n:- use_module(library(time)).\n\nplay_time_interval(2).\nmax_build_time(60).\nmax_play_depth(4).\nmax_build_depth(5).\ncpus(4).\n\"\"\"\n\nexperiment_primitives = \\\n\"\"\"\n%% tell metagol to use the BK\nprim(left/2).\nprim(right/2).\nprim(place1/2).\nprim(at_start/1).\nprim(not_at_start/1).\nprim(at_end/1).\nprim(not_at_end/1).\n\"\"\"\n\nexperiment_rest = \\\n\"\"\"\n%% metarules\nmetarule(ident,[P/2,Q/2],([P,A,B]:-[[Q,A,B]])).\nmetarule(precon,[P/2,Q/1,R/2],([P,A,B]:-[[Q,A],[R,A,B]])).\nmetarule(postcon,[P/2,Q/2,R/1],([P,A,B]:-[[Q,A,B],[R,B]])).\nmetarule(chain,[P/2,Q/2,R/2],([P,A,B]:-[[Q,A,C],[R,C,B]])).\nmetarule(tailrec,[P/2,Q/2],([P,A,B]:-[[Q,A,C],[P,C,B]])).\n\nmetagol:functional.\n\nfunc_test([P,A,B],PS,Prog):-\n \\+ (metagol:prove_deduce([[P,A,C]],PS,Prog),C\\=B).\n\na:-\n cpus(CPU_COUNT),\n set_prolog_flag(cpu_count,CPU_COUNT),\n games(Games),\n playgol(Games),\n b,\n halt.\n\nb:-\n cpus(CPU_COUNT),\n set_prolog_flag(cpu_count,CPU_COUNT),\n %get_time(T),\n %stamp_date_time(T, date(DY,DM,DD,TH,TM,TS,_,_,_), 'UTC'),\n %format('% started solving build tasks at ~w ~w ~w ~w:~w:~w\\\\n', [DD, DM, DY, TH, TM, TS]),\n max_build_depth(BuildDepth),\n playgol:update_depth(BuildDepth),\n max_build_time(BuildTime),\n retractall(max_time(_)),\n assert(max_time(BuildTime)),\n tasks(Tasks),\n learn_tasks(Tasks,Progs),\n length(Progs,N),\n format('% num solved ~w\\\\n',[N]),\n %get_time(T2),\n %stamp_date_time(T2, date(DY2,DM2,DD2,TH2,TM2,TS2,_,_,_), 'UTC'),\n %format('% finished solving build tasks at ~w ~w ~w ~w:~w:~w\\\\n', [DD2, DM2, DY2, TH2, TM2, TS2]),\n halt.\n\nlearn_tasks(Tasks,Progs):-\n concurrent_maplist(learn_aux,Tasks,Xs),\n findall(true,member(true,Xs),Progs).\n\nlearn_aux(T,true):-\n build_pos(T,Atom),\n Pos=[Atom],\n max_time(MaxTime),\n get_time(TTT),\n stamp_date_time(TTT, date(DY,DM,DD,TH,TM,TS,_,_,_), 'UTC'),\n format('% started solving build tasks at ~w ~w ~w ~w:~w:~w\\\\n', [DD, DM, DY, TH, TM, TS]),\n catch(call_with_time_limit(MaxTime,learn(Pos,[],Prog)),time_limit_exceeded,(writeln('%timeout'),false)),!,\n get_time(TTT2),\n stamp_date_time(TTT2, date(DY2,DM2,DD2,TH2,TM2,TS2,_,_,_), 'UTC'),\n format('% finished solving build tasks at ~w ~w ~w ~w:~w:~w\\\\n', [DD2, DM2, DY2, TH2, TM2, TS2]),\n pprint(Prog).\nlearn_aux(_,false).\n\nplay_examples(T,Pos,[]):-\n findall(Atom,play_pos(T,Atom),Pos).\n\ndo_test:-\n tasks(Tasks),\n forall(member(Task,Tasks),(\n atomic_list_concat(['b',Task],Pred),\n (current_predicate(Pred/2) ->\n format('%solved,~w,~w\\\\n',[Task,1]);\n format('%solved,~w,~w\\\\n',[Task,0])))),\n halt.\n\"\"\"\n\n\nspace_size = 6\nplay_size = [2, 4]\nbuild_space = 6\n\n\ndef gen_grid(space_size):\n s = (space_size, space_size)\n a = np.random.randint(1, size=(1,space_size)).tolist()[0]\n b = np.random.randint(space_size+1, size=(1,space_size)).tolist()[0]\n return a, b\n\n\ndef gen_data_new():\n for k in trials:\n tasks = []\n for i in range(num_tasks):\n init, end = gen_grid(build_space)\n tasks.append(f'build_pos({i},b{i}(world(1,{build_space},{init}),world(_,{build_space},{end}))).\\n')\n with open('data/build-{}.pl'.format(k),'w') as f:\n f.write('tasks({}).\\n'.format(list(range(num_tasks))))\n for x in tasks:\n f.write(x)\n for playtime in playtimes:\n with open('data/play-{}-{}.pl'.format(playtime,k),'w') as f:\n 
f.write('games({}).\\n'.format(list(range(playtime))))\n                for i in range(playtime):\n                    ps = random.randint(play_size[0], play_size[1])\n                    init, end = gen_grid(ps)\n                    f.write(f'play_pos({i},p{i}(world(1,{ps},{init}),world(_,{ps},{end}))).\\n')\n\n\ndef gen_data():\n    # legacy generator kept alongside gen_data_new; fixed so it is at least runnable\n    for k in trials:\n        for n in [num_tasks]:  # num_tasks is a single int in this runner, so wrap it to iterate\n            tasks_ = [f't{i}' for i in range(n)]\n            with open(f'data/tasks-{n}-{k}.pl', 'w') as f:\n                f.write('\\n'.join(tasks_))\n\n            with open(f'data/train-{n}-{k}.pl', 'w') as f:\n                for i in range(n):\n                    (a,b) = gen_grid(build_space)  # gen_grid requires a size argument; build_space mirrors gen_data_new\n                    f.write(f'pos_ex(t{i},{a},{b}).\\n')\n\n\ndef call_prolog(action,load_files,output):\n    cmd = \"load_files(['experiment',{}],[silent(true)]). 
\".format(','.join(load_files))\n cmd += '{}.'.format(action)\n with open(output, 'w') as outf:\n p = subprocess.Popen(['swipl','-q','-G8g','-T8g','-L8g'], stdin=subprocess.PIPE, stdout=outf)\n p.stdin.write(cmd.encode())\n print(cmd)\n (output, err) = p.communicate()\n\n\ndef build_p():\n print(f\"running build_p; trials\")\n # SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_singl{REJECT_SINGLETONS}'\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n if not os.path.exists(\"./programs_pncorrect\"):\n os.mkdir(\"./programs_pncorrect\")\n\n if not os.path.exists(f\"./programs_pncorrect/{SETUP}\"):\n os.mkdir(f\"./programs_pncorrect/{SETUP}\")\n\n for k in trials:\n for p in playtimes:\n print(f\"trial {k} play {p}\")\n playf = f\"{ROOT_FILE}/refactored_programs_pncorrect/lego/{SETUP}/refactored_programs-{p}-{k}.pl\" # \"'data/play-{}-{}'\".format(p,k)\n buildf = f\"'{ROOT_FILE}/build/lego/build-{k}.pl'\" # \"'data/build-{}'\".format(k)\n programf = f\"./programs_pncorrect/{SETUP}/programs-{p}-{k}.pl\" # f\"programs/{s}/programs-{p}-{k}.pl\"\n refprimf = f\"./programs_pncorrect/{SETUP}/refactored_primitives-{p}-{k}.pl\" # f\"programs/{s}/programs-{p}-{k}.pl\"\n expf = f\"./programs_pncorrect/{SETUP}/experiment-{p}-{k}.pl\" # f\"programs/{s}/programs-{p}-{k}.pl\"\n\n prims = prepare_play_as_background(playf, refprimf)\n generate_experiment_file(expf, refprimf, prims)\n\n files_to_load = [f\"'{expf}'\", f\"'{refprimf}'\", buildf]\n\n call_prolog_new('b', files_to_load, programf)\n\n\ndef build_pb():\n if not os.path.exists(\"./programs_pncorrectb\"):\n os.mkdir(\"./programs_pncorrectb\")\n\n for k in trials:\n for p in playtimes:\n playf = f\"{ROOT_FILE}/refactored_programs_pncorrectb/lego/refactored_programs-{p}-{k}.pl\" # \"'data/play-{}-{}'\".format(p,k)\n buildf = f\"'{ROOT_FILE}/build/lego/build-{k}.pl'\" # \"'data/build-{}'\".format(k)\n programf = f\"./programs_pncorrectb/programs-{p}-{k}.pl\" # f\"programs/{s}/programs-{p}-{k}.pl\"\n refprimf = f\"./programs_pncorrectb/refactored_primitives-{p}-{k}.pl\" # f\"programs/{s}/programs-{p}-{k}.pl\"\n expf = f\"./programs_pncorrectb/experiment-{p}-{k}.pl\" # f\"programs/{s}/programs-{p}-{k}.pl\"\n\n prims = prepare_play_as_background(playf, refprimf)\n generate_experiment_file(expf, refprimf, prims)\n\n files_to_load = [f\"'{expf}'\", f\"'{refprimf}'\", buildf]\n\n call_prolog_new('b', files_to_load, programf)\n\n\ndef test():\n for k in trials:\n for p in playtimes:\n for s in systems:\n buildf=\"'data/build-{}'\".format(k)\n programf=f\"'programs/{s}/programs-{p}-{k}.pl'\"\n resultsf=f'results/{s}/{p}-{k}.pl'\n call_prolog('do_test',[buildf,programf],resultsf)\n\n\ndef test_new():\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n if not os.path.exists(\"./results_pncorrect\"):\n os.mkdir(\"./results_pncorrect\")\n\n if not os.path.exists(f\"./results_pncorrect/{SETUP}\"):\n os.mkdir(f\"./results_pncorrect/{SETUP}\")\n\n for k in trials:\n for p in playtimes:\n buildf = f\"'{ROOT_FILE}/build/lego/build-{k}.pl'\"\n programf = 
f\"'./programs_pncorrect/{SETUP}/programs-{p}-{k}.pl'\"\n resultsf = f'./results_pncorrect/{SETUP}/{p}-{k}.pl'\n expf = f\"'./programs_pncorrect/{SETUP}/experiment-{p}-{k}.pl'\" # f\"programs/{s}/programs-{p}-{k}.pl\"\n #call_prolog('do_test',[buildf,programf],resultsf)\n\n call_prolog_new('do_test', [expf, buildf, programf], resultsf)\n\n\ndef get_accs(system,p):\n all_num_solved=[]\n all_accs=[]\n for k in trials:\n k_num_solved=[]\n fname = f'results/{system}/{p}-{k}.pl'\n with open(fname,'r') as f:\n for line in f:\n line=line.strip()\n xs=line.split(',')\n if len(xs) <2:\n continue\n if line.startswith('%solved'):\n k_num_solved+=[int(xs[2])]\n all_accs.append(int(xs[2]))\n # else:\n # all_accs+=[int(xs[1])]\n all_num_solved.append(np.mean(k_num_solved))\n return (np.mean(all_num_solved)*100,stats.sem(all_num_solved)*100,all_accs)\n\n\ndef mct(xs,ys):\n b = sum(1.0 for (x,y) in zip(xs,ys) if x == 1 and y == 0)\n c = sum(1.0 for (x,y) in zip(xs,ys) if x == 0 and y == 1)\n # print(b,c)\n McN = math.pow((b-c),2) / (b+c)\n print('P-value: %f'%(1-stats.chi2.cdf(McN,1)))\n\n\ndef results():\n system_accs = {}\n for system in systems:\n system_accs[system]=[]\n for p in playtimes:\n (num_solved,sem,all_accs) = get_accs(system,p)\n system_accs[system].extend(all_accs)\n print('({},{}) +- (0,{})'.format(p,round(num_solved,2),round(sem,2)))\n # xs=system_accs['playgol']\n # ys=system_accs['nopi']\n # mct(xs,ys)\n\n\ndef get_accs_new(setup, p):\n all_num_solved=[]\n all_accs=[]\n for k in trials:\n k_num_solved=[]\n fname = f'./results_pncorrect/{setup}/{p}-{k}.pl'\n with open(fname,'r') as f:\n for line in f:\n line=line.strip()\n xs=line.split(',')\n if len(xs) <2:\n continue\n if line.startswith('%solved'):\n k_num_solved+=[int(xs[2])]\n all_accs.append(int(xs[2]))\n # else:\n # all_accs+=[int(xs[1])]\n #p_size = []\n #for ik in trials:\n # playf = f\"{ROOT_FILE}/programs_p/lego/programs-{p}-{ik}.pl\"\n # p_counter = 0\n # for line in open(playf).readlines():\n # if len(line) > 3 and not line.startswith('%'):\n # p_counter += 1\n # p_size.append(p_counter)\n print({\"system\": \"knorf\", \"playtask\": p, \"trial\": k, \"acuracy\": np.mean(k_num_solved)})\n #print({\"system\": \"knorf\", \"playtask\": p, \"trial\": k, \"acuracy\": np.mean(k_num_solved)})\n all_num_solved.append(np.mean(k_num_solved))\n return (np.mean(all_num_solved)*100,stats.sem(all_num_solved)*100,all_accs)\n\n\ndef results_new():\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n for p in playtimes:\n (num_solved, sem, all_accs) = get_accs_new(SETUP, p)\n #print('({},{}) +- (0,{})'.format(p, round(num_solved, 2), round(sem, 2)))\n\n\ndef parse_size(setup, p, k):\n playf = f\"{ROOT_FILE}/refactored_programs_pncorrect/lego/{setup}/refactored_programs-{p}-{k}.pl.log\"\n last_non_empty_line = \"\"\n fil = open(playf)\n for line in fil.readlines():\n if len(line) > 3:\n last_non_empty_line = line.strip()\n ori_size, refactor_size = int(last_non_empty_line.split()[6]), int(last_non_empty_line.split()[-1])\n fil.close()\n\n return ori_size, refactor_size\n\n\ndef theory_sizes():\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n sizes = []\n for p in playtimes:\n for 
t in trials:\n ori, ref = parse_size(SETUP, p, t)\n sizes.append({'playtasks': p, 'trial': t, 'original': ori, 'refactored': ref})\n print({'playtasks': p, 'trial': t, 'system': \"knorf\", 'theory_size': ref})\n print({'playtasks': p, 'trial': t, 'system': \"metagol\", 'theory_size': ori})\n\n return sizes\n\n\ndef parse_runtime(setup, p, k):\n programf = f\"./programs_pncorrect/{setup}/programs-{p}-{k}.pl\"\n start_time = None\n end_time = None\n fil = open(programf)\n\n for line in fil.readlines():\n if '% started solving build tasks' in line:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n start_time = datetime.datetime(day=int(tmp[6]), month=int(tmp[7]), year=int(tmp[8]), hour=int(time[0]),\n minute=int(time[1]), second=int(float(time[2])))\n if '% finished solving build tasks' in line:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n end_time = datetime.datetime(day=int(tmp[6]), month=int(tmp[7]), year=int(tmp[8]), hour=int(time[0]),\n minute=int(time[1]), second=int(float(time[2])))\n if start_time is None:\n start_time = datetime.datetime.fromtimestamp(os.path.getctime(programf))\n\n if end_time is None:\n end_time = datetime.datetime.fromtimestamp(os.path.getmtime(programf))\n\n return (end_time - start_time).total_seconds()\n\n\ndef runtimes():\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n runtimes = []\n for p in playtimes:\n for t in trials:\n seconds = parse_runtime(SETUP, p, t)\n runtimes.append({'type': 'refactored', 'playtasks': p, 'trial': t, 'runtime': seconds})\n print({\"system\": \"knorf\", \"playtask\": p, \"trial\": t, \"runtime\": seconds})\n\n return runtimes\n\ndef parse_runtime_new(setup, p, k):\n programf = f\"./programs_pncorrect/{setup}/programs-{p}-{k}.pl\"\n start_time = None\n end_time = None\n try:\n fil = open(programf)\n except Exception:\n return []\n times = []\n\n for line in fil.readlines():\n if '% started solving build task' in line:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n start_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n elif 'finished solving build tasks at' in line:\n tmp = line.strip().split()\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n time = tmp[-1].split(\":\")\n end_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n elif 'timeout' in line:\n times.append({'type': 'Refactoring', 'playtasks': p, 'trial': k, 'runtime': 60})\n start_time=None\n\n if start_time is not None and end_time is not None:\n times.append({'type': 'Refactoring', 'playtasks': p, 'trial': k, 'runtime': (end_time - start_time).total_seconds()})\n start_time=None\n end_time=None\n\n return times\n\n\ndef parse_runtime_trial(setup, p, k):\n programf = f\"./programs_pncorrect/{setup}/programs-{p}-{k}.pl\"\n start_time = None\n end_time = None\n try:\n fil = open(programf)\n except Exception:\n return {}\n times = []\n last_timeout_count = 0\n\n for line in fil.readlines():\n if '% started solving build task' in line and start_time == None:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n start_time = datetime.datetime(day=day, month=month, 
year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n elif 'finished solving build tasks at' in line:\n tmp = line.strip().split()\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n time = tmp[-1].split(\":\")\n end_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n last_timeout_count = 0\n elif 'timeout' in line:\n last_timeout_count += 1\n\n return {'type': 'Refactoring', 'playtasks': p, 'trial': k, 'runtime': (end_time - start_time).total_seconds() + last_timeout_count + 1}\n\n\ndef runtimes_new():\n runtimes = []\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n for p in playtimes:\n for t in trials:\n seconds = parse_runtime_new(SETUP, p, t)\n runtimes += seconds\n for item in seconds:\n print(item)\n\n\ndef runtimes_trial():\n runtimes = []\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n for p in playtimes:\n for t in trials:\n seconds = parse_runtime_trial(SETUP, p, t)\n if len(seconds): \n print(str(seconds) + \",\")\n\n\ndef get_build_program_size(setup, p, k):\n programf = f\"./programs_pncorrect/{setup}/programs-{p}-{k}.pl\"\n count = 0\n fil = open(programf)\n\n for line in fil.readlines():\n if line.startswith('%') or len(line) < 3 or 'true' in line:\n continue\n elif line.startswith('b'):\n head, body = line.strip().split(':-')\n body = body.replace('.', ',')\n body = body.split('),')\n count += 1 + len(body)\n else:\n pass\n\n return count\n\ndef build_program_size():\n SETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n runtimes = []\n for p in playtimes:\n for t in trials:\n seconds = get_build_program_size(SETUP, p, t)\n runtimes.append({'type': 'refactored', 'playtasks': p, 'trial': t, 'runtime': seconds})\n print({\"system\": \"knorf\", \"playtask\": p, \"trial\": t, \"program_size\": seconds})\n\n return runtimes\n\n# play_and_buid()\n# test()\n# results()\n\ncmd = sys.argv[1]\nstp = sys.argv[2]\n\nother_setup = stp # expected to be in form parameter:value separated with white-space\nother_setup = other_setup.strip().split()\nother_setup = dict([tuple(c.split(\":\")) for c in other_setup])\n\nif 'maxl' in other_setup:\n MAX_LITERALS = int(other_setup['maxl'])\n\nif 'minl' in other_setup:\n MIN_LITERALS = int(other_setup['minl'])\n\nif 't' in other_setup:\n if ',' not in other_setup['t']:\n trials = [int(other_setup['t'])]\n else:\n trials = [int(x) for x in other_setup['t'].split(',')]\n\nif 'mlay' in other_setup:\n MAX_LAYERS = int(other_setup['mlay'])\n\nif 'pt' in other_setup:\n if ',' not in other_setup['pt']:\n playtimes = [int(other_setup['pt'])]\n else:\n playtimes = [int(x) for x in other_setup['pt'].split(\",\")]\n\nif 'mt' in other_setup:\n MAX_TIME_S = int(other_setup['mt'])\n\nif 'p' in other_setup:\n PRUNE = bool(other_setup['p'])\n\nif 'a' in other_setup:\n EXCLUDE_ALTERNATIVES = True if other_setup['a'] == 'true' else False\n\nif 'rc' in other_setup:\n EXCLUDE_REDUNDANT_CANDS = 
bool(other_setup['rc'])\n\nif 'rs' in other_setup:\n REJECT_SINGLETONS = bool(other_setup['rs'])\n\nif 'rr' in other_setup:\n EXCLUDE_REDUNDANCIES = bool(other_setup['rr'])\n\nif 'mr' in other_setup:\n MINIMISE_REDUNDANCIES = bool(other_setup['mr'])\n\nif cmd == 'gen':\n gen_data_new()\nif cmd == 'learn':\n play_and_buid()\nif cmd == 'learn-build-p':\n build_p()\nif cmd == 'learn-build-pb':\n build_pb()\nif cmd == 'test':\n test()\nif cmd == 'test-new':\n test_new()\nif cmd == 'results':\n results()\nif cmd == 'results-new':\n results_new()\nif cmd == 'size':\n theory_sizes()\nif cmd == 'runtime':\n runtimes_new()\nif cmd == 'runtime-trial':\n runtimes_trial()\nif cmd == 'program-size':\n build_program_size()\n" }, { "alpha_fraction": 0.5219248533248901, "alphanum_fraction": 0.5360668301582336, "avg_line_length": 34.71525573730469, "blob_id": "762f6fe19e59b51f1049c1e6e70c7a3c58fad4fc", "content_id": "493faf9752edbbef8f5fd4622303c9626f816332", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10536, "license_type": "no_license", "max_line_length": 164, "num_lines": 295, "path": "/playgol_experiments/e3-lego/runner.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "import sys\nimport random\nimport os\nimport numpy as np\nimport scipy.stats as stats\nsys.path.append('../')\nimport subprocess\nimport datetime\n\nnum_trials=10\nnum_tasks=1000\nplaytimes = list(range(200,4200,200))\ntrials = list(range(1,num_trials+1))\nsystems = ['playgol']\n\n\nspace_size = 6\nplay_size = [2,4]\nbuild_space = 6\n\ndef gen_grid(space_size):\n s = (space_size, space_size)\n a = np.random.randint(1, size=(1,space_size)).tolist()[0]\n b = np.random.randint(space_size+1, size=(1,space_size)).tolist()[0]\n return a,b\n\ndef gen_data_new():\n for k in trials:\n tasks = []\n for i in range(num_tasks):\n init, end = gen_grid(build_space)\n tasks.append(f'build_pos({i},b{i}(world(1,{build_space},{init}),world(_,{build_space},{end}))).\\n')\n with open('data/build-{}.pl'.format(k),'w') as f:\n f.write('tasks({}).\\n'.format(list(range(num_tasks))))\n for x in tasks:\n f.write(x)\n for playtime in playtimes:\n with open('data/play-{}-{}.pl'.format(playtime,k),'w') as f:\n f.write('games({}).\\n'.format(list(range(playtime))))\n for i in range(playtime):\n ps = random.randint(play_size[0], play_size[1])\n init, end = gen_grid(ps)\n f.write(f'play_pos({i},p{i}(world(1,{ps},{init}),world(_,{ps},{end}))).\\n')\n\ndef gen_data():\n for k in trials:\n for n in tasks:\n tasks_ = [f't{i}' for i in range(n)]\n with open(f'data/tasks-{n}-{k}.pl', 'w') as f:\n f.write('\\n'.join(tasks_))\n\n with open(f'data/train-{n}-{k}.pl', 'w') as f:\n for i in range(n):\n (a,b) = gen_grid()\n f.write(f'pos_ex(t{i},{a},{b}).\\n')\n\ndef call_prolog(action,load_files,output):\n cmd = \"load_files(['experiment',{}],[silent(true)]). 
\".format(','.join(load_files))\n cmd += '{}.'.format(action)\n with open(output, 'w') as outf:\n p = subprocess.Popen(['swipl','-q','-G8g','-T8g','-L8g'], stdin=subprocess.PIPE, stdout=outf)\n p.stdin.write(cmd.encode())\n print(cmd)\n (output, err) = p.communicate()\n\ndef play_and_buid():\n for k in trials:\n for p in playtimes:\n for s in systems:\n playf=\"'data/play-{}-{}'\".format(p,k)\n buildf=\"'data/build-{}'\".format(k)\n programf=f\"programs/{s}/programs-{p}-{k}.pl\"\n call_prolog('a',[playf,buildf],programf)\n\ndef test():\n for k in trials:\n for p in playtimes:\n for s in systems:\n buildf=\"'data/build-{}'\".format(k)\n programf=f\"'programs/{s}/programs-{p}-{k}.pl'\"\n resultsf=f'results/{s}/{p}-{k}.pl'\n call_prolog('do_test',[buildf,programf],resultsf)\n\n\ndef get_accs(system,p):\n all_num_solved=[]\n all_accs=[]\n for k in trials:\n k_num_solved=[]\n fname = f'results/{system}/{p}-{k}.pl'\n with open(fname,'r') as f:\n for line in f:\n line=line.strip()\n xs=line.split(',')\n if len(xs) <2:\n continue\n if line.startswith('%solved'):\n k_num_solved+=[int(xs[2])]\n all_accs.append(int(xs[2]))\n # else:\n # all_accs+=[int(xs[1])]\n #playf = f\"./programs/playgol/programs-{p}-{k}.pl\"\n #p_size = []\n #for ik in trials:\n # playf = f\"./programs/playgol/programs-{p}-{ik}.pl\"\n # p_counter = 0\n # for line in open(playf).readlines():\n # if len(line) > 3 and line.startswith('p'):\n # p_counter += 1\n # p_size.append(p_counter)\n print({\"system\": \"metagol\", \"playtask\": p, \"trial\": k, \"accuracy\": np.mean(k_num_solved)})\n #print({\"system\": \"metagol\", \"playtask\": p, \"trial\": k, \"accuracy\": np.mean(k_num_solved)})\n all_num_solved.append(np.mean(k_num_solved))\n return (np.mean(all_num_solved)*100,stats.sem(all_num_solved)*100,all_accs)\n\n\ndef results():\n system_accs = {}\n for system in systems:\n system_accs[system]=[]\n for p in playtimes:\n (num_solved,sem,all_accs) = get_accs(system,p)\n system_accs[system].extend(all_accs)\n #print('({},{}) +- (0,{})'.format(p,round(num_solved,2),round(sem,2)))\n\ndef get_build_program_size(setup, p, k):\n programf = f\"./programs/{setup}/programs-{p}-{k}.pl\"\n count = 0\n fil = open(programf)\n\n for line in fil.readlines():\n if line.startswith('%') or len(line) < 3 or 'true' in line:\n continue\n elif line.startswith('b'):\n head, body = line.strip().split(':-')\n body = body.replace('.', ',')\n body = body.split('),')\n count += 1 + len(body)\n else:\n pass\n\n return count\n\ndef build_program_size():\n SETUP = f'playgol'\n runtimes = []\n for p in playtimes:\n for t in trials:\n seconds = get_build_program_size(SETUP, p, t)\n runtimes.append({'type': 'refactored', 'playtasks': p, 'trial': t, 'runtime': seconds})\n print({\"system\": \"metagol\", \"playtask\": p, \"trial\": t, \"program_size\": seconds})\n\n return runtimes\n\ndef parse_runtime(setup, p, k):\n programf = f\"./programs/{setup}/programs-{p}-{k}.pl\"\n start_time = None\n end_time = None\n fil = open(programf)\n times = []\n\n for line in fil.readlines():\n if '% started solving build tasks' in line:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n start_time = datetime.datetime(day=int(tmp[6]), month=int(tmp[7]), year=int(tmp[8]), hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n if '% finished solving build tasks' in line:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n end_time = datetime.datetime(day=int(tmp[6]), month=int(tmp[7]), year=int(tmp[8]), hour=int(time[0]), minute=int(time[1]), 
second=int(float(time[2])))\n\n if end_time is None:\n end_time = datetime.datetime.fromtimestamp(os.path.getmtime(programf))\n return (end_time - start_time).total_seconds()\n\n#% started solving build tasks at 20 3 2020 21:49:16.799527645\n#% finished solving build tasks at 20 3 2020 21:49:21.883820056\ndef parse_runtime_new(setup, p, k):\n programf = f\"./programs/{setup}/programs-{p}-{k}.pl\"\n start_time = None\n end_time = None\n fil = open(programf)\n times = []\n\n for line in fil.readlines():\n if '% started solving build tasks' in line:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n start_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n elif '% finished solving build tasks' in line:\n tmp = line.strip().split()\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n time = tmp[-1].split(\":\")\n end_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n elif 'timeout' in line:\n times.append({'type': 'No refactoring', 'playtasks': p, 'trial': k, 'runtime': 60})\n start_time=None\n\n if start_time is not None and end_time is not None:\n times.append({'type': 'No refactoring', 'playtasks': p, 'trial': k, 'runtime': (end_time - start_time).total_seconds()})\n start_time=None\n end_time=None\n\n return times\n\n\ndef parse_runtime_trial(setup, p, k):\n programf = f\"./programs/{setup}/programs-{p}-{k}.pl\"\n start_time = None\n end_time = None\n fil = open(programf)\n times = []\n last_timeout_count = 0\n\n for line in fil.readlines():\n if '% started solving build tasks' in line and start_time == None:\n tmp = line.strip().split()\n time = tmp[-1].split(\":\")\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n start_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n elif '% finished solving build tasks' in line:\n tmp = line.strip().split()\n year=int(tmp[-2])\n month=int(tmp[-3])\n day=int(tmp[-4])\n time = tmp[-1].split(\":\")\n last_timeout_count = 0\n end_time = datetime.datetime(day=day, month=month, year=year, hour=int(time[0]), minute=int(time[1]), second=int(float(time[2])))\n elif 'timeout' in line:\n last_timeout_count += 1\n\n return {'type': 'No refactoring', 'playtasks': p, 'trial': k, 'runtime': (end_time - start_time).total_seconds() + last_timeout_count*60}\n\n\ndef runtimes_new():\n runtimes = []\n for p in playtimes:\n for t in trials:\n seconds = parse_runtime_new('playgol', p, t)\n runtimes += seconds\n for item in seconds:\n print(item)\n #runtimes.append({'type': 'originl', 'playtasks': p, 'trial': t, 'runtime': seconds})\n #print({\"system\": \"metagol\", \"playtasks\": p, \"trial\": t, \"runtime\": seconds})\n\n return runtimes\n \n\ndef runtimes_trial():\n runtimes = []\n for p in playtimes:\n for t in trials:\n seconds = parse_runtime_trial('playgol', p, t)\n print(str(seconds) + \",\")\n #runtimes.append({'type': 'originl', 'playtasks': p, 'trial': t, 'runtime': seconds})\n #print({\"system\": \"metagol\", \"playtasks\": p, \"trial\": t, \"runtime\": seconds})\n\n return runtimes\n\n\ndef runtimes():\n runtimes = []\n for p in playtimes:\n for t in trials:\n seconds = parse_runtime('playgol', p, t)\n runtimes.append({'type': 'originl', 'playtasks': p, 'trial': t, 'runtime': seconds})\n print({\"system\": \"metagol\", \"playtasks\": p, 
\"trial\": t, \"runtime\": seconds})\n\n return runtimes\n\n\ncmd = sys.argv[1]\ntrials = [int(x) for x in sys.argv[2].split(',')]\n\nif cmd == 'gen':\n gen_data_new()\nif cmd == 'learn':\n play_and_buid()\nif cmd == 'test':\n test()\nif cmd == 'results':\n results()\nif cmd == 'program-size':\n build_program_size()\nif cmd == 'runtime':\n runtimes_new()\nif cmd == 'runtime_trial':\n runtimes_trial()\n" }, { "alpha_fraction": 0.529411792755127, "alphanum_fraction": 0.5686274766921997, "avg_line_length": 11.5, "blob_id": "5f3c0a253184645a34d24dccc17a6e8e769e80fc", "content_id": "d2b866b0097797419657026a26f8cc75aa591776", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 51, "license_type": "no_license", "max_line_length": 22, "num_lines": 4, "path": "/loreleai/language/utils.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "MUZ = \"muz\"\nLP = 1\nFOL = 2\nKANREN_LOGPY = \"logpy\"\n\n" }, { "alpha_fraction": 0.6546112298965454, "alphanum_fraction": 0.6638336181640625, "avg_line_length": 37.943660736083984, "blob_id": "a3cc33fbacc47a4d6242a0bc3381fb4c762a6ad6", "content_id": "550f680351c35fa64775846dfcc3509b4f3c4e7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5530, "license_type": "no_license", "max_line_length": 231, "num_lines": 142, "path": "/experiments/knorf/refactor_programs.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "import datetime\nimport os\nimport sys\n\nfrom loreleai.language.lp import ClausalTheory\nfrom loreleai.learning.restructuring import Restructor, NUM_LITERALS\n\nROOT_FOLDER = os.path.abspath(os.path.dirname(__file__))\nTRIAL = [1,2,3,4,5,6,7,8,9,10]\nNUMBER_PLAY_TASKS = list(range(200, 4200, 200)) #[200, 400, 600, 800, 1000]\nTHREADS = 4\nMAX_TIME_S = 1 * 2 * 60 # hour * min * sec\nMAX_LAYERS = None\nMIN_LITERALS = 2\nMAX_LITERALS = 2\nPRUNE = True\nEXCLUDE_ALTERNATIVES = False\nEXCLUDE_REDUNDANT_CANDS = True\nEXCLUDE_REDUNDANCIES = True\nREJECT_SINGLETONS = True\nMINIMISE_REDUNDANCIES = True\n\n\ndef refactor_theory(input_theory_file, output_theory_file, max_literals, min_literals, max_layers, max_time, prune, alternatives, redundant_cands, redundancies, reject_singletons, minimise_red):\n import logging\n logging.root.handlers = []\n\n theory = ClausalTheory(read_from_file=input_theory_file)\n unfolded_theory = theory.unfold()\n\n original_theory_preds = theory.get_predicates()\n unfolded_theory_preds = unfolded_theory.get_predicates()\n difference_preds = original_theory_preds.difference(unfolded_theory_preds)\n\n frms = unfolded_theory.get_formulas()\n\n unfolded_theory = ClausalTheory([x for x in frms if len(x) > 1 or len(x) == 1 and x.get_head().get_predicate() in theory._recursive_predicates])\n unfolded_theory.remove_duplicates()\n\n restructurer = Restructor(max_literals=max_literals, min_literals=min_literals,\n max_arity=2, head_variable_selection=2,\n logl=logging.INFO, logfile=f\"{output_theory_file}.log\",\n minimise_redundancy=minimise_red, exact_redundancy=False, prevent_redundancies=redundancies,\n exclude_alternatives=alternatives, objective_type=NUM_LITERALS,\n exclude_redundant_cands=redundant_cands, reject_singleton=reject_singletons)\n\n restructurer._logger.info(f\"Max number of predicates: {len(difference_preds)}\")\n\n cls, thr = restructurer.restructure(unfolded_theory, max_layers=max_layers,\n max_predicate=len(difference_preds), num_threads=THREADS,\n 
max_time_s=max_time, prune_candidates=prune)\n\n out_th = open(output_theory_file, 'w')\n for frm in thr.get_formulas():\n out_th.write(str(frm) + \"\\n\")\n\n out_th.close()\n\n # theory.visualize(f'{output_theory_file}_visual_original.pdf', only_numbers=True)\n # thr.visualize(f'{output_theory_file}_visual_refactored.pdf', only_numbers=True)\n\n restructurer._logger.info(f\"Original theory has {len(difference_preds)} invented predicates, refactored one {len(cls)}\")\n restructurer._logger.info(f\"Original theory has {theory.num_literals()} literals, while refactored one {thr.num_literals()}\")\n\n\nDOMAIN = sys.argv[1]\nsetup = sys.argv[2]\n\nother_setup = sys.argv[3] # expected to be in form parameter:value separated with white-space\nother_setup = other_setup.strip().split()\nother_setup = dict([tuple(c.split(\":\")) for c in other_setup])\n\nif 'maxl' in other_setup:\n MAX_LITERALS = int(other_setup['maxl'])\n\nif 'minl' in other_setup:\n MIN_LITERALS = int(other_setup['minl'])\n\nif 't' in other_setup:\n if ',' not in other_setup['t']:\n TRIAL = [int(other_setup['t'])]\n else:\n TRIAL = [int(x) for x in other_setup['t'].split(',')]\n\nif 'mlay' in other_setup:\n MAX_LAYERS = int(other_setup['mlay'])\n\nif 'pt' in other_setup:\n if ',' not in other_setup['pt']:\n NUMBER_PLAY_TASKS = [int(other_setup['pt'])]\n else:\n NUMBER_PLAY_TASKS = [int(x) for x in other_setup['pt'].split(\",\")]\n\nif 'mt' in other_setup:\n MAX_TIME_S = int(other_setup['mt'])\n\nif 'p' in other_setup:\n PRUNE = bool(other_setup['p'])\n\nif 'a' in other_setup:\n EXCLUDE_ALTERNATIVES = True if other_setup['a'] == 'true' else False\n\nif 'rc' in other_setup:\n EXCLUDE_REDUNDANT_CANDS = bool(other_setup['rc'])\n\nif 'rr' in other_setup:\n EXCLUDE_REDUNDANCIES = bool(other_setup['rr'])\n\nif 'mr' in other_setup:\n MINIMISE_REDUNDANCIES = bool(other_setup['mr'])\n\nif 'thr' in other_setup:\n THREADS = int(other_setup['thr'])\n\nif 'rs' in other_setup:\n REJECT_SINGLETONS = bool(other_setup['rs'])\n\n\nPROGRAM_FOLDER = ROOT_FOLDER + f\"/programs_{setup}\"\nOUTPUT_FOLDER = ROOT_FOLDER + f\"/refactored_programs_{setup}\"\nSETUP = f'literals{MIN_LITERALS}-{MAX_LITERALS}_layer{MAX_LAYERS}_time{MAX_TIME_S}s_prune{PRUNE}_alt{EXCLUDE_ALTERNATIVES}_rcands{EXCLUDE_REDUNDANT_CANDS}_rr{EXCLUDE_REDUNDANCIES}_mr{MINIMISE_REDUNDANCIES}_singl{REJECT_SINGLETONS}'\n\nif not os.path.exists(OUTPUT_FOLDER):\n os.mkdir(OUTPUT_FOLDER)\n\nif not os.path.exists(f'{OUTPUT_FOLDER}/{DOMAIN}'):\n os.mkdir(f'{OUTPUT_FOLDER}/{DOMAIN}')\n\nif not os.path.exists(f'{OUTPUT_FOLDER}/{DOMAIN}/{SETUP}'):\n os.mkdir(f'{OUTPUT_FOLDER}/{DOMAIN}/{SETUP}')\n\nfor num_plays in NUMBER_PLAY_TASKS:\n for trial in TRIAL:\n file_name = f'programs-{num_plays}-{trial}.pl'\n refactored_fn = f'refactored_{file_name}'\n\n print(f\"[{datetime.datetime.now()}] working on {file_name}\")\n print(f\" refactoring to {refactored_fn}\")\n\n refactor_theory(f'{PROGRAM_FOLDER}/{DOMAIN}/{file_name}', f'{OUTPUT_FOLDER}/{DOMAIN}/{SETUP}/{refactored_fn}',\n MAX_LITERALS, MIN_LITERALS, MAX_LAYERS, MAX_TIME_S, PRUNE, EXCLUDE_ALTERNATIVES,\n EXCLUDE_REDUNDANT_CANDS, EXCLUDE_REDUNDANCIES, REJECT_SINGLETONS, MINIMISE_REDUNDANCIES)\n" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 22, "blob_id": "d85ea628cdd3264bf697d382cce0624c144c8296", "content_id": "bce13a45aaa181b9347e91482719aca9af1bc5b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22, "license_type": "no_license", 
"max_line_length": 22, "num_lines": 1, "path": "/loreleai/language/__init__.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "from .utils import MUZ" }, { "alpha_fraction": 0.5971107482910156, "alphanum_fraction": 0.5995184779167175, "avg_line_length": 37.15306091308594, "blob_id": "e5b983fa60a3b8ddb5a38a6030299505832908bf", "content_id": "d43e13fee23f487aa1965a1b8e3d30c8aebac54b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3738, "license_type": "no_license", "max_line_length": 118, "num_lines": 98, "path": "/loreleai/reasoning/lp/kanren/minikanren.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "from typing import Union, Dict, Sequence, Tuple\n\nimport kanren\n\nfrom loreleai.language.kanren import Constant, Type, Variable, Predicate, Literal, Clause, c_const, \\\n construct_recursive_rule\nfrom loreleai.language.utils import KANREN_LOGPY\nfrom ..lpsolver import LPSolver\n\n\nclass MiniKanren(LPSolver):\n\n def __init__(self, knowledge_base=None, background_knowledge=None):\n super().__init__(KANREN_LOGPY, knowledge_base, background_knowledge)\n\n def declare_constant(self, elem_constant: Constant) -> None:\n elem_constant.add_engine_object(elem_constant.name)\n\n def declare_type(self, elem_type: Type) -> None:\n # No types in miniKanren\n pass\n\n def declare_variable(self, elem_variable: Variable) -> None:\n v = kanren.var()\n elem_variable.add_engine_object(v)\n\n def declare_predicate(self, elem_predicate: Predicate) -> None:\n # predicate declared once facts and rules are added\n # predicates used as facts need to be declared as\n pass\n\n def assert_fact(self, fact: Literal) -> None:\n try:\n fact.get_predicate().get_engine_obj(KANREN_LOGPY)\n except Exception:\n fact.get_predicate().add_engine_object((KANREN_LOGPY, kanren.Relation()))\n\n kanren.fact(fact.get_predicate().get_engine_obj(KANREN_LOGPY),\n *[x.as_kanren() for x in fact.get_terms()]\n )\n\n def assert_rule(self, rule: Union[Clause, Sequence[Clause]]) -> None:\n # only needs to add a miniKanren object to the predicate in the head\n if isinstance(rule, Clause):\n if rule.is_recursive():\n raise Exception(f\"recursive rule needs to be added together with the base base: {rule}\")\n else:\n obj = rule.as_kanren()\n rule.get_head().get_predicate().add_engine_object((KANREN_LOGPY, obj))\n else:\n\n obj = construct_recursive_rule(rule)\n\n rule[0].get_head().get_predicate().add_engine_object((KANREN_LOGPY, obj))\n\n def _query(self, query: Union[Literal, Clause], num_sols=1) -> Tuple[Sequence[Sequence[str]], Sequence[Variable]]:\n if isinstance(query, Literal):\n vars = [x.as_kanren() for x in query.get_variables()]\n ori_vars = [x for x in query.get_variables()]\n if len(vars) == 0:\n # needed in case\n ori_vars = [x.as_kanren() for x in query.get_terms()]\n else:\n vars = [x.as_kanren() for x in query.get_head().get_variables()]\n ori_vars = [x for x in query.get_head().get_variables()]\n\n if isinstance(query, Literal):\n goals = [query.as_kanren()]\n else:\n goals = [x.as_kanren() for x in query.get_atoms()]\n\n return kanren.run(num_sols, vars, *goals), ori_vars\n\n def has_solution(self, query: Union[Literal, Clause]) -> bool:\n if isinstance(query, (Literal, Clause)):\n res, _ = self._query(query, num_sols=1)\n\n return True if res else False\n else:\n raise Exception(f\"cannot query {type(query)}\")\n\n def one_solution(self, query: Union[Literal, Clause]) -> Dict[Variable, Constant]:\n res, vars 
= self._query(query, num_sols=1)\n\n if len(res) == 0:\n return {}\n\n return dict(zip(vars, [c_const(x, vars[ind].get_type()) for ind, x in enumerate(res[0])]))\n\n def all_solutions(self, query: Union[Literal, Clause]) -> Sequence[Dict[Variable, Constant]]:\n res, vars = self._query(query, num_sols=0)\n\n if len(res) == 0:\n return []\n\n return [\n dict(zip(vars, [c_const(y, vars[ind].get_type()) for ind, y in enumerate(x)])) for x in res\n ]" }, { "alpha_fraction": 0.5544053912162781, "alphanum_fraction": 0.5559868812561035, "avg_line_length": 31.502336502075195, "blob_id": "2f8f1a2169f6cdd26d43c9f83f54661f548e0869", "content_id": "de0fea2b42ad809da4d022ccaec05e04d3e7dd3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 41733, "license_type": "no_license", "max_line_length": 157, "num_lines": 1284, "path": "/loreleai/language/commons.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "from abc import ABC\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom itertools import combinations, product\nfrom typing import Dict, List, Tuple, Sequence, Set, Union, Iterator\n\nimport kanren\nimport networkx as nx\nimport z3\n\nfrom .utils import MUZ, LP, FOL, KANREN_LOGPY\n\n\nclass Type:\n def __init__(self, name: str):\n self.name = name\n self.elements = set()\n self._engine_objects = {}\n\n def add(self, elem):\n self.elements.add(elem)\n\n def remove(self, elem):\n self.elements.remove(elem)\n\n def add_engine_object(self, elem):\n if z3.is_sort(elem):\n self._engine_objects[MUZ] = elem\n else:\n raise Exception(f\"unknown Type object {type(elem)}\")\n\n def get_engine_obj(self, eng):\n assert eng in [MUZ, KANREN_LOGPY]\n return self._engine_objects[eng]\n\n def as_muz(self):\n return self._engine_objects[MUZ]\n\n def as_kanren(self):\n raise Exception(\"types not supported in kanren\")\n\n def __add__(self, other):\n self.add(other)\n\n def __repr__(self):\n return self.name\n\n def __len__(self):\n return len(self.elements)\n\n def __eq__(self, other):\n if isinstance(self, type(other)):\n return self.name == other.name\n else:\n return False\n\n def __hash__(self):\n return hash(self.__repr__())\n\n\nclass Term:\n \"\"\"\n Term base class. 
A common base class for Predicate, Constant, Variable and Functor symbols.\n    \"\"\"\n\n    def __init__(self, name, sym_type):\n        self.name = name\n        self.type = sym_type\n        self.hash_cache = None\n        self._engine_objects = {}\n\n    def arity(self) -> int:\n        \"\"\"\n        Returns the arity of the term\n\n        Returns:\n            int\n        \"\"\"\n        raise Exception(\"Not implemented!\")\n\n    def get_type(self) -> \"Type\":\n        \"\"\"\n        Returns the type of the term\n        \"\"\"\n        return self.type\n\n    def get_name(self) -> str:\n        \"\"\"\n        Returns the name of the term\n\n        Return:\n            [str]\n        \"\"\"\n        return self.name\n\n    def add_engine_object(self, elem) -> None:\n        \"\"\"\n        Adds an engine object representing the\n\n        \"\"\"\n        raise NotImplementedError()\n\n    def as_muz(self):\n        \"\"\"\n        Returns the object's representation in Z3 Datalog engine (muZ)\n        \"\"\"\n        return self._engine_objects[MUZ]\n\n    def as_kanren(self):\n        \"\"\"\n        Returns the object's representation in the miniKanren engine\n        \"\"\"\n        return self._engine_objects[KANREN_LOGPY]\n\n    def get_engine_obj(self, eng):\n        assert eng in [MUZ, KANREN_LOGPY]\n        return self._engine_objects[eng]\n\n    def __eq__(self, other):\n        if isinstance(self, type(other)):\n            return self.name == other.name and self.type == other.type\n        else:\n            return False\n\n    def __repr__(self):\n        return self.name\n\n    def __hash__(self):\n        if self.hash_cache is None:\n            self.hash_cache = hash(self.__repr__())\n        return self.hash_cache  # hash(self.__repr__())\n\n\n@dataclass\nclass Constant(Term):\n    \"\"\"\n    Implements a constant in\n    \"\"\"\n\n    def __init__(self, name, sym_type):\n        super().__init__(name, sym_type)\n        self._id = len(sym_type)\n        self.type.add(self)\n\n    def arity(self) -> int:\n        return 1\n\n    def id(self) -> int:\n        return self._id\n\n    def add_engine_object(self, elem):\n        if z3.is_bv_value(elem):\n            self._engine_objects[MUZ] = elem\n        elif isinstance(elem, str):\n            self._engine_objects[KANREN_LOGPY] = elem\n        else:\n            raise Exception(f\"unsupported Constant object {type(elem)}\")\n\n    def __repr__(self):\n        return self.name\n\n    def __hash__(self):\n        if self.hash_cache is None:\n            self.hash_cache = hash(self.__repr__())\n        return self.hash_cache  # hash(self.__repr__())\n\n\n@dataclass\nclass Variable(Term):\n    \"\"\"\n    Implements a Variable functionality\n    \"\"\"\n\n    def __init__(self, name: str, sym_type):\n        if name[0].islower():\n            raise Exception(\"Variables should be uppercase!\")\n        super().__init__(name, sym_type)\n\n    def arity(self):\n        return 1\n\n    def add_engine_object(self, elem):\n        if z3.is_expr(elem):\n            self._engine_objects[MUZ] = elem\n        elif isinstance(elem, kanren.Var):\n            self._engine_objects[KANREN_LOGPY] = elem\n        else:\n            raise Exception(f\"unsupported Variable object: {type(elem)}\")\n\n    def __repr__(self):\n        return self.name\n\n    def __hash__(self):\n        if self.hash_cache is None:\n            self.hash_cache = hash(self.__repr__() + \"/\" + str(self.type))\n        return self.hash_cache  # hash(self.__repr__() + \"/\" + str(self.type))\n\n    def __eq__(self, other):\n        if isinstance(self, type(other)):\n            return self.name == other.name and self.type == other.type\n        else:\n            return False\n\n\n@dataclass\nclass Structure(Term):\n    def __init__(self, name: str, sym_type, arguments):\n        super(Structure, self).__init__(name, sym_type)\n        self.arguments = arguments\n\n    def __repr__(self):\n        return \"{}({})\".format(self.name, \",\".join(self.arguments))\n\n    def __eq__(self, other):\n        if isinstance(self, type(other)):\n            return (\n                self.name == other.name\n                and len(self.arguments) == len(other.arguments)\n                and all([x == y for (x, y) in 
zip(self.arguments, other.arguments)])\n )\n else:\n return False\n\n def arity(self):\n return len(self.arguments)\n\n def add_engine_object(self, elem):\n raise NotImplementedError()\n\n\n@dataclass\nclass Predicate:\n def __init__(self, name: str, arity: int, arguments: List[Type] = None):\n self.name = name\n self.arity = arity\n self.argument_types = (\n arguments if arguments else [Type(\"thing\") for _ in range(arity)]\n )\n self.hash_cache = None\n self._engine_objects = {}\n\n def get_name(self) -> str:\n return self.name\n\n def get_arity(self) -> int:\n return self.arity\n\n def get_arg_types(self) -> List[Type]:\n return self.argument_types\n\n def signature(self) -> Tuple[str, int]:\n return self.name, self.get_arity()\n\n def add_engine_object(self, elem):\n if isinstance(elem, tuple):\n # add object as (engine name, object)\n assert elem[0] in [MUZ, KANREN_LOGPY]\n self._engine_objects[elem[0]] = elem[1]\n elif z3.is_func_decl(elem):\n self._engine_objects[MUZ] = elem\n elif isinstance(elem, kanren.Relation):\n self._engine_objects[KANREN_LOGPY] = elem\n else:\n raise Exception(f\"unsupported Predicate object {type(elem)}\")\n\n def get_engine_obj(self, eng):\n assert eng in [MUZ, KANREN_LOGPY]\n return self._engine_objects[eng]\n\n def as_muz(self):\n return self._engine_objects[MUZ]\n\n def as_kanren(self):\n return self._engine_objects[KANREN_LOGPY]\n\n def __eq__(self, other):\n if isinstance(self, type(other)):\n return (\n self.get_name() == other.get_name()\n and self.get_arity() == other.get_arity()\n and all(\n [\n x == y\n for (x, y) in zip(self.argument_types, other.get_arg_types())\n ]\n )\n )\n else:\n return False\n\n def __repr__(self):\n return \"{}({})\".format(\n self.name, \",\".join([str(x) for x in self.argument_types])\n )\n\n def __hash__(self):\n if self.hash_cache is None:\n self.hash_cache = hash(self.__repr__())\n return self.hash_cache\n\n def __call__(self, *args, **kwargs):\n assert len(args) == self.get_arity()\n assert all([isinstance(x, (Constant, Variable, Structure)) for x in args])\n global global_context\n\n if global_context.get_logic() == LP:\n return Literal(self, list(args))\n else:\n raise Exception(\"FOL not supported yet!\")\n\n\nclass Formula:\n def __init__(self):\n self._properties = {}\n self._hash_cache = None\n\n def substitute(self, term_map: Dict[Term, Term]):\n raise Exception(\"Not implemented yet!\")\n\n def get_variables(self):\n raise Exception(\"Not implemented yet!\")\n\n def get_terms(self):\n raise Exception(\"Not implemented yet!\")\n\n def get_predicates(self) -> Set[Predicate]:\n raise Exception(\"Not implemented yet!\")\n\n def add_property(self, property_name: str, value):\n self._properties[property_name] = value\n\n def get_property(self, property_name: str):\n return self._properties.get(property_name, None)\n\n def has_singleton_var(self) -> bool:\n raise Exception(\"Not implemented yet!\")\n\n def as_muz(self):\n raise NotImplementedError()\n\n def as_kanren(self, base_case_recursion=None):\n raise NotImplementedError()\n\n def __hash__(self):\n if self._hash_cache is None:\n self._hash_cache = hash(self.__repr__())\n\n return self._hash_cache\n\n\n@dataclass\nclass Not(Formula):\n def __init__(self, formula: Formula):\n super(Not, self).__init__()\n self.formula = formula\n\n def substitute(self, term_map: Dict[Term, Term]):\n return Not(self.formula.substitute(term_map))\n\n def get_variables(self) -> List[Variable]:\n return self.formula.get_variables()\n\n def get_terms(self) -> List[Term]:\n return 
self.formula.get_terms()\n\n    def get_formula(self) -> Formula:\n        return self.formula\n\n    def get_predicates(self) -> Set[Predicate]:\n        return self.formula.get_predicates()\n\n    def as_muz(self):\n        return z3.Not(self.formula.as_muz())\n\n    def as_kanren(self, base_case_recursion=None):\n        raise Exception(\"miniKanren does not support negation\")\n\n    def __hash__(self):\n        if self._hash_cache is None:\n            self._hash_cache = hash(self.__repr__())\n\n        return self._hash_cache\n\n\n@dataclass\nclass Literal(Formula):\n    def __init__(self, predicate: Predicate, arguments: List[Term]):\n        super(Literal, self).__init__()\n        self.predicate = predicate\n        self.arguments = arguments\n        self.arg_signature = []\n\n    def substitute(self, term_map: Dict[Term, Term]):\n        return c_literal(\n            self.predicate,\n            [term_map[x] if x in term_map else x for x in self.arguments],\n        )\n\n    def get_predicate(self) -> Predicate:\n        return self.predicate\n\n    def get_predicates(self) -> Set[Predicate]:\n        return {self.get_predicate()}\n\n    def get_variables(self) -> List[Variable]:\n        return [x for x in self.arguments if isinstance(x, Variable)]\n\n    def get_terms(self) -> List[Term]:\n        return [x for x in self.arguments]\n\n    def as_muz(self):\n        args = [x.as_muz() for x in self.arguments]\n        return self.predicate.as_muz()(*args)\n\n    def as_kanren(self, base_case_recursion=None):\n        # not used here, provides base cases for the recursion\n        args = [x.as_kanren() for x in self.arguments]\n        return self.predicate.as_kanren()(*args)\n\n    def __repr__(self):\n        return \"{}({})\".format(\n            self.predicate.get_name(), \",\".join([str(x) for x in self.arguments])\n        )\n\n    def __eq__(self, other):\n        if isinstance(self, type(other)):\n            return (\n                self.predicate == other.predicate and self.arguments == other.arguments\n            )\n        else:\n            return False\n\n    def __and__(self, other) -> \"Body\":\n        return Body(self, other)\n\n    def __le__(self, other: Union[\"Literal\", \"Body\"]) -> \"Clause\":\n        if isinstance(other, Body):\n            return Clause(self, other)\n        else:\n            return Clause(self, [other])\n\n    def __hash__(self):\n        if self._hash_cache is None:\n            self._hash_cache = hash(self.__repr__())\n\n        return self._hash_cache\n\n\n@dataclass\nclass Body:\n    def __init__(self, *literals):\n        self._literals = list(literals)\n\n    def get_literals(self):\n        return self._literals\n\n    def __and__(self, other) -> \"Body\":\n        if isinstance(other, Literal):\n            self._literals += [other]\n            return self\n        elif isinstance(other, Body):\n            self._literals += other.get_literals()\n            return self\n        else:\n            raise Exception(\n                f\"Body can be constructed only with Atom or Body, not {type(other)}\"\n            )\n\n\n@dataclass\nclass Clause(Formula):\n    \"\"\"\n    Implements the clause functionality\n\n    Args:\n        head (Literal): head atom of the clause\n        body (List(Atom)): list of atoms in the body of the clause\n    \"\"\"\n\n    def __init__(self, head: Literal, body: Union[List[Literal], Body]):\n        super(Clause, self).__init__()\n        self._head: Literal = head\n\n        if isinstance(body, Body):\n            self._body: Sequence[Literal] = body.get_literals()\n        else:\n            self._body: Sequence[Literal] = body\n        self._body = self._get_atom_order()\n        self._terms = set()\n        self._repr_cache = None\n        self.term_signatures = None\n        self.inverted_term_signatures = None\n\n        for lit in self._body:\n            self._terms = self._terms.union(lit.get_terms())\n\n    def substitute(self, term_map: Dict[Term, Term]):\n        \"\"\"\n        Substitute the terms in the clause\n\n        Args:\n            term_map (Dict[Term, Term]): mapping of the terms to their replacements\n                                         (key: term from the clause, 
value: new term to replace it with)\n\n        Return:\n            new clause with the replaced literals\n        \"\"\"\n        return Clause(\n            self._head.substitute(term_map),\n            list(map(lambda x: x.substitute(term_map), self._body)),\n        )\n\n    def get_predicates(self) -> Set[Predicate]:\n        \"\"\"\n        Returns the predicates in the clause\n        \"\"\"\n        return set([x.get_predicate() for x in self._body])\n\n    def get_variables(self) -> Set[Variable]:\n        \"\"\"\n        Returns the set of variables in the clause\n        \"\"\"\n        variables = set()\n\n        for atom in self._body:\n            variables = variables.union(atom.get_variables())\n\n        return variables\n\n    def get_atoms(self, with_predicates: Set[Predicate] = None) -> List[Literal]:\n        \"\"\"\n        Returns the set of atoms in the clause\n\n        Args:\n            with_predicates (Set[Predicates], optional): return only atoms with these predicates\n        \"\"\"\n        if with_predicates is None:\n            return self._body\n        else:\n            return [x for x in self._body if x.get_predicate() in with_predicates]\n\n    def get_head(self):\n        return self._head\n\n    def get_term_signatures(self):\n        if self.term_signatures is None:\n            self.term_signatures = _create_term_signatures(self._body)\n            self.inverted_term_signatures = dict(\n                [(frozenset(v.items()), k) for k, v in self.term_signatures.items()]\n            )\n\n        return self.term_signatures\n\n    def has_singleton_var(self) -> bool:\n        var_count = {}\n        for v in self._head.get_variables():\n            if v not in var_count:\n                var_count[v] = 0\n            var_count[v] += 1\n\n        for atm in self._body:\n            for v in atm.get_variables():\n                if v not in var_count:\n                    var_count[v] = 0\n                var_count[v] += 1\n\n        return len([1 for k, v in var_count.items() if v == 1]) > 0\n\n    def _check_for_unification_with_body(\n        self, literals: List[Union[Literal, Not]]\n    ) -> List[Dict[Term, Term]]:\n        \"\"\"\n        Checks whether the body of the clause unifies with the provided set of literals\n\n        Args:\n            literals (List[Union[Atom, Not]]): literals (another clause)\n\n        Returns:\n            A list of possible substitutions\n            (each substitution is a dictionary where the\n                -- key is the variables from the clause which should be substituted\n                -- the values are the terms from the literals which should be used as substitutions)\n\n        # TODO: check whether everything is correct for working with constants\n        \"\"\"\n        if self.term_signatures is None:\n            self.term_signatures = _create_term_signatures(self._body)\n            self.inverted_term_signatures = dict(\n                [(frozenset(v.items()), k) for k, v in self.term_signatures.items()]\n            )\n\n        test_clause_literals = _create_term_signatures(literals)\n        clause_literals = self.inverted_term_signatures\n\n        test_clause_literals = dict(\n            [(frozenset(v.items()), k) for k, v in test_clause_literals.items()]\n        )\n        matches = dict(\n            [\n                (clause_literals[x], test_clause_literals[x])\n                for x in (clause_literals.keys() & test_clause_literals.keys())\n            ]\n        )\n\n        if len(matches) < len(clause_literals):\n            return [{}]\n        elif len(matches) == len(clause_literals):\n            return [matches]\n        else:\n            raise Exception(\"Multiple unifications possible: not implemented yet!\")\n\n    def is_part_of(\n        self, clause: Union[\"Clause\", \"ClausalConstruct\"]\n    ) -> List[Tuple[List[Literal], Dict[Term, Term]]]:\n        \"\"\"\n        Checks whether the body of (self.)clause unifies with the part of the body of the provided clause\n\n        Args:\n            clause (Union[Clause, ClausalConstruct]): is 
self.clause to the\n variables is the provided clause\n \"\"\"\n if isinstance(self, type(clause)):\n if len(self) > len(clause):\n return []\n elif (\n len(self) == len(clause)\n and self.get_predicates() != clause.get_predicates()\n ):\n return []\n else:\n found_substitutions = []\n\n if isinstance(clause, Clause):\n clauses_to_check = [clause]\n else:\n clauses_to_check = clause.get_clauses()\n\n # construct potential sub-formulas that can be matched\n for cl in clauses_to_check:\n matching_literals = cl.get_atoms(\n with_predicates=self.get_predicates()\n )\n for comb in combinations(matching_literals, len(self)):\n if not are_variables_connected(comb):\n continue\n comb = list(comb)\n answer = self._check_for_unification_with_body(comb)\n found_substitutions += [(comb, x) for x in answer if len(x)]\n\n return found_substitutions\n else:\n return []\n\n def substitute_atoms(\n self,\n atoms_to_replace: List[Union[Literal, Not]],\n new_atom: Literal,\n substitutes: Dict[Term, Term],\n ) -> \"Clause\":\n \"\"\"\n Substitutes some atoms in the body with a new atoms\n\n Args:\n atoms_to_replace (list[Literal]): atom to replace in the clause\n new_atom (Literal): atom to use as the replacement\n substitutes (Dict[Term, Term]): terms substitutes to use in the new atom\n \"\"\"\n return Clause(\n self._head,\n [new_atom.substitute(substitutes)]\n + [x for x in self._body if x not in atoms_to_replace],\n )\n\n def substitute_predicate_occurence(self, old_predicate: Predicate, new_predicate: Predicate) -> 'Clause':\n atms = [x if x.get_predicate() != old_predicate else Literal(new_predicate, x.get_terms()) for x in self._body]\n return Clause(self._head, atms)\n\n def unfold_with(\n self, clauses: Union[\"Clause\", Iterator[\"Clause\"]]\n ) -> Iterator[\"Clause\"]:\n \"\"\"\n Unfolds the clause with given clauses\n If more than one clause is given for unfolding, assumes no clauses with the same head are provided\n\n Args:\n clauses [Union[Clause, List[Clauses]]: clauses to use for unfolding\n\n Returns:\n unfolded clause [Clause]\n \"\"\"\n # TODO: change this to work with a single clause for unfolding\n\n if isinstance(clauses, Clause):\n clauses = [clauses]\n\n _new_body_atoms = []\n _forbidden_var_names = [x.get_name() for x in self.get_variables()]\n\n for atm_ind, atm in enumerate(self._body):\n matching_clauses = [\n x\n for x in clauses\n if x.get_head().get_predicate() == atm.get_predicate()\n ]\n\n if atm.get_predicate() == self._head.get_predicate():\n # if recursive literals, just leave it in the body\n matching_clauses = []\n\n # rename variables in all matching clauses\n renamed_clauses = []\n for cl_ind, cl in enumerate(matching_clauses):\n var_map = {}\n\n for v in cl.get_variables():\n alternative_name = f\"{v.get_name()}{atm_ind}_{cl_ind}\"\n cnt = 1\n\n # if the same name appears in the rest of the clause; happens with recursive unfolding\n if alternative_name in _forbidden_var_names:\n alternative_name = alternative_name + f\"-{cnt}\"\n while alternative_name in _forbidden_var_names:\n alternative_name = alternative_name.split(\"-\")[0]\n cnt += 1\n alternative_name = alternative_name + f\"-{cnt}\"\n\n var_map[v] = c_var(alternative_name, v.get_type())\n\n renamed_clauses.append(cl.substitute(var_map))\n\n matching_clauses = renamed_clauses\n\n if len(matching_clauses):\n candidate_atoms = []\n\n for mcl in matching_clauses:\n var_map_matching_clause = dict(\n zip(mcl.get_head().get_variables(), atm.get_variables())\n )\n candidate_atoms.append(\n 
[x.substitute(var_map_matching_clause) for x in mcl.get_atoms()]\n )\n\n _new_body_atoms.append(candidate_atoms)\n else:\n _new_body_atoms.append([[atm]])\n\n return [\n Clause(self._head, reduce(lambda u, v: u + v, x))\n for x in product(*_new_body_atoms)\n ]\n\n def is_recursive(self) -> bool:\n \"\"\"\n Returns true if the clause is recursive\n \"\"\"\n return self._head.get_predicate() in [x.get_predicate() for x in self._body]\n\n def as_muz(self):\n return self._head.as_muz(), [x.as_muz() for x in self._body]\n\n def as_kanren(self, base_case_recursion=None):\n if self.is_recursive():\n raise Exception(f\"recursive rules should not be constructed with .as_kanren() method but should use 'construct_recursive' from kanren package\")\n # Should associate a conj goal with the predicate in the head\n # has to be a function\n # rename all variables to make sure there are no strange effects\n\n # head vars need to be bound to input args of the function\n head_vars = dict([(x, ind) for ind, x in enumerate(self._head.get_variables())])\n\n # all other arguments need to be bound to their kanren constructs\n other_args = [x.get_terms() for x in self._body]\n other_args = set(reduce(lambda x, y: x + y, other_args, []))\n # remove head variables; these should be bounded to the function arguments\n other_args = [x for x in other_args if x not in head_vars]\n\n def generic_predicate(*args, core_obj=self, hvars=head_vars, ovars=other_args):\n vars_to_use = dict([(v, kanren.var()) for v in ovars])\n return kanren.conde(\n [x.get_predicate().as_kanren()(\n *[args[hvars[y]] if y in hvars else vars_to_use[y] for y in x.get_terms()]\n )\n for x in core_obj.get_atoms()]\n )\n\n return generic_predicate\n\n def __contains__(self, item):\n if isinstance(item, Predicate):\n return item.get_name() in map(lambda x: x.predicate.name, self._body)\n elif isinstance(item, Literal):\n return (\n len(\n [\n x\n for x in self._body\n if x.predicate.get_name() == item.get_predicate().get_name()\n ]\n )\n > 0\n )\n else:\n return False\n\n def __add__(self, other: Literal):\n Clause(self._head, self._body + [other])\n\n def __len__(self):\n return len(self._body)\n\n def __and__(self, other: Literal):\n self._body += [other]\n self._body = self._get_atom_order()\n return self\n\n def _get_atom_order(self):\n head_vars = self._head.get_variables()\n all_atoms = [x for x in self._body]\n focus_vars = [head_vars[0]]\n processed_vars = set()\n atom_order = []\n\n while len(all_atoms) > 0:\n matching_atms = [\n x\n for x in all_atoms\n if any([y in focus_vars for y in x.get_variables()])\n ]\n matching_atms = sorted(\n matching_atms,\n key=lambda x: min(\n [\n x.get_variables().index(y) if y in x.get_variables() else 5\n for y in focus_vars\n ]\n ),\n )\n processed_vars = processed_vars.union(focus_vars)\n atom_order += matching_atms\n all_atoms = [x for x in all_atoms if x not in matching_atms]\n focus_vars = reduce(\n (lambda x, y: x + y),\n [x.get_variables() for x in matching_atms if x not in processed_vars],\n )\n\n return atom_order\n\n def __repr__(self):\n if self._repr_cache is None:\n # head_vars = self._head.get_variables()\n # all_atoms = [x for x in self._body]\n # focus_vars = [head_vars[0]]\n # processed_vars = set()\n # atom_order = []\n #\n # while len(all_atoms) > 0:\n # matching_atms = [x for x in all_atoms if any([y in focus_vars for y in x.get_variables()])]\n # matching_atms = sorted(matching_atms, key=lambda x: min([x.get_variables().index(y) if y in x.get_variables() else 5 for y in focus_vars]))\n # 
processed_vars = processed_vars.union(focus_vars)\n            #     atom_order += matching_atms\n            #     all_atoms = [x for x in all_atoms if x not in matching_atms]\n            #     focus_vars = reduce((lambda x, y: x + y), [x.get_variables() for x in matching_atms if x not in processed_vars])\n\n            self._repr_cache = \"{} :- {}\".format(\n                self._head, \",\".join([str(x) for x in self._body])\n            )\n        return self._repr_cache\n\n    def __hash__(self):\n        if self._hash_cache is None:\n            var_map = {}\n            for var in self._head.get_variables():\n                if var not in var_map:\n                    var_map[var] = len(var_map)\n\n            for atm in self._body:\n                for v in atm.get_variables():\n                    if v not in var_map:\n                        var_map[v] = len(var_map)\n\n            head_rep = f\"{self._head.get_predicate().get_name()}({','.join([str(var_map[x] for x in self.get_variables())])})\"\n            bodies = [\n                f\"{x.get_predicate().get_name()}({','.join([str(var_map[t]) if t in var_map else str(t) for t in x.get_terms()])})\"\n                for x in self._body\n            ]\n            bodies = \",\".join(bodies)\n\n            self._hash_cache = hash(f\"{head_rep} :- {bodies}\")\n\n        return self._hash_cache  # hash(self.__repr__())\n\n\nclass ClausalConstruct(ABC):\n\n    def __init__(self, clauses: Sequence[Clause]):\n        self._clauses = clauses\n\n    def get_clauses(self) -> Sequence[Clause]:\n        return self._clauses\n\n    def get_predicates(self) -> Set[Predicate]:\n        return reduce(lambda x, y: x.union(y), [x.get_predicates() for x in self._clauses], set())\n\n    def get_variables(self) -> Dict[int, Set[Variable]]:\n        return dict([(ind, x.get_variables()) for ind, x in enumerate(self._clauses)])\n\n    def get_atoms(self, with_predicates: Sequence[Predicate] = None) -> Dict[int, Sequence[Literal]]:\n        return dict([(ind, x.get_atoms(with_predicates=with_predicates)) for ind, x in enumerate(self._clauses)])\n\n    def get_head(self) -> Dict[int, Literal]:\n        return dict([(ind, x.get_head()) for ind, x in enumerate(self._clauses)])\n\n    def has_singleton_var(self) -> bool:\n        return any([x.has_singleton_var() for x in self._clauses])\n\n    def _check_for_unification_with_body(self, literals: List[Union[Literal, Not]]) -> Dict[int, List[Dict[Term, Term]]]:\n        return dict([(ind, x._check_for_unification_with_body(literals)) for ind, x in enumerate(self._clauses)])\n\n    def is_part_of(self, clause: \"Clause\") -> Dict[int, List[Tuple[List[Literal], Dict[Term, Term]]]]:\n        return dict([(ind, x.is_part_of(clause)) for ind, x in enumerate(self._clauses)])\n\n    def is_recursive(self) -> bool:\n        raise Exception(\"Not implemented yet!\")\n\n    def unfold_with(self, clauses: Union[\"Clause\", Iterator[\"Clause\"]]) -> 'ClausalConstruct':\n        unfolded_clauses = reduce(lambda x, y: x + y,\n                                  [x.unfold_with(clauses) for x in self._clauses],\n                                  [])\n\n        if isinstance(self, Disjunction):\n            return Disjunction(unfolded_clauses)\n        else:\n            return Recursion(unfolded_clauses)\n\n    def substitute(self, term_map: Dict[Term, Term], clause_index: Union[int, Sequence[int]]):\n        if isinstance(clause_index, int):\n            clause_index = [clause_index]\n        elif clause_index is None:\n            clause_index = list(range(len(self._clauses)))\n        else:\n            pass\n\n        new_clauses = []\n        for ind, cl in enumerate(self._clauses):\n            if ind in clause_index:\n                new_clauses.append(cl.substitute(term_map))\n            else:\n                new_clauses.append(cl)\n\n        if isinstance(self, Disjunction):\n            return Disjunction(new_clauses)\n        else:\n            return Recursion(new_clauses)\n\n    def substitute_atoms(self,\n                         atoms_to_replace: List[Union[Literal, Not]],\n                         new_atom: Literal,\n                         substitutes: Dict[Term, Term],\n                         clause_index: Union[int, Sequence[int]]):\n        if isinstance(clause_index, int):\n            
clause_index = [clause_index]\n        elif clause_index is None:\n            clause_index = list(range(len(self._clauses)))\n        else:\n            pass\n\n        new_clauses = []\n        for ind, cl in enumerate(self._clauses):\n            if ind in clause_index:\n                new_clauses.append(cl.substitute_atoms(atoms_to_replace, new_atom, substitutes))\n            else:\n                new_clauses.append(cl)\n\n        if isinstance(self, Disjunction):\n            return Disjunction(new_clauses)\n        else:\n            return Recursion(new_clauses)\n\n    def substitute_predicate_occurence(self, old_predicate: Predicate, new_predicate: Predicate) -> Union['Disjunction', 'Recursion']:\n        if isinstance(self, Disjunction):\n            return Disjunction([x.substitute_predicate_occurence(old_predicate, new_predicate) for x in self._clauses])\n        else:\n            return Recursion([x.substitute_predicate_occurence(old_predicate, new_predicate) for x in self._clauses])\n\n    def __len__(self):\n        return len(self._clauses)\n\n\nclass Disjunction(ClausalConstruct):\n\n    def __init__(self, clauses: Sequence[Clause]):\n        assert not any([x.is_recursive() for x in clauses])\n        super(Disjunction, self).__init__(clauses)\n\n    def is_recursive(self) -> bool:\n        return False\n\n\nclass Recursion(ClausalConstruct):\n\n    def __init__(self, clauses: Sequence[Clause]):\n        assert any([x.is_recursive() for x in clauses])\n        super(Recursion, self).__init__(clauses)\n\n    def is_recursive(self) -> bool:\n        return True\n\n    def get_base_case(self) -> Sequence[Clause]:\n        return [x for x in self._clauses if not x.is_recursive()]\n\n    def get_recursive_calse(self) -> Sequence[Clause]:\n        return [x for x in self._clauses if x.is_recursive()]\n\n\nclass Theory:\n    def __init__(self, formulas: Sequence[Formula]):\n        self._formulas: Sequence = formulas\n\n    def get_formulas(self, predicates: Set[Predicate] = None) -> Sequence[Formula]:\n        if predicates:\n            return [x for x in self._formulas if any([p in predicates for p in x.get_predicates()])]\n        else:\n            return self._formulas\n\n    def __len__(self):\n        return len(self.get_formulas())\n\n    def num_literals(self):\n        return sum([len(x) for x in self._formulas])\n\n    def get_predicates(self) -> Set[Predicate]:\n        raise Exception(\"Not implemented yet!\")\n\n\nclass Context:\n    def __init__(self):\n        self._logic = LP\n        self._predicates = {}  # name/arity -> Predicate\n        self._variables = {}  # domain -> {name -> Variable}\n        self._constants = {}  # domain -> {name -> Constant}\n        self._literals = {}  # Predicate -> { tuple of terms -> Atom}\n        # ; TO BE USED FOR LP\n        self._fatoms = {}  # TO BE USED WITH FOL\n        self._domains = {\"thing\": Type(\"thing\")}  # name -> Type\n        self._id_to_constant = {}  # domain (str) -> {id -> Constant}\n\n    def _predicate_sig(self, name, arity):\n        return f\"{name}/{arity}\"\n\n    def get_logic(self):\n        return self._logic\n\n    def set_logic(self, logic):\n        assert logic in [LP, FOL]\n        self._logic = logic\n\n    def get_predicates(self) -> Sequence[Predicate]:\n        return [v for k, v in self._predicates.items()]\n\n    def get_constants(self) -> Sequence[Constant]:\n        p = [[v for k,v in self._constants[z].items()] for z in self._constants]\n        return reduce(lambda x,y: x + y, p, [])\n\n    def get_variables(self) -> Sequence[Variable]:\n        return reduce(lambda x, y: x + y, [[v for k, v in self._variables[z].items()] for z in self._variables], [])\n\n    def get_types(self) -> Sequence[Type]:\n        return [v for k, v in self._domains.items()]\n\n    def constant_by_id(self, c_id: int, c_type: Union[str, Type]) -> Constant:\n        if isinstance(c_type, Type):\n            c_type = c_type.name\n\n        return self._id_to_constant[c_type][c_id]\n\n    def type(self, name):\n        if name not in self._domains:\n            t 
= Type(name)\n            self._domains[name] = t\n\n        return self._domains[name]\n\n    def predicate(self, name, arity, domains=()) -> Predicate:\n        if len(domains) == 0:\n            domains = [self._domains[\"thing\"]] * arity\n\n        domains = [d if isinstance(d, Type) else self._domains[d] for d in domains]\n\n        if self._predicate_sig(name, arity) not in self._predicates:\n            p = Predicate(name, arity, domains)\n            self._predicates[self._predicate_sig(name, arity)] = p\n\n        return self._predicates[self._predicate_sig(name, arity)]\n\n    def variable(self, name, domain=None) -> Variable:\n        if domain is None:\n            domain = \"thing\"\n        elif isinstance(domain, Type):\n            domain = domain.name\n\n        if domain not in self._variables:\n            self._variables[domain] = {}\n\n        if name not in self._variables[domain]:\n            v = Variable(name, sym_type=self._domains[domain])\n            self._variables[domain][name] = v\n\n        return self._variables[domain][name]\n\n    def constant(self, name, domain=None) -> Constant:\n        if domain is None:\n            domain = \"thing\"\n        elif isinstance(domain, Type):\n            domain = domain.name\n\n        if domain not in self._id_to_constant:\n            self._id_to_constant[domain] = {}\n\n        if domain not in self._constants:\n            self._constants[domain] = {}\n\n        if name not in self._constants[domain]:\n            c = Constant(name, self._domains[domain])\n            self._constants[domain][name] = c\n            self._id_to_constant[domain][c.id()] = c\n\n        return self._constants[domain][name]\n\n    def literal(self, predicate: Predicate, arguments: List[Term]) -> \"Literal\":\n        if predicate not in self._literals:\n            self._literals[predicate] = {}\n\n        if tuple(arguments) not in self._literals[predicate]:\n            self._literals[predicate][tuple(arguments)] = Literal(predicate, arguments)\n\n        return self._literals[predicate][tuple(arguments)]\n\n\nglobal_context = Context()\n\n\ndef _get_proper_context(ctx) -> Context:\n    if ctx is None:\n        global global_context\n        return global_context\n    else:\n        return ctx\n\n\ndef c_pred(name, arity, domains=(), ctx: Context = None) -> Predicate:\n    ctx = _get_proper_context(ctx)\n    return ctx.predicate(name, arity, domains=domains)\n\n\ndef c_const(name, domain=None, ctx: Context = None) -> Constant:\n    ctx = _get_proper_context(ctx)\n    return ctx.constant(name, domain=domain)\n\n\ndef c_id_to_const(id: int, type: Union[str, Type], ctx: Context = None) -> Constant:\n    ctx = _get_proper_context(ctx)\n    return ctx.constant_by_id(id, type)\n\n\ndef c_var(name, domain=None, ctx: Context = None) -> Variable:\n    ctx = _get_proper_context(ctx)\n    return ctx.variable(name, domain=domain)\n\n\ndef c_literal(\n    predicate: Predicate, arguments: List[Term], ctx: Context = None\n) -> Literal:\n    ctx = _get_proper_context(ctx)\n    return ctx.literal(predicate, arguments)\n\n\ndef set_logic(logic, ctx: Context = None):\n    ctx = _get_proper_context(ctx)\n    ctx.set_logic(logic)\n\n\ndef are_variables_connected(atoms: Sequence[Literal]):\n    \"\"\"\n    Checks whether the Variables in the clause are connected\n\n    Args:\n        atoms (Sequence[Literal]): atoms whose variables have to be checked\n\n    \"\"\"\n    g = nx.Graph()\n\n    for atm in atoms:\n        vrs = atm.get_variables()\n        if len(vrs) == 1:\n            g.add_node(vrs[0])\n        else:\n            for cmb in combinations(vrs, 2):\n                g.add_edge(cmb[0], cmb[1])\n\n    res = nx.is_connected(g)\n    del g\n\n    return res\n\n\ndef _are_two_set_of_literals_identical(\n    clause1: Union[List[Literal], Dict[Sequence[Predicate], Dict]],\n    clause2: Union[List[Literal], Dict[Sequence[Predicate], Dict]],\n) -> bool:\n    \"\"\"\n    Checks whether two sets of literal are identical, i.e. 
unify, up to the variable naming\n :param clause1:\n :param clause2:\n :return:\n \"\"\"\n clause1_sig = (\n _create_term_signatures(clause1) if isinstance(clause1, list) else clause1\n )\n clause2_sig = (\n _create_term_signatures(clause2) if isinstance(clause2, list) else clause2\n )\n\n if len(clause1_sig) != len(clause2_sig):\n return False\n else:\n clause1_sig = dict([(frozenset(v.items()), k) for k, v in clause1_sig.items()])\n clause2_sig = dict([(frozenset(v.items()), k) for k, v in clause2_sig.items()])\n\n matches = clause1_sig.keys() & clause2_sig.keys()\n\n # TODO: this is wrong if constants are used\n # terms_cl1 = set()\n # for l in clause1:\n # for v in l.get_terms():\n # terms_cl1.add(v)\n #\n # terms_cl2 = set()\n # for l in clause2:\n # for v in l.get_terms():\n # terms_cl2.add(v)\n\n return len(matches) == max(len(clause1_sig), len(clause2_sig))\n\n\ndef _create_term_signatures(\n literals: List[Union[Literal, Not]]\n) -> Dict[Term, Dict[Tuple[Predicate], int]]:\n \"\"\"\n Creates a term signature for each term in the set of literals\n\n A term signature is a list of all appearances of the term in the clause.\n The appearances are described as a tuple (predicate name, position of the term in the arguments)\n\n Args:\n literals (List[Literal]): list of literals of the clause\n\n Returns:\n returns a dictionary with the tuples as keys and their corresponding number of occurrences in the clause\n\n \"\"\"\n term_signatures = {}\n\n for lit in literals:\n for ind, trm in enumerate(lit.get_terms()):\n if trm not in term_signatures:\n term_signatures[trm] = {}\n\n if isinstance(lit, Not):\n tmp_atm = lit.get_formula()\n if isinstance(tmp_atm, Literal):\n tmp_sig = (f\"not_{tmp_atm.get_predicate().get_name()}\", ind)\n else:\n raise Exception(\"Only atom can be negated!\")\n else:\n tmp_sig = (lit.get_predicate().get_name(), ind)\n term_signatures[trm][tmp_sig] = term_signatures[trm].get(tmp_sig, 0) + 1\n\n return term_signatures\n" }, { "alpha_fraction": 0.5450516939163208, "alphanum_fraction": 0.5450516939163208, "avg_line_length": 14.744186401367188, "blob_id": "710bdee3d14a6e122f37766693b099eb2f5d2e22", "content_id": "0f6e5df73faed7354dd1ee0794cb157555ab98f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 677, "license_type": "no_license", "max_line_length": 50, "num_lines": 43, "path": "/loreleai/language/kanren/__init__.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "from .kanren_utils import construct_recursive_rule\nfrom ..commons import (\n Term,\n Constant,\n Variable,\n Structure,\n Predicate,\n Type,\n Not,\n Type,\n Theory,\n c_pred,\n c_const,\n c_id_to_const,\n c_var,\n c_literal,\n Literal,\n Clause,\n are_variables_connected\n)\nfrom ..lp.lp import ClausalTheory\n\n__all__ = [\n \"Term\",\n \"Constant\",\n \"Variable\",\n \"Structure\",\n \"Predicate\",\n \"Type\",\n \"Not\",\n \"Type\",\n \"Theory\",\n \"ClausalTheory\",\n \"c_pred\",\n \"c_const\",\n \"c_id_to_const\",\n \"c_var\",\n \"c_literal\",\n \"Clause\",\n \"Literal\",\n \"are_variables_connected\",\n 'construct_recursive_rule'\n]\n" }, { "alpha_fraction": 0.7368420958518982, "alphanum_fraction": 0.7368420958518982, "avg_line_length": 24.33333396911621, "blob_id": "0188b74d7a5738d2aef56bd9140a26a9e8f4d858", "content_id": "f87b0a9764a7ed6ead1c721a807f04b45f3d4fb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 76, "license_type": "no_license", 
"max_line_length": 51, "num_lines": 3, "path": "/loreleai/reasoning/__init__.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "from loreleai.reasoning.lp.lpsolver import LPSolver\n\n__all__ = ['LPSolver']\n" }, { "alpha_fraction": 0.4618336856365204, "alphanum_fraction": 0.49339020252227783, "avg_line_length": 38.59321975708008, "blob_id": "3c59f40844c9ceaf64a5c77159fc8237d3660803", "content_id": "8e39245c471ed2229ab7d6952751ab447ff8ef73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2345, "license_type": "no_license", "max_line_length": 126, "num_lines": 59, "path": "/experiments/knorf/refactored_programs_pncorrect/strings/literals2-3_layerNone_time5400s_pruneTrue_altFalse_rcandsTrue_rrTrue_mrTrue_singlTrue/solving_evolution.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "import datetime\n\ntrials = list(range(5,11))\nplaytasks = list(range(4000,4200,200))\n\nfor t in trials:\n for pt in playtasks:\n fname = f\"refactored_programs-{pt}-{t}.pl.log\"\n \n ff = open(fname)\n lines = ff.readlines()\n lines = lines[-200:]\n ff.close()\n\n #print(f\"pt: {pt}, t: {t}\")\n \n start_point = None\n points = []\n for line in lines:\n if 'objective' in line:\n tmp = line.strip().split()\n year, month, day = tmp[0].replace('[', '').split('-')\n hour, mins, sec = tmp[1].replace(']', '').split(':')\n sec = sec.split(',')[0]\n try:\n time = datetime.datetime(int(year), int(month), int(day), int(hour), int(mins), int(sec))\n quality = float(tmp[6].replace(',', ''))\n if start_point is not None:\n points.append(((time - start_point).total_seconds(), quality))\n except Exception:\n pass\n\n elif 'Started solving' in line:\n tmp = line.strip().split()\n year, month, day = tmp[0].replace('[', '').split('-')\n hour, mins, sec = tmp[1].replace(']', '').split(':')\n sec = sec.split(',')[0]\n start_point = datetime.datetime(int(year), int(month), int(day), int(hour), int(mins), int(sec))\n\n if len(points) < 1:\n continue\n\n final_value = points[-1][1]\n p1_bound = final_value + final_value/100.0\n p5_bound = final_value + final_value/20.0\n p10_bound = final_value + final_value/10.0\n\n within_1 = [x for x in points if x[1] < p1_bound][0][0]\n within_5 = [x for x in points if x[1] < p5_bound][0][0]\n within_10 = [x for x in points if x[1] < p10_bound][0][0]\n\n print({'type': 'stats', 'trial': t, 'playtasks': pt, 'bound_1': within_1, 'bound_5': within_5, 'bound_10': within_10})\n\n normalised_points = [(x[0], x[1]-final_value) for x in points]\n biggest_value = normalised_points[0][1]\n normalised_points = [(x[0], x[1]/biggest_value) for x in normalised_points]\n\n for p in normalised_points:\n print({'type': 'run', 'trial': t, 'playtasks': pt, 'quality': p[1], 'time': p[0]})\n \n" }, { "alpha_fraction": 0.5586518049240112, "alphanum_fraction": 0.5617374777793884, "avg_line_length": 46.22645568847656, "blob_id": "0aacb8c3a0c0f318b0dafabe0e3f92ca0da2a020", "content_id": "1046f41f6f755b90ff55a27bda0738f18906d531", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 21065, "license_type": "no_license", "max_line_length": 251, "num_lines": 446, "path": "/loreleai/language/lp/lp.py", "repo_name": "sebdumancic/knorf_aaai21", "src_encoding": "UTF-8", "text": "from __future__ import annotations\n\nfrom functools import reduce\nfrom typing import List, Dict, Set, Tuple, Sequence, Union\nfrom abc import ABC\n\nimport pygraphviz as pgv\n\n# from . 
import parse\nfrom ..commons import Predicate, Theory, Variable, Literal, c_var, \\\n c_pred, c_const, c_literal, Clause, _are_two_set_of_literals_identical, Recursion, Disjunction, ClausalConstruct\n\n\nclass ClausalTheory(Theory):\n\n def __init__(self, formulas: Sequence[Clause] = None, read_from_file: str = None):\n assert formulas is not None or read_from_file is not None\n\n if read_from_file:\n # TODO: fix this for clauses that spread on more than one line\n formulas = []\n # formulas_per_head = {}\n inf = open(read_from_file)\n\n for line in inf.readlines():\n if len(line) > 3 and not line.startswith('#') and not line.startswith('%') and not line.startswith('//') and not line.startswith('true.'):\n f = parse(line.strip().replace('.', ''))\n formulas.append(f)\n # if f.get_head().get_predicate() not in formulas_per_head:\n # formulas_per_head[f.get_head().get_predicate()] = []\n # formulas_per_head[f.get_head().get_predicate()].append(f)\n #\n # for k in formulas_per_head:\n # if len(formulas_per_head[k]) == 1:\n # formulas.append(formulas_per_head[k][0])\n # else:\n # is_recursive = any([x.is_recursive() for x in formulas_per_head[k]])\n # if is_recursive:\n # formulas.append(Recursion(formulas_per_head[k]))\n # else:\n # formulas.append(Disjunction(formulas_per_head[k]))\n # del formulas_per_head\n self._recursive_predicates = {x.get_head().get_predicate() for x in formulas if x.is_recursive()}\n super(ClausalTheory, self).__init__(formulas)\n\n def get_formulas(self, predicates: Set[Predicate] = None) -> Sequence[Union[Clause, ClausalTheory]]:\n if predicates:\n return [x for x in self._formulas if any([p in predicates for p in x.get_predicates()])]\n else:\n return self._formulas\n\n def remove_formulas_with_predicates(self, predicates_in_questions: Set[Predicate]):\n \"\"\"\n Removes all formulas that use at least one of the provided predicates\n \"\"\"\n self._formulas = [x for x in self._formulas if not any([p in predicates_in_questions for p in x.get_predicates()])]\n\n def get_predicates(self) -> Set[Predicate]:\n return reduce((lambda x, y: x.union(y)), [x.get_predicates().union({x.get_head().get_predicate()}) for x in self._formulas])\n\n def _replace_predicates(self, clauses: Sequence[Union[Clause, ClausalConstruct]],\n predicate_replacements: Dict[Predicate, Predicate]\n ) -> Sequence[Union[ClausalConstruct, Clause]]:\n \"\"\"\n Replaces the occurrences of the predicates in clauses with their substitutes specified in [predicate_replacements\n\n Args:\n clauses [Sequence[Union[Clause, ClausalConstruct]]]: a collection of clauses\n predicate_replacements [Dict[Predicate, Predicate]]: predicate substitutions\n keys are replaced with their values\n\n Returns:\n a collection of clauses given as an input, with the predicates replaced\n \"\"\"\n _new_clauses = []\n _predicate_to_replace = set(predicate_replacements.keys())\n\n for cl in clauses:\n _to_replace_in_clause = _predicate_to_replace.intersection(cl.get_predicates())\n\n if len(_to_replace_in_clause) > 0:\n tmp_cl = cl\n for p in _to_replace_in_clause:\n tmp_cl = tmp_cl.substitute_predicate_occurence(p, predicate_replacements[p])\n _new_clauses.append(tmp_cl)\n else:\n _new_clauses.append(cl)\n\n return _new_clauses\n\n def _remove_duplicate_clauses(self,\n clauses: Sequence[Union[Clause,ClausalConstruct]]\n ) -> Tuple[Sequence[Union[Clause, ClausalConstruct]], Dict[Predicate, Predicate]]:\n\n predicate_replacements = {} # predicate to be replaced -> which predicate to replace with\n new_forms = []\n\n # filter out 
duplicated individual clauses\n frms_per_length = {} # if clause, the key is length\n # if clauseConstruct, the key is tuple of clause lengths sorted\n\n for cl in clauses:\n if isinstance(cl, Clause):\n l = len(cl)\n else:\n l = tuple(sorted([len(x) for x in cl.get_clauses()]))\n\n if l not in frms_per_length:\n frms_per_length[l] = []\n frms_per_length[l].append(cl)\n\n indices_to_remove = {}\n for l in frms_per_length:\n indices_to_remove[l] = set()\n for ind in range(0, len(frms_per_length[l])-1):\n for ind_i in range(ind + 1, len(frms_per_length[l])):\n cl1 = frms_per_length[l][ind]\n cl2 = frms_per_length[l][ind_i]\n\n if isinstance(cl1, Clause):\n # if the head predicate should already be replaced with another one, skip\n if cl1.get_head().get_predicate() in predicate_replacements:\n continue\n\n if cl1.get_predicates() == cl2.get_predicates() \\\n and _are_two_set_of_literals_identical(cl1.get_term_signatures(),\n cl2.get_term_signatures()):\n indices_to_remove[l].add(ind_i)\n # remember which head predicates can be substituted\n # change that pred in every other clause\n predicate_replacements[cl2.get_head().get_predicate()] = cl1.get_head().get_predicate()\n else:\n # can all clauses in the first construct be matched to clauses in the second construct\n # if predicate should already be replaced with another one, skip\n if cl1.get_clauses()[0].get_head().get_predicate() in predicate_replacements:\n continue\n\n _inner_clause_matches = dict([(x, False) for x in cl1.get_clauses()])\n for cl_in in cl1.get_clauses():\n _inner_clause_matches[cl_in] = any(\n [\n cl_in.get_predicates() == x.get_predicates() \\\n and _are_two_set_of_literals_identical(cl_in.get_term_signatures(),\n x.get_term_signatures())\n for x in cl2.get_clauses()\n ]\n )\n\n if all(list(_inner_clause_matches.values())):\n indices_to_remove[l].add(ind_i)\n predicate_replacements[cl2.get_head()[0].get_predicate()] = cl1.get_head()[0].get_predicate()\n\n for l in frms_per_length:\n removed = 0\n for ind in indices_to_remove[l]:\n frms_per_length[l].pop(ind - removed)\n removed += 1\n\n new_forms += [x for x in frms_per_length[l]]\n\n return new_forms, predicate_replacements\n\n def remove_duplicates(self):\n # new_forms = []\n _formulas_per_head = {}\n for cl in self._formulas:\n if isinstance(cl, Clause):\n hp = cl.get_head().get_predicate()\n else:\n hp = cl.get_head()[0].get_predicate()\n\n if hp not in _formulas_per_head:\n _formulas_per_head[hp] = []\n _formulas_per_head[hp].append(cl)\n\n forms_to_use = []\n for p in _formulas_per_head:\n if len(_formulas_per_head[p]) == 1:\n forms_to_use.append(_formulas_per_head[p][0])\n else:\n ffms = _formulas_per_head[p]\n if any([x.is_recursive() for x in ffms]):\n forms_to_use.append(Recursion(ffms))\n else:\n forms_to_use.append(Disjunction(ffms))\n\n # _base_clauses = [x for x in self._formulas if isinstance(x, Clause)]\n # _disjunctions = [x for x in self._formulas if isinstance(x, Disjunction)]\n # _recursions = [x for x in self._formulas if isinstance(x, Recursion)]\n\n _base_clauses = [x for x in forms_to_use if isinstance(x, Clause)]\n _disjunctions = [x for x in forms_to_use if isinstance(x, Disjunction)]\n _recursions = [x for x in forms_to_use if isinstance(x, Recursion)]\n\n del forms_to_use\n del _formulas_per_head\n\n # predicate_replacements = {} # predicate to be replaced -> which predicate to replace with\n #\n # # filter out duplicated individual clauses\n # frms_per_length = {}\n # for frm in _base_clauses:\n # l = len(frm)\n # if l not in 
frms_per_length:\n # frms_per_length[l] = []\n # frms_per_length[l].append(frm)\n #\n # indices_to_remove = {}\n # for l in frms_per_length:\n # indices_to_remove[l] = set()\n # for ind in range(0, len(frms_per_length[l])-1):\n # for ind_i in range(ind+1, len(frms_per_length[l])):\n # cl1 = frms_per_length[l][ind]\n # cl2 = frms_per_length[l][ind_i]\n #\n # if cl1.get_predicates() == cl2.get_predicates() \\\n # and _are_two_set_of_literals_identical(cl1.get_term_signatures(), cl2.get_term_signatures()):\n # indices_to_remove[l].add(ind_i)\n # # remember which head predicates can be substituted\n # # change that pred in every other clause\n # predicate_replacements[cl2.get_head().get_predicate()] = cl1.get_head().get_predicate()\n #\n # for l in frms_per_length:\n # removed = 0\n # for ind in indices_to_remove[l]:\n # frms_per_length[l].pop(ind - removed)\n # removed += 1\n #\n # new_forms += [x for x in frms_per_length[l]]\n #\n # # do the replacement\n # _base_clauses = self._replace_predicates(_base_clauses, predicate_replacements)\n\n # remove redundant base clauses\n _base_clauses, predicate_replacements1 = self._remove_duplicate_clauses(_base_clauses)\n if predicate_replacements1:\n _base_clauses = self._replace_predicates(_base_clauses, predicate_replacements1)\n _disjunctions = self._replace_predicates(_disjunctions, predicate_replacements1)\n _recursions = self._replace_predicates(_recursions, predicate_replacements1)\n\n # remove redundant disjunctions\n _disjunctions, predicate_replacements2 = self._remove_duplicate_clauses(_disjunctions)\n if predicate_replacements2:\n _base_clauses = self._replace_predicates(_base_clauses, predicate_replacements2)\n _disjunctions = self._replace_predicates(_disjunctions, predicate_replacements2)\n _recursions = self._replace_predicates(_recursions, predicate_replacements2)\n\n # remove redundant recursions\n _recursions, predicate_replacements3 = self._remove_duplicate_clauses(_recursions)\n if predicate_replacements3:\n _base_clauses = self._replace_predicates(_base_clauses, predicate_replacements3)\n _disjunctions = self._replace_predicates(_disjunctions, predicate_replacements3)\n _recursions = self._replace_predicates(_recursions, predicate_replacements3)\n\n self._formulas = [x for x in _base_clauses] # + [x for x in _disjunctions] + [x for x in _recursions]\n self._formulas = self._formulas + reduce(lambda x,y: x + y, [x.get_clauses() for x in _disjunctions], [])\n self._formulas = self._formulas + reduce(lambda x, y: x + y, [x.get_clauses() for x in _recursions], [])\n # self._formulas = [x for x in new_forms]\n # del new_forms\n del _base_clauses\n del _disjunctions\n del _recursions\n\n def flatten(self) -> ClausalTheory:\n \"\"\"\n Flattens the theory. 
Everything becomes a clause, and ClausalStructures are unrolled to individual clauses\n :return:\n \"\"\"\n forms = [x for x in self._formulas if isinstance(x, Clause)]\n\n forms_constructs = reduce(lambda x,y: x + y, [x.get_clauses() for x in self._formulas if isinstance(x, ClausalConstruct)], [])\n\n return ClausalTheory(forms + forms_constructs)\n\n def unfold(self):\n \"\"\"\n Unfolds the theory\n\n A theory containing two clauses\n h :- d,c,r.\n d :- a,b.\n Would be unfolded into\n h :- a,b,c,r.\n\n Returns:\n unfolded theory [Theory]\n \"\"\"\n\n def _unfold_recursively(clause: Clause, clause_index: Dict[Predicate, List[Clause]], forbidden_clauses: Set[\n Clause]) -> Tuple[List[Clause], Set[Clause]]:\n cl_predicates = [x.get_predicate() for x in clause.get_atoms()]\n if len(forbidden_clauses) == 0:\n matching_clauses_for_unfolding = dict([(k, clause_index[k]) for k in cl_predicates if k in clause_index])\n else:\n matching_clauses_for_unfolding = dict([(k, [p for p in clause_index[k] if p not in forbidden_clauses]) for k in cl_predicates if k in clause_index])\n\n if len(matching_clauses_for_unfolding) == 0:\n return [clause], set()\n else:\n used_clauses = [v for k, v in matching_clauses_for_unfolding.items()]\n used_clauses = reduce(lambda x, y: x + y, used_clauses)\n _new_form = clause.unfold_with(used_clauses)\n # once the recursive clause is used, do not allow another usage again\n # recursive_clauses = set([x for x in used_clauses if x.is_recursive()])\n # used_clauses = [x for x in used_clauses if not x.is_recursive()]\n # NOT NEEDED ANYMORE BECAUSE RECURSIVE PREDICATES ARE REMOVED FROM THE CANDIDATE SET\n final = [_unfold_recursively(x, clause_index, forbidden_clauses) for x in _new_form]\n\n final_clauses = reduce(lambda x, y: x + y, [z[0] for z in final])\n final_exclusion = reduce(lambda x, y: x.union(y), [z[1] for z in final])\n\n return final_clauses, final_exclusion.union(used_clauses)\n\n # create clause index\n clause_index = {}\n new_set_of_formulas = []\n recursively_defined_predicates = set()\n\n for cl in self._formulas:\n if cl.is_recursive():\n # detect predicates with recursive definitions\n # do not use them for unfolding because they can remove finite traces\n recursively_defined_predicates.add(cl.get_head().get_predicate())\n\n head_pred = cl.get_head().get_predicate()\n if head_pred not in clause_index:\n clause_index[head_pred] = []\n clause_index[head_pred].append(cl)\n\n clauses_to_exclude = set()\n # excluding recursively defined predicates from the candidate set, so that they are not used\n clause_index = dict([(k, v) for k, v in clause_index.items() if k not in recursively_defined_predicates])\n\n for cl in self.get_formulas():\n if cl in clauses_to_exclude:\n continue\n\n cls, excls = _unfold_recursively(cl, clause_index, set()) # at the beginning, no forbidden clause (used for recursive ones)\n new_set_of_formulas += cls\n clauses_to_exclude = clauses_to_exclude.union(excls)\n\n return ClausalTheory(new_set_of_formulas)\n\n def _build_clause_dependencies(self) -> Dict[Union[Clause, ClausalConstruct], Dict[str, Sequence[Union[Clause, ClausalConstruct]]]]:\n head_pred_index = dict([(x.get_head().get_predicates(), x) for x in self._formulas])\n dependencies = {} # key: clause -> value: dict [ \"dependsOn\": list of clauses it uses in the body,\n # \"useIt\": list of clauses that use it in their body]\n for cl in self._formulas:\n if cl not in dependencies:\n dependencies[cl] = {\"dependsOn\": [], \"useIt\": []}\n\n if isinstance(cl, Clause):\n 
clauses_in_the_body = {head_pred_index[x] for x in cl.get_predicates() if x in head_pred_index}\n else:\n clauses_in_the_body = [{head_pred_index[x] for x in tcl.get_predicates()} for tcl in cl.get_clauses()]\n clauses_in_the_body = reduce(lambda x,y: x.union(y), clauses_in_the_body, set())\n\n # record which clauses it depends on\n dependencies[cl][\"dependsOn\"] = clauses_in_the_body\n\n # record that the clauses in the body are used by the cl\n for bcl in clauses_in_the_body:\n if bcl not in dependencies:\n dependencies[bcl] = {\"dependsOn\": [], \"useIt\": []}\n dependencies[bcl][\"useIt\"].append(cl)\n\n return dependencies\n\n def unfold_new(self):\n # create dependencies between clauses\n clausal_dependencies = self._build_clause_dependencies()\n\n # unfold things bottom-up\n keepUnfolding = True\n\n while keepUnfolding:\n # find clauses that do not depend on any other clause, just on primitive preds\n clauses_to_inline = [x for x in clausal_dependencies if len(clausal_dependencies[x][\"dependsOn\"]) == 0]\n\n # find all clauses that depend only on primitives and are not recursive (= primitive support)\n # while no primitive only non-recursive clauses left\n # unfold all clauses that depend on the current primitive support\n # remove primitive support from the theory\n # update the dependency structure\n\n def visualize(self, filename: str, only_numbers=False):\n predicates_in_bodies_only = set() # names are the predicate names\n predicates_in_heads = set() # names are clauses\n\n for cl in self._formulas:\n predicates_in_heads.add(cl.get_head().get_predicate())\n predicates_in_bodies_only = predicates_in_bodies_only.union([x.get_predicate() for x in cl.get_atoms()])\n\n predicates_in_bodies_only = [x for x in predicates_in_bodies_only if x not in predicates_in_heads]\n\n graph = pgv.AGraph(directed=True)\n cl_to_node_name = {}\n\n for p in predicates_in_bodies_only:\n cl_to_node_name[p] = len(cl_to_node_name) if only_numbers else f\"{p.get_name()}/{p.get_arity()}\"\n graph.add_node(cl_to_node_name[p], color='blue')\n\n for cl in self._formulas:\n if cl.get_head().get_predicate() not in cl_to_node_name:\n ind = len(cl_to_node_name)\n #cl_to_node_name[cl] = ind if only_numbers else str(cl)\n cl_to_node_name[cl.get_head().get_predicate()] = ind if only_numbers else str(cl.get_head().get_predicate())\n graph.add_node(cl_to_node_name[cl.get_head().get_predicate()], clause=cl.get_head().get_predicate(), color='black' if ('latent' in cl.get_head().get_predicate().get_name() or \"_\" in cl.get_head().get_predicate().get_name()) else 'red')\n\n for cl in self._formulas:\n body_p = [x.get_predicate() for x in cl.get_atoms()]\n\n for p in body_p:\n graph.add_edge(cl_to_node_name[cl.get_head().get_predicate()], cl_to_node_name[p])\n\n graph.draw(filename, prog='dot')\n\n def __str__(self):\n return \"\\n\".join([str(x) for x in self._formulas])\n\n def __len__(self):\n return len(self._formulas)\n\n def num_literals(self):\n return sum([len(x)+1 for x in self._formulas])\n\n\ndef _convert_to_atom(string: str):\n pred, args = string.strip().replace(')', '').split('(')\n args = args.split(',')\n\n pred = c_pred(pred, len(args))\n args = [c_const(x) if x.islower() else c_var(x) for x in args]\n\n return c_literal(pred, args)\n\n\ndef parse(string: str):\n if \":-\" in string:\n head, body = string.split(\":-\")\n head, body = head.strip(), body.strip()\n body = [x + \")\" for x in body.split(\"),\")]\n head, body = _convert_to_atom(head), [_convert_to_atom(x) for x in body]\n return Clause(head, 
body)\n else:\n return _convert_to_atom(string)\n\n\n" } ]
23
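The lp.py record above documents clause unfolding (a theory with h :- d,c,r. and d :- a,b. unfolds into h :- a,b,c,r.). A minimal standalone sketch of that single substitution step on propositional clauses — plain tuples stand in for loreleai's Clause objects here; this is not the library's real API:

def unfold_once(clause, definitions):
    # clause: (head, [body literals]); definitions: head literal -> body of its defining clause
    head, body = clause
    new_body = []
    for lit in body:
        if lit in definitions:
            new_body.extend(definitions[lit])  # splice in the definition's body
        else:
            new_body.append(lit)               # primitive literal: keep as-is
    return (head, new_body)

print(unfold_once(("h", ["d", "c", "r"]), {"d": ["a", "b"]}))
# -> ('h', ['a', 'b', 'c', 'r']), i.e. h :- a, b, c, r.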
aigerimka/project_6
https://github.com/aigerimka/project_6
dda483d413decbe09666e266b9a0ecc158c1a967
23cd681aab15932a83b2524264d44bc49a3d7877
f3595473e05e843f1a84ca09d22677f6f0bc605a
refs/heads/master
2020-04-08T03:24:21.836776
2018-11-24T21:17:59
2018-11-24T21:17:59
158,974,476
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6354838609695435, "alphanum_fraction": 0.6370967626571655, "avg_line_length": 35.47058868408203, "blob_id": "748d879eca97fa9db161372f117aa29d6c328830", "content_id": "bf5a65d5945a19dd9040a2c466c093ab43ab6f47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 620, "license_type": "no_license", "max_line_length": 89, "num_lines": 17, "path": "/project_6.py", "repo_name": "aigerimka/project_6", "src_encoding": "UTF-8", "text": "def number_of_days(pages, first_day, the_last_day):\n \"\"\"The function for calculating the number of days\"\"\"\n days = int(2 * pages / (first_day + the_last_day))\n print('You will finish reading this book in', days, 'days')\n\n\ndef main():\n \"\"\"The main function\"\"\"\n print('Enter your values:')\n pages = int(input('The number of pages in the book:'))\n first_day = int(input('The number of pages you read on the first day:'))\n the_last_day = int(input('The number of pages you suppose to read on the last day:'))\n number_of_days(pages, first_day, the_last_day)\n\n\nif __name__ == '__main__':\n main()\n" } ]
1
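The days formula in the project_6.py record above is the arithmetic-series sum solved for the number of days — assuming the daily page count changes evenly from the first-day value to the last-day value: pages = days * (first_day + the_last_day) / 2, hence days = 2 * pages / (first_day + the_last_day). A quick check with hypothetical numbers (not from the repo):

# 10 pages on day one, 30 on the last day, 100 pages in total:
# the daily counts 10 + 15 + 20 + 25 + 30 sum to 100, i.e. 5 days
assert 2 * 100 / (10 + 30) == 5.0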
kyle-ip/machine-learning-in-action
https://github.com/kyle-ip/machine-learning-in-action
11864dbe38e75375693e449edd8a1bc8abd760a0
d9f9d7aa5724ef77f1f6fbd0678316d183024854
f470454a7fcae2512d41145ca3c18603d5784ef8
refs/heads/master
2022-09-14T03:08:05.889998
2020-01-24T06:57:52
2020-01-24T06:57:52
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6509804129600525, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 20.20833396911621, "blob_id": "0501c58ea71b2a56a218d1d313316568227bbdc3", "content_id": "0bd7418557c45ec8e3f1aad7d7666d33573fe36d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1028, "license_type": "no_license", "max_line_length": 56, "num_lines": 24, "path": "/source/AdaBoost.py", "repo_name": "kyle-ip/machine-learning-in-action", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/2/11\n# @Author : yipwinghong\n# @Email : [email protected]\n# @File : AdaBoost.py\n# @Software: PyCharm\n\n\"\"\"\n AdaBoost:元算法,关于算法的组合方式,即集成方法(可以是不同算法集成、或同一算法不同设置下集成)\n 非均衡分类:正负类样本数目差距很大\n\n bagging:\n 自举汇聚法:原始数据集选择S次后得到S个新数据集,新数据集和原数据集大小相等(有放回抽取)\n 将某个学习学习算法分别作用于每个数据集、串行训练得到S个分类器,\n 使用S个分类器进行分类,并投票选出最多的类别作为最后的结果\n boosting:\n 训练分类器的方式和前面类似,但通过集中关注被已有分类器错分的数据获得新分类器\n\n 优点:泛化错误率低,易编码,可以应在大部分分类器上,无参数调整\n 缺点:对离群点敏感\n 适用数据类型:数值型和标称型数据\n\n\n\"\"\"\n\n" }, { "alpha_fraction": 0.542089581489563, "alphanum_fraction": 0.5504477620124817, "avg_line_length": 36.22222137451172, "blob_id": "4058b12b632fe321bfd6264383591c846969aab5", "content_id": "10f9a3cb7ea8e24fa1115a3a9450d87267446d19", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2041, "license_type": "no_license", "max_line_length": 97, "num_lines": 45, "path": "/sklearn/diabetes.py", "repo_name": "kyle-ip/machine-learning-in-action", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/2/7\n# @Author : yipwinghong\n# @Email : [email protected]\n# @File : diabetes.py 线性回归测试(糖尿病)\n# @Software: PyCharm\n\n\"\"\"\n 广义线性回归模型\n 其他回归模型:\n 岭回归 采用带罚项的残差平方和损失函数\n Lasso回归 采用待L1范数的罚项平方损失函数\n 贝叶斯岭回归\n 随机梯度下降回归\n 鲁棒回归\n ...\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nfrom sklearn import datasets\nfrom sklearn import cross_validation # 划分数据集\n\n# 导入数据集\ndiabetes = datasets.load_diabetes()\nfeature, target = diabetes.data[:, np.newaxis, 2], diabetes.target # 选取其中一个特征\ntraining_feature, test_feature, training_target, test_target = cross_validation.train_test_split(\n feature, target, test_size=0.3, random_state=56\n) # 划分测试集和训练集\n\n# 训练模型\nmodel = linear_model.LinearRegression() # 线性回归模型(最小二乘回归,即使用平方损失函数)\nmodel.fit(training_feature, training_target) # 拟合样本点(特征向量)\nw, b = model.coef_, model.intercept_ # 拟合直线的权值和偏置(y(x) = wx + b)\n\n# 绘制图像\nplt.scatter(training_feature, training_target, color=\"black\") # 训练集散点\nplt.scatter(test_feature, test_target, color=\"red\") # 测试集散点\nplt.plot(test_feature, model.predict(test_feature), color=\"blue\", linewidth=3) # 拟合直线\nplt.legend((\"Fit line\", \"Train Set\", \"Test Set\"), loc=\"lower right\") # 图例\nplt.title(\"LinearRegression Example\") # 标题\nplt.xticks(()) # 不显示刻度\nplt.yticks(())\nplt.show()\n" }, { "alpha_fraction": 0.6041951775550842, "alphanum_fraction": 0.6265389919281006, "avg_line_length": 33.265625, "blob_id": "a8f483e726c5804d9a8beb60ca8103e629556365", "content_id": "326b7cf2a1d5324612283a6a6556f23cbdaae0b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2457, "license_type": "no_license", "max_line_length": 102, "num_lines": 64, "path": "/sklearn/digits.py", "repo_name": "kyle-ip/machine-learning-in-action", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/2/7\n# @Author : yipwinghong\n# @Email : [email protected]\n# @File : 
digits.py 支持向量机分类测试(手写数字识别)\n# @Software: PyCharm\n\nimport os\nimport pandas as pd\nfrom sklearn import cross_validation\nfrom sklearn.svm import LinearSVC, SVC # 支持向量机模型\nfrom sklearn.linear_model import Perceptron # 感知机模型\nfrom sklearn.metrics import accuracy_score # 性能评估\nfrom sklearn import datasets # 数据集\nimport matplotlib.pyplot as plt\n\n\npath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# 线性分类测试\n# df = pd.read_csv(os.path.join(path, \"data/svm_test_data.csv\"), header=0)\n# feature, target = df[[\"x\", \"y\"]], df[\"class\"] # 提取样本点特征和分类\n\nfeature, target = datasets.make_classification( # 随机生成二分类数据集\n n_samples=500, # 500个样本\n n_features=2, # 2个特征(等于n_informative + n_redundant + n_repeated)\n n_redundant=0, # 0个冗余特征\n n_informative=1, # 1个多信息特征\n n_clusters_per_class=1, # 1个簇/类别\n n_classes=2 # 2个类别输出\n)\n\ntraining_feature, test_feature, training_target, test_target = cross_validation.train_test_split(\n feature, target, test_size=0.7\n)\n\nmodel1 = Perceptron()\nmodel1.fit(training_feature, training_target)\nscore1 = model1.score(test_feature, test_target)\n\nmodel2 = LinearSVC()\nmodel2.fit(training_feature, training_target)\nscore2 = model2.score(test_feature, test_target)\n\n\n# 手写数字识别测试\ndigits = datasets.load_digits()\n\n\nfor i, image in enumerate(digits.images[:5]):\n plt.subplot(2, 5, i + 1) # 选取前5个手写数字打印灰度图\n plt.imshow(image, cmap=plt.cm.gray_r, interpolation=\"nearest\")\n# plt.show()\n\nfeature, target = digits.data, digits.target\ntraining_feature, test_feature, training_target, test_target = cross_validation.train_test_split(\n feature, target, test_size=0.3\n) # 随机划分数据集,70%作为训练集,30%作为测试集\n\nmodel = SVC(gamma=0.001)\nmodel.fit(training_feature, training_target)\nprediction = model.predict(test_feature)\nscore = accuracy_score(test_target, prediction)\nprint(score)\n" }, { "alpha_fraction": 0.8039215803146362, "alphanum_fraction": 0.8039215803146362, "avg_line_length": 33, "blob_id": "bb9091785def90fd199479933f02e06417e3bf23", "content_id": "c3e911d16e8b635e991fb49bc3d33ac5eae43f95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 102, "license_type": "no_license", "max_line_length": 71, "num_lines": 3, "path": "/README.md", "repo_name": "kyle-ip/machine-learning-in-action", "src_encoding": "UTF-8", "text": "# Machine Learning In Action\n\nExample code and learning note for the book Machine Learning in action.\n" }, { "alpha_fraction": 0.5371835231781006, "alphanum_fraction": 0.5482594966888428, "avg_line_length": 42.58620834350586, "blob_id": "647df76306c22512bab4246a7aa775536698449f", "content_id": "35da7373529e031a7db3c6745a0507df5b24caf2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1518, "license_type": "no_license", "max_line_length": 118, "num_lines": 29, "path": "/sklearn/iris.py", "repo_name": "kyle-ip/machine-learning-in-action", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/2/7\n# @Author : yipwinghong\n# @Email : [email protected]\n# @File : iris.py 决策树分类测试(鸢尾花)\n# @Software: PyCharm\n\nfrom sklearn import datasets # 数据集\nfrom sklearn import cross_validation # 划分数据集的方法\nfrom sklearn.tree import DecisionTreeClassifier # 决策树分类器\nfrom sklearn.metrics import accuracy_score # 评估计算方法查看预测结果的准确度\n\niris = datasets.load_iris() # 鸢尾花数据集\nfeature = iris.data # 特征(Sepal length, Sepal width, Petal length, Petal width)\ntarget = iris.target # 分类标签(共三类:0, 1, 2)\n\n# 
{ "alpha_fraction": 0.5603993535041809, "alphanum_fraction": 0.5797004699707031, "avg_line_length": 28.263322830200195, "blob_id": "f0ca5e85aeedd53541584747162f68cdd7b2f001", "content_id": "58bd92180fc422dfeda52bced3f7739a4ad87e3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10603, "license_type": "no_license", "max_line_length": 112, "num_lines": 319, "path": "/source/kNN.py", "repo_name": "kyle-ip/machine-learning-in-action", "src_encoding": "UTF-8", "text": "\"\"\"\n    k-nearest neighbours:\n        1. a training set\n        2. a distance metric Lp (e.g. the Euclidean distance L2)\n        3. a value of k (large k -> simpler model, smaller estimation error, larger approximation error; k = size of the sample space is the extreme case)\n        4. a decision rule (e.g. majority vote, i.e. empirical risk minimization)\n    The class of any new input instance is determined uniquely by:\n        1. finding its nearest neighbours (linear scan: inefficient; kd-tree: suited to far more training instances than dimensions)\n        2. taking a majority vote over those neighbours\n\n    Pros: high accuracy, insensitive to outliers, no assumptions about the input data\n    Cons: high computational and space complexity\n    Works with: numeric and nominal values\n\"\"\"\n\nimport operator\nimport os\nfrom collections import Counter\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom numpy import *\n\n\nclass Node:\n    \"\"\" Binary tree node \"\"\"\n\n    def __init__(self, data=None, left=None, right=None):\n        self.data = data\n        self.left = left\n        self.right = right\n\n\nclass KDNode(Node):\n    \"\"\" KD-tree node \"\"\"\n\n    def __init__(self, data=None, left=None, right=None, axis=None, x=None, y=None):\n        super(KDNode, self).__init__(data, left, right)\n        self.axis = axis    # record the splitting axis, the x/y coordinates and the children\n        self.x = x\n        self.y = y\n\n\ndef createKDTree(pointList, axis=1):\n    \"\"\"\n    Build a KD-tree recursively\n    :param pointList: (point, label) pairs not yet assigned to a node\n    :param axis: current splitting axis\n    :return:\n    \"\"\"\n\n    if not pointList:    # all points assigned: return None (a leaf has no children)\n        return None\n\n    axis = 1 if axis == 0 else 0    # switch axis\n\n    pointList = sorted(pointList, key=lambda x: x[0][axis])    # sort the (point, label) pairs along the axis\n    median = len(pointList) // 2    # split at the median\n\n    return KDNode(\n        left=createKDTree(pointList[:median], axis),\n        right=createKDTree(pointList[median+1:], axis),\n        axis=axis, x=pointList[median][0][0], y=pointList[median][0][1],\n        data=pointList[median][1]\n    )\n\n\ndef cmpNode(point, treeNode):\n    treeNodeVal = treeNode.x if treeNode.axis == 0 else treeNode.y\n    return point[treeNode.axis] > treeNodeVal\n\n\ndef distance(point, node):\n    return sqrt(\n        (point[0] - node.x) ** 2 + (point[1] - node.y) ** 2\n    )\n\n\ndef separate(point, node, radius):\n    return distance(point, node) > radius\n\n\ndef searchNode(point, tree, k=3):\n    \"\"\" KD-tree search (partial: collects candidates along the descent path, no backtracking yet) \"\"\"\n\n    curNode = tree\n    nodeCache = [curNode]    # stack of nodes visited on the way down\n\n    while curNode.left or curNode.right:\n        nextNode = curNode.right if cmpNode(point, curNode) else curNode.left\n        if nextNode is None:    # only one child exists: follow it\n            nextNode = curNode.left or curNode.right\n        curNode = nextNode\n        nodeCache.insert(0, curNode)\n\n    radius = distance(point, nodeCache.pop(0))    # distance to the leaf reached first\n    kNodeList = []\n    for node in nodeCache:\n        dis = distance(point, node)\n        if len(kNodeList) < k:\n            kNodeList.append((node, dis))\n        else:\n            for j, (n, d) in enumerate(kNodeList):\n                if d > dis:    # replace a farther candidate with the closer node\n                    kNodeList[j] = (node, dis)\n                    break\n    return kNodeList\n\n\ndef classify0(inX, dataSet, labels, k):\n    \"\"\"\n    k-nearest-neighbour classifier (linear scan)\n    :param inX: input vector\n    :param dataSet: training set\n    :param labels: label vector\n    :param k: value of k\n    :return:\n    \"\"\"\n\n    # compute distances over the attribute values\n    dataSetSize = dataSet.shape[0]    # number of rows in the data set\n    diffMat = tile(inX, (dataSetSize, 1)) - dataSet    # stack dataSetSize copies of inX vertically and subtract the training set\n    distances = ((diffMat ** 2).sum(axis=1)) ** 0.5    # distance from the input vector to every training point (sum(axis=1) sums each row; axis=0 would sum columns)\n    sortedDistIndicies = argsort(distances)    # sort distances ascending and return the sorting indices (e.g. (2, 3, 1, 0))\n\n    classCount = Counter(\n        [labels[sortedDistIndicies[i]] for i in range(k)]\n    )    # count the classes of the k nearest known points and take the most frequent one as the prediction\n\n    return classCount.most_common()[0][0]\n\n\ndef createDataSet():\n    \"\"\" Build a small example data set \"\"\"\n\n    group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])\n    labels = ['A', 'A', 'B', 'B']\n    return group, labels\n\n\ndef file2matrix(filename):\n    \"\"\" Read a file into a feature matrix \"\"\"\n\n    lines = open(filename).readlines()\n\n    numberOfLines = len(lines)    # number of lines in the file\n    returnMat = zeros((numberOfLines, 3))    # matrix built from the first three fields of each line (initialised to 0)\n    classLabelVector = []    # labels of the data points\n\n    for index, line in enumerate(lines, 0):\n        listFromLine = line.strip().split('\\t')\n        returnMat[index, :] = listFromLine[0: 3]    # copy the first three fields into the matrix\n        classLabelVector.append(listFromLine[-1])    # the last field is the label\n\n    return returnMat, classLabelVector\n\n\ndef autoNorm(dataSet):\n    \"\"\"\n    Normalisation\n    :param dataSet: data set\n    :return:\n    \"\"\"\n\n    minVals, maxVals = dataSet.min(0), dataSet.max(0)    # per-column minima and maxima, one row each\n    ranges = maxVals - minVals    # row of per-column ranges\n    m = dataSet.shape[0]\n    normDataSet = (dataSet - tile(minVals, (m, 1))) / tile(ranges, (m, 1))\n\n    return normDataSet, ranges, minVals\n\n\ndef datingClassTest(filename):\n    \"\"\"\n    Classifier test\n    :param filename:\n    :return:\n    \"\"\"\n\n    datingDataMat, datingLabels = file2matrix(filename)    # read and normalise the data set\n    normMat, ranges, minVals = autoNorm(datingDataMat)\n\n    m = normMat.shape[0]\n    hoRatio = 0.10    # hold out 10% as the test set\n    numTestVecs = int(m * hoRatio)\n\n    trainingMat = normMat[numTestVecs: m, :]\n    trainingLabels = datingLabels[numTestVecs: m]\n\n    errorCount = 0    # run the test and count misclassifications\n    for i in range(numTestVecs):\n        classifierResult = classify0(    # feed an input vector, the training set and the labels to the classifier\n            normMat[i, :], trainingMat, trainingLabels, 3\n        )\n        print(\"the classifier came back with: {0}, the real answer is: {1}\".format(\n            classifierResult, datingLabels[i]\n        ))\n        if classifierResult != datingLabels[i]:\n            errorCount += 1.0\n\n    return errorCount / float(numTestVecs)\n\n\ndef classifyPerson(filename):\n    \"\"\"\n    Dating-site prediction\n    :param filename:\n    :return:\n    \"\"\"\n\n    percentTats = input(\"percentage of time spent playing video games? \")\n    ffMiles = input(\"frequent flier miles earned per year? \")\n    iceCream = input(\"liters of ice cream consumed per year? \")\n    datingDataMat, datingLabels = file2matrix(filename)\n    normMat, ranges, minVals = autoNorm(datingDataMat)\n\n    inArr = array([ffMiles, percentTats, iceCream]).astype(float)\n    classifierResult = classify0(\n        (inArr - minVals)/ranges, normMat, datingLabels, 3\n    )\n\n    print(\"You will probably like this person: \" + classifierResult)\n\n\ndef img2vector(filename):\n    \"\"\"\n    Convert an image file into a vector\n    :param filename:\n    :return:\n    \"\"\"\n    returnVect = zeros((1, 1024))\n    lines = open(filename).readlines()\n    for i in range(32):    # read the first 32 lines of the file\n        for j in range(32):    # store the first 32 characters of each line in the array\n            returnVect[0, 32 * i + j] = int(lines[i][j])\n\n    return returnVect\n\n\n# test: handwritten digit recognition\ndef handwritingClassTest(path, k=3):\n    \"\"\"\n    Handwritten digit recognition\n    The data set holds 0/1 matrices converted from handwritten digits.\n    The classifier reads the training files one by one into the feature matrix (each file becomes an array of length 1024) and records the matching label.\n    At test time each test file is read into an array (the input vector) and fed to the classifier.\n    :param path: root directory of the files\n    :return:\n    \"\"\"\n    hwLabels = []\n\n    # read the training set\n    trainingFileList = os.listdir(os.path.join(path, 'trainingDigits'))\n    m = len(trainingFileList)\n    trainingMat = zeros((m, 1024))\n    for i in range(m):\n        fileNameStr = trainingFileList[i]\n        # fileStr = fileNameStr.replace(\".txt\", \"\")\n        hwLabels.append(int(fileNameStr.split('_')[0]))    # parse the digit class out of the file name\n        trainingMat[i, :] = img2vector(os.path.join(path, 'trainingDigits', fileNameStr))    # read the file characters into the matrix\n\n    # read the test set\n    testFileList = os.listdir(os.path.join(path, 'testDigits'))\n    errorCount = 0.0\n    mTest = len(testFileList)\n    for i in range(mTest):\n        fileNameStr = testFileList[i]\n        classNumStr = int(fileNameStr.split('_')[0])\n        vectorUnderTest = img2vector(os.path.join(path, 'testDigits', fileNameStr))\n        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, k)\n\n        # print(\"the classifier came back with: %d, the real answer is: %d\" % (classifierResult, classNumStr))\n        if classifierResult != classNumStr:\n            errorCount += 1.0\n\n    print(\"\\nthe total number of errors is: %d\" % errorCount)\n    print(\"\\nthe total error rate is: %f\" % (errorCount/float(mTest)))\n\n\nif __name__ == \"__main__\":\n\n    # filename = \"D:/machinelearninginaction/Ch02/datingTestSet2.txt\"\n\n    # group, labels = createDataSet()\n    # print(classify0([0, 0], group, labels, 3))\n\n    # datingDataMat, datingLabels = file2matrix(filename)\n\n    # fig = plt.figure()\n    # ax = fig.add_subplot(111)    # arguments: 1 row, 1 column, plot 1\n    # ax.scatter(\n    #     datingDataMat[:, 1], datingDataMat[:, 2],\n    #     15.0 * array(datingLabels), 15.0 * array(datingLabels),\n    # )\n    # plt.grid(True)\n    # plt.show()\n    # plt.title(\"K-\")\n    # plt.xlabel('fly')\n    # plt.ylabel('consume')\n\n    # autoNorm(datingDataMat)\n    # datingClassTest(filename)\n\n    # filename = \"D:/machinelearninginaction/Ch02/datingTestSet.txt\"\n    # classifyPerson(filename)\n\n    # filename = \"D:/machinelearninginaction/Ch02/testDigits/0_13.txt\"\n    # print(img2vector(filename))\n\n    # path = \"D:/machinelearninginaction/Ch02/\"\n    # handwritingClassTest(path, 2)\n\n    tree = createKDTree(\n        pointList=list(\n            zip(\n                [[7, 2], [5, 4], [9, 6], [2, 3], [4, 7], [8, 1]],\n                [0, 1, 0, 1, 1, 0]\n            )\n        )\n    )\n\n    searchNode((8, 5), tree)" },
{ "alpha_fraction": 0.5503137707710266, "alphanum_fraction": 0.5653538107872009, "avg_line_length": 30.76288604736328, "blob_id": "41bed2a059fae6e586bdb8889db7584b58eaecbb", "content_id": "b4e025cb95b41a1d9dfc587dad2a0e2678904e8c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11102, "license_type": "no_license", "max_line_length": 115, "num_lines": 291, "path": "/source/decisionTree.py", "repo_name": "kyle-ip/machine-learning-in-action", "src_encoding": "UTF-8", "text": "\"\"\"\n    Decision trees: internal nodes represent features/attributes, leaf nodes represent classes\n        Every root-to-leaf path builds one rule; the set of paths and rules is mutually exclusive and complete\n        Represents the conditional probability distribution of classes given the features (P(Y|X): probability of class Y given features X)\n        Complexity: the number of leaf nodes; higher complexity means a tighter fit (stricter conditions, finer-grained splits)\n        Growing: locally optimal (greedily pick the feature with the largest information gain at the current step: ID3, C4.5)\n        Pruning: globally optimal (avoid over-splitting, i.e. overfitting: CART)\n\n    Pros: cheap to compute, output easy to interpret, insensitive to missing intermediate values, handles irrelevant features\n    Cons: prone to overfitting\n    Works with: numeric and nominal values\n\"\"\"\n\nfrom collections import Counter, defaultdict\nfrom math import log\nimport os\nimport pickle\nimport operator\nimport matplotlib.pyplot as plt\n\ndecisionNode = dict(boxstyle=\"sawtooth\", fc=\"0.8\")\nleafNode = dict(boxstyle=\"round4\", fc=\"0.8\")\narrow_args = dict(arrowstyle=\"<-\")\n\n\ndef getNumLeafs(myTree, numLeafs=0):\n    \"\"\" Count the leaf nodes of a decision tree \"\"\"\n\n    son = myTree[list(myTree.keys())[0]]    # take the first subtree of the current tree\n    for _, v in son.items():    # dict values are subtrees (recurse into them); anything else counts as one leaf\n        numLeafs += getNumLeafs(v) if isinstance(v, dict) else 1\n\n    return numLeafs\n\n\ndef getTreeDepth(myTree, maxDepth=0):\n    \"\"\" Compute the depth of a decision tree \"\"\"\n\n    son = myTree[list(myTree.keys())[0]]\n    for _, v in son.items():    # dict values add one level (otherwise depth 1); compare each subtree's depth with the maximum\n        curDepth = getTreeDepth(v) + 1 if isinstance(v, dict) else 1\n        if curDepth > maxDepth:\n            maxDepth = curDepth\n    return maxDepth\n\n\ndef plotNode(nodeTxt, centerPt, parentPt, nodeType):\n    createPlot.ax1.annotate(\n        nodeTxt, xy=parentPt, xycoords='axes fraction',\n        xytext=centerPt, textcoords='axes fraction',\n        va=\"center\", ha=\"center\", bbox=nodeType, arrowprops=arrow_args\n    )\n\n\ndef plotMidText(cntrPt, parentPt, txtString):\n    \"\"\" Add text between a parent and a child node \"\"\"\n    xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]\n    yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]\n    createPlot.ax1.text(xMid, yMid, txtString, va=\"center\", ha=\"center\", rotation=30)\n\n\ndef plotTree(myTree, parentPt, nodeTxt):    # if the first key tells you what feat was split on\n    \"\"\" Plot the decision tree \"\"\"\n\n    numLeafs = getNumLeafs(myTree)    # this determines the x width of this tree\n    firstStr = list(myTree.keys())[0]    # the text label for this node should be this\n\n    cntrPt = (\n        plotTree.xOff + (1.0 + float(numLeafs)) / 2.0 / plotTree.totalW,\n        plotTree.yOff\n    )\n\n    plotMidText(cntrPt, parentPt, nodeTxt)    # label the child node's attribute value\n    plotNode(firstStr, cntrPt, parentPt, decisionNode)\n    secondDict = myTree[firstStr]\n\n    plotTree.yOff = plotTree.yOff - 1.0 / plotTree.totalD    # decrease the y offset\n\n    for k, v in secondDict.items():\n        if isinstance(v, dict):    # plot subtrees recursively\n            plotTree(v, cntrPt, str(k))\n        else:    # plot leaf nodes\n            plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW\n            plotNode(v, (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)\n            plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(k))\n\n    plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD\n\n# if you do get a dictonary you know it's a tree, and the first element will be another dict\n\n\ndef createPlot(inTree):\n    fig = plt.figure(1, facecolor='white')\n    fig.clf()\n    axprops = dict(xticks=[], yticks=[])\n    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)    # no ticks\n    # createPlot.ax1 = plt.subplot(111, frameon=False)    # ticks for demo puropses\n    plotTree.totalW = float(getNumLeafs(inTree))\n    plotTree.totalD = float(getTreeDepth(inTree))\n    plotTree.xOff = -0.5/plotTree.totalW; plotTree.yOff = 1.0;\n    plotTree(inTree, (0.5,1.0), '')\n    plt.show()\n\n# def createPlot():\n#     fig = plt.figure(1, facecolor='white')\n#     fig.clf()\n#     createPlot.ax1 = plt.subplot(111, frameon=False)    # ticks for demo puropses\n#     plotNode('a decision node', (0.5, 0.1), (0.1, 0.5), decisionNode)\n#     plotNode('a leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)\n#     plt.show()\n\n\ndef createDataSet():\n    \"\"\" Build a small example data set \"\"\"\n\n    dataSet = [\n        [1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'],\n        [0, 1, 'no'], [0, 1, 'no']\n    ]    # the first two items are attribute values, the last one is the class (positive/negative)\n    labels = ['no surfacing', 'flippers']    # features: the labels matching the attribute values\n    return dataSet, labels\n\n\ndef calcShannonEnt(dataSet):\n    \"\"\" Shannon entropy: the expected information, i.e. how disordered the data distribution is (independent of the actual values) \"\"\"\n\n    labelCounts = Counter([i[-1] for i in dataSet])    # count positive and negative examples\n    shannonEnt = 0.0\n    for _, v in labelCounts.items():\n        prob = float(v) / len(dataSet)    # information: negative log of the class probability (higher probability -> lower entropy -> less mixed data)\n        shannonEnt += prob * log(prob, 2)\n    return -shannonEnt\n\n\ndef splitDataSet(dataSet, axis, value):\n    \"\"\"\n    Split the data set: keep the rows whose feature at position axis equals value (dropping that feature)\n    :param dataSet: data set\n    :param axis: feature to split on\n    :param value: value of that feature\n    :return:\n    \"\"\"\n    return [\n        featVec[:axis] + featVec[axis + 1:]\n        for featVec in dataSet\n        if featVec[axis] == value\n    ]\n\n\ndef chooseBestFeatureToSplit(dataSet):\n    \"\"\" Find the feature whose split yields the largest information gain \"\"\"\n\n    baseEntropy = calcShannonEnt(dataSet)    # entropy before splitting\n    infoGainList = []    # record each split's information gain (the drop in entropy/disorder) and feature index\n\n    for i in range(len(dataSet[0]) - 1):    # take the features column by column; the last column is the class\n        featList = {example[i] for example in dataSet}    # the set of values of this feature\n        newEntropy = 0.0\n        for value in featList:    # split the data set on every value of the current feature\n            subDataSet = splitDataSet(dataSet, i, value)    # split entropy = sum(subset probability * subset entropy), i.e. the conditional entropy\n            newEntropy += len(subDataSet) / float(len(dataSet)) * calcShannonEnt(subDataSet)\n        infoGainList.append((baseEntropy - newEntropy, i))    # information gain\n\n    return sorted(infoGainList, key=lambda x: x[0], reverse=True)[0][1]\n\n\ndef majorityCnt(classList):\n    \"\"\" Majority vote for the class of a leaf node \"\"\"\n    return Counter(classList).most_common(1)[0][0]    # classCount[vote] = classCount.setdefault(vote, 0) + 1\n\n\ndef createTree(dataSet, labels):\n    \"\"\"\n    Build the decision tree (root = current best feature, then build subtrees from feature nodes level by level)\n    :param dataSet: data set\n    :param labels: list of feature labels\n    :return:\n    \"\"\"\n\n    classList = [example[-1] for example in dataSet]    # the classes (positive/negative) of the current data set\n\n    if classList.count(classList[0]) == len(classList):    # all classes identical: stop splitting\n        return classList[0]\n\n    if len(dataSet[0]) == 1:    # all features used up: return the most frequent class\n        return majorityCnt(classList)\n\n    bestFeat = chooseBestFeatureToSplit(dataSet)    # pick the best feature (largest information gain)\n    bestFeatLabel = labels[bestFeat]\n\n    del(labels[bestFeat])    # delete the current best feature's label on every recursive call so it cannot reappear in subtrees\n    featValues = {i[bestFeat] for i in dataSet}    # all values of that feature\n\n    myTree = {bestFeatLabel: {}}\n    for value in featValues:    # build a subset for every value and recurse on it to build the subtree\n        myTree[bestFeatLabel][value] = createTree(\n            splitDataSet(dataSet, bestFeat, value), labels[:]\n        )\n\n    return myTree\n\n\ndef retrieveTree(i):\n    \"\"\" Sample trees for testing \"\"\"\n\n    listOfTrees = [\n        {\n            \"no surfacing\": {\n                0: \"no\",\n                1: {\n                    \"flippers\": {\n                        0: \"no\",\n                        1: \"yes\"}\n                }\n            }\n        },\n        {\n            \"no surfacing\": {\n                0: \"no\",\n                1: {\n                    \"flippers\": {\n                        0: {\n                            \"head\": {\n                                0: \"no\",\n                                1: \"yes\"\n                            }\n                        },\n                        1: \"no\"}\n                }\n            }\n        }\n    ]\n\n    return listOfTrees[i]\n\n\ndef classify(inputTree, featLabels, testVec):\n    \"\"\" Classify with a decision tree \"\"\"\n\n    firstStr = list(inputTree.keys())[0]    # take the feature and its subtrees/leaves\n    secondDict = inputTree[firstStr]\n\n    key = testVec[featLabels.index(firstStr)]    # index of the feature in the label list -> the test vector's value for that feature\n    valueOfFeat = secondDict[key]    # the decision stored in the tree for that value\n\n    return classify(valueOfFeat, featLabels, testVec) \\\n        if isinstance(valueOfFeat, dict) else valueOfFeat\n\n\ndef storeTree(inputTree, filename):\n    \"\"\" Save a decision tree: it need not be rebuilt on every use \"\"\"\n\n    with open(filename, 'wb') as f:\n        pickle.dump(inputTree, f)\n\n\ndef grabTree(filename):\n    \"\"\" Load a decision tree \"\"\"\n\n    return pickle.load(open(filename, 'rb'))\n\n\ndef lensesClassTest(path):\n    \"\"\" Contact lens classification test \"\"\"\n\n    with open(os.path.join(path, \"lenses.txt\")) as f:\n        lenses = [line.strip().split(\"\\t\") for line in f.readlines()]\n    lensesLabels = [\"age\", \"prescript\", \"astigmatic\", \"tearRate\"]\n    lensesTree = createTree(lenses, lensesLabels)\n\n    return lensesTree\n\n\nif __name__ == \"__main__\":\n    myDat, labels = createDataSet()\n\n    # print(calcShannonEnt(myDat))\n    # print(splitDataSet(myDat, 0, 1))\n    # print(chooseBestFeatureToSplit(myDat))\n    # print(createTree(myDat, labels))\n\n    # getNumLeafs({'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}})\n    # getTreeDepth({'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}})\n    # print(getTreeDepth(retrieveTree(0)))\n\n    # mytree = retrieveTree(0)\n    # print(classify(mytree, labels, [1, 0]))\n\n    tree = lensesClassTest(\"D:/machinelearninginaction/Ch03\")\n    createPlot(tree)" },
{ "alpha_fraction": 0.6563281416893005, "alphanum_fraction": 0.6623311638832092, "avg_line_length": 28.40441131591797, "blob_id": "2fb8132b2fb8132b2fb8132b2fb8132b2fb8132b", "content_id": "846c94d279ba3cb40683ed5981f628145831c02f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4208, "license_type": "no_license", "max_line_length": 102, "num_lines": 136, "path": "/sklearn/supervised_learning_test.py", "repo_name": "kyle-ip/machine-learning-in-action", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n# @Time : 2018/2/7\n# @Author : yipwinghong\n# @Email : [email protected]\n# @File : supervised_learning_test.py\n# @Software: PyCharm\n\nimport os\n\n# data handling\nimport pandas as pd\nimport numpy as np\n\n# plotting\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import ListedColormap\n\n# learning models\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\n\n# ensemble learning algorithms\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import RandomForestClassifier\n\nfrom sklearn.gaussian_process import GaussianProcessClassifier    # Gaussian process classifier\n\nfrom sklearn.linear_model import PassiveAggressiveClassifier    # generalized linear classifiers\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.linear_model import SGDClassifier\n\nfrom sklearn.neighbors import KNeighborsClassifier    # k-nearest-neighbour classifier\n\nfrom sklearn.naive_bayes import GaussianNB    # naive Bayes classifier\n\nfrom sklearn.neural_network import MLPClassifier    # neural network classifier\n\nfrom sklearn.tree import DecisionTreeClassifier    # decision tree classifiers\nfrom sklearn.tree import ExtraTreeClassifier\n\nfrom sklearn.svm import SVC    # support vector machine classifiers\nfrom sklearn.svm import LinearSVC\n\n\npath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\ndata = pd.read_csv(os.path.join(path, \"data/class_data.csv\"), header=0)\n\nfeature, target = data[['X', 'Y']], data['CLASS']\n\ntraining_feature, test_feature, training_target, test_target = train_test_split(\n    feature, target, test_size=.3\n)\n\ncm_color = ListedColormap([\"red\", \"blue\"])\nplt.scatter(\n    data['X'], data['Y'],\n    c=data['CLASS'], cmap=cm_color\n)\n# plt.show()\n\n# compare several classifiers\nmodels = {\n    'AdaBoost': AdaBoostClassifier(),\n    'Bagging': BaggingClassifier(),\n    'ExtraTrees': ExtraTreesClassifier(),\n    'GradientBoosting': GradientBoostingClassifier(),\n    'RandomForest': RandomForestClassifier(),\n    'GaussianProcess': GaussianProcessClassifier(),\n    'PassiveAggressive': PassiveAggressiveClassifier(),\n    'Ridge': RidgeClassifier(),\n    'SGD': SGDClassifier(),\n    'KNeighbors': KNeighborsClassifier(),\n    'GaussianNB': GaussianNB(),\n    'MLP': MLPClassifier(),\n    'DecisionTree': DecisionTreeClassifier(),\n    'ExtraTree': ExtraTreeClassifier(),\n    'SVC': SVC(),\n    'LinearSVC': LinearSVC()\n}\n\n\n# for name, model in models.items():\n#     model.fit(training_feature, training_target)\n#     prediction = model.predict(test_feature)\n#     score = accuracy_score(test_target, prediction)\n#     print(\"{0}\\t{1}\".format(name, score))\n\n# draw decision-boundary heat maps\ni = 1\ncm = plt.cm.Reds\ncm_color = ListedColormap(['red', 'yellow'])\n\n# rasterize the plane\nx_min, x_max = data['X'].min() - .5, data['X'].max() + .5\ny_min, y_max = data['Y'].min() - .5, data['Y'].max() + .5\nxx, yy = np.meshgrid(\n    np.arange(x_min, x_max, .1), np.arange(y_min, y_max, .1)\n)\n\nfor name, model in models.items():\n    ax = plt.subplot(4, 4, i)\n    model.fit(training_feature, training_target)\n    prediction = model.predict(test_feature)\n    score = accuracy_score(test_target, prediction)\n\n    if hasattr(model, \"decision_function\"):\n        z = model.decision_function(\n            np.c_[xx.ravel(), yy.ravel()]\n        )\n        print(\"decision_function\", model)\n    else:\n        z = model.predict_proba(\n            np.c_[xx.ravel(), yy.ravel()]\n        )[:, 1]\n\n    # draw the decision boundary as a heat map\n    z = z.reshape(xx.shape)\n    ax.contourf(xx, yy, z, cmap=cm, alpha=.6)\n\n    # plot the training and test sets\n    ax.scatter(training_feature['X'], training_feature['Y'], c=training_target, cmap=cm_color)\n    ax.scatter(test_feature['X'], test_feature['Y'], c=test_target, cmap=cm_color, edgecolors='black')\n\n    # style settings\n    ax.set_xlim(xx.min(), xx.max())\n    ax.set_ylim(yy.min(), yy.max())\n    ax.set_xticks(())\n    ax.set_yticks(())\n    ax.set_title('%s | %.2f' % (name, score))\n\n    i += 1\n\nplt.show()" },
{ "alpha_fraction": 0.5695976614952087, "alphanum_fraction": 0.5887662768363953, "avg_line_length": 33.25190734863281, "blob_id": "a54bb8e726c2512d41145ca3c18603d5784ef8aa", "content_id": "9547c3525ac61a39a649e1669eae0da6634e0945", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10495, "license_type": "no_license", "max_line_length": 105, "num_lines": 262, "path": "/source/naiveBayes.py", "repo_name": "kyle-ip/machine-learning-in-action", "src_encoding": "UTF-8", "text": "\"\"\"\n    Naive Bayes: assumes every feature is equally important (features are mutually independent)\n    Implementations: Bernoulli model (presence/absence only) or multinomial model (counts occurrences)\n    p(ci|x, y)    posterior: probability that a point at (x, y) belongs to class ci\n    p(ci)         prior: probability of class ci\n    p(ci|x, y) = p(x, y|ci)p(ci) / p(x, y)\n\n    Pros: still effective with little data, handles multi-class problems\n    Cons: sensitive to how the input data is prepared\n    Works with: nominal values\n\"\"\"\n\nimport os\nimport operator    # os and operator are needed by spamTest and calcMostFreq below\nimport re\n\nfrom numpy import *\nimport feedparser\n\n\ndef loadDataSet():\n    \"\"\" Build a small example data set \"\"\"\n\n    postingList=[\n        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],\n        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],    # 1\n        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],\n        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],    # 1\n        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],\n        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']    # 1\n    ]\n    classVec = [0, 1, 0, 1, 0, 1]    # 1 = abusive wording, 0 = normal speech\n    return postingList, classVec\n\n\ndef createVocabList(dataSet):\n    \"\"\" Deduplicate and sort the vocabulary \"\"\"\n    return sorted(list({str(item) for line in dataSet for item in line}))\n\n\ndef setOfWords2Vec(vocabList, inputSet):\n    \"\"\"\n    Bag-of-words model\n    :param vocabList: deduplicated, sorted vocabulary\n    :param inputSet: input post\n    :return: positions of the post's words in the vocabulary with their occurrence counts\n    \"\"\"\n\n    returnVec = [0] * len(vocabList)    # 0 marks a vocabulary word that does not occur in the input post\n\n    for word in inputSet:\n        if word in vocabList:\n            returnVec[vocabList.index(word)] += 1\n    return returnVec\n\n\ndef trainNB0(trainMatrix, trainCategory):\n    \"\"\"\n    Naive Bayes training\n    :param trainMatrix: training set\n    :param trainCategory: training set class labels\n    :return:\n    \"\"\"\n\n    numTrainDocs = len(trainMatrix)    # total number of posts\n    numWords = len(trainMatrix[0])    # vocabulary size (sum(trainCategory) is the number of abusive posts)\n    pAbusive = sum(trainCategory) / float(numTrainDocs)    # prior: probability that a post is abusive\n\n    p0Num, p1Num = ones(numWords), ones(numWords)    # per-word occurrence counts for the normal/abusive classes\n    p0Denom, p1Denom = 2.0, 2.0    # total word counts (Laplace smoothing: counts + number of classes)\n    for i in range(numTrainDocs):    # go through the posts one by one\n        if trainCategory[i] == 1:    # abusive post: bump the counts of the vocabulary words it contains\n            p1Num += trainMatrix[i]\n            p1Denom += sum(trainMatrix[i])    # total number of words in abusive posts\n        else:\n            p0Num += trainMatrix[i]\n            p0Denom += sum(trainMatrix[i])    # total number of words in normal posts\n    p1Vect, p0Vect = p1Num / p1Denom, p0Num / p0Denom    # conditional probability vectors (p(wi|c1) and p(wi|c0), see \"Statistical Learning Methods\", p. 51)\n\n    return log(p0Vect), log(p1Vect), pAbusive    # return log probabilities (avoids underflow)\n\n\ndef classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):\n    \"\"\"\n    Naive Bayes classifier\n    :param vec2Classify:\n    :param p0Vec: conditional probability vectors\n    :param p1Vec:\n    :param pClass1: prior probability\n    :return:\n    \"\"\"\n    p1 = sum(vec2Classify * p1Vec) + log(pClass1)    # probabilities of the positive and negative classes\n    p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)\n    if p1 > p0:\n        return 1\n    else:\n        return 0\n\n\ndef testingNB():\n    \"\"\" Abusive-post classification test \"\"\"\n\n    listOPosts, listClasses = loadDataSet()\n    myVocabList = createVocabList(listOPosts)\n    trainMat = [setOfWords2Vec(myVocabList, postinDoc) for postinDoc in listOPosts]\n    p0V, p1V, pAb = trainNB0(array(trainMat), array(listClasses))\n\n    testEntry = ['love', 'my', 'dalmation']\n    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))    # positions/counts of this post's words in the vocabulary\n    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))\n\n    testEntry = ['stupid', 'garbage']\n    thisDoc = array(setOfWords2Vec(myVocabList, testEntry))\n    print(testEntry, 'classified as: ', classifyNB(thisDoc, p0V, p1V, pAb))\n\n\n# example: filter spam with naive Bayes\ndef textParse(bigString):\n    \"\"\" Split text into a word list \"\"\"\n    listOfTokens = re.split(r'\\W*', bigString)\n    return [tok.lower() for tok in listOfTokens if len(tok) > 2]\n\n\ndef spamTest(path):\n    \"\"\" Spam classification test \"\"\"\n\n    docList, classList = [], []\n    for i in range(1, 26):    # load and parse the text files\n        filename = os.path.join(path, \"spam/{0}.txt\".format(i))\n\n        wordList = textParse(open(filename).read())\n        docList.append(wordList)    # store each mail's words, one mail per row\n        classList.append(1)\n\n        filename = os.path.join(path, \"ham/{0}.txt\".format(i))\n        wordList = textParse(open(filename).read())\n        docList.append(wordList)\n        classList.append(0)\n\n    vocabList = createVocabList(docList)    # build the vocabulary\n\n    totalErrorCount = 0\n    for i in range(100):    # cross-validation: hold out a random test set, train on the rest, average over 100 runs\n        trainingSet = list(range(50))\n\n        testSet = []    # build the test set\n        for _ in range(10):\n            randIndex = random.randint(0, len(trainingSet))\n            testSet.append(trainingSet[randIndex])\n            del(trainingSet[randIndex])\n\n        trainMat, trainClasses = [], []    # build the training set (word vectors: vocabulary positions and counts) and its class list\n        for docIndex in trainingSet:\n            trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))\n            trainClasses.append(classList[docIndex])\n        p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))\n\n        errorCount = 0    # classify the test set\n        for docIndex in testSet:    # turn each test mail into a word vector and classify it\n            wordVector = setOfWords2Vec(vocabList, docList[docIndex])\n            res = classifyNB(array(wordVector), p0V, p1V, pSpam)\n            if res != classList[docIndex]:\n                errorCount += 1\n                print(\"classification error\", docList[docIndex])\n\n        totalErrorCount += float(errorCount)\n    print('the error rate is: ', totalErrorCount / 1000)\n\n\n# example: use a naive Bayes classifier to find regional word preferences in personal ads\n\ndef calcMostFreq(vocabList, fullText):\n    \"\"\" Take the 30 most frequent words \"\"\"\n\n    freqDict = {}\n    for token in vocabList:\n        freqDict[token] = fullText.count(token)\n\n    return sorted(freqDict.items(), key=operator.itemgetter(1), reverse=True)[:30]\n\n\ndef localWords(feed1, feed0):\n    \"\"\" RSS feed classifier with high-frequency-word removal \"\"\"\n\n    docList, classList, fullText = [], [], []\n    minLen = min((len(feed1['entries']), len(feed0['entries'])))\n    for i in range(minLen):\n        wordList = textParse(feed1['entries'][i]['summary'])\n        docList.append(wordList)\n        fullText.extend(wordList)\n        classList.append(1)\n\n        wordList = textParse(feed0['entries'][i]['summary'])\n        docList.append(wordList)\n        fullText.extend(wordList)\n        classList.append(0)\n\n    vocabList = createVocabList(docList)    # build the vocabulary\n    top30Words = calcMostFreq(vocabList, fullText)    # remove the 30 most frequent words\n    print(top30Words)\n    for pairW in top30Words:\n        if pairW[0] in vocabList:\n            vocabList.remove(pairW[0])\n\n    # stop words should be removed as well: https://www.ranks.nl/stopwords\n    trainingSet = list(range(2 * minLen))    # build the training and test sets\n    testSet = []\n    for i in range(20):\n        randIndex = int(random.uniform(0,len(trainingSet)))\n        testSet.append(trainingSet[randIndex])\n        del(trainingSet[randIndex])\n\n    trainMat, trainClasses = [], []\n    for docIndex in trainingSet:\n        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))\n        trainClasses.append(classList[docIndex])\n\n    p0V, p1V, pSpam = trainNB0(array(trainMat), array(trainClasses))\n    errorCount = 0\n    for docIndex in testSet:    # classify the test set\n        wordVector = setOfWords2Vec(vocabList, docList[docIndex])\n        if classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:\n            errorCount += 1\n    print('the error rate is: ', float(errorCount)/len(testSet))\n    return vocabList, p0V, p1V\n\n\ndef getTopWords(ny, sf):\n    \"\"\" Show the most characteristic words \"\"\"\n\n    vocabList, p0V, p1V = localWords(ny, sf)\n    topNY, topSF = [], []\n    for i in range(len(p0V)):\n        if p0V[i] > -6.0 :\n            topSF.append((vocabList[i], p0V[i]))\n        if p1V[i] > -6.0 :\n            topNY.append((vocabList[i], p1V[i]))\n\n    sortedSF = sorted(topSF, key=lambda pair: pair[1], reverse=True)\n    print(\"SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**\")\n    for item in sortedSF:\n        print(item[0])\n    sortedNY = sorted(topNY, key=lambda pair: pair[1], reverse=True)\n    print(\"NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**\")\n    for item in sortedNY:\n        print(item[0])\n\nif __name__ == \"__main__\":\n    listOPosts, listClasses = loadDataSet()\n    myVocalbList = createVocabList(listOPosts)\n    res = setOfWords2Vec(myVocalbList, listOPosts[0])\n\n    trainMat = [setOfWords2Vec(myVocalbList, postinDoc) for postinDoc in listOPosts]\n\n    # print(myVocalbList)\n    # p0V, p1V, pAb = trainNB0(trainMat, listClasses)\n\n    # spam filtering test\n    # spamTest(\"D:/machinelearninginaction/Ch04/email\")\n\n    ny = feedparser.parse(\"http://newyork.craigslist.org/stp/index.rss\")    # NY is class 1\n    sf = feedparser.parse(\"http://sfbay.craigslist.org/stp/index.rss\")    # SF is class 0\n    localWords(ny, sf)\n    getTopWords(ny, sf)" },
-*- coding: utf-8 -*-\n# @Time : 2018/2/7\n# @Author : yipwinghong\n# @Email : [email protected]\n# @File : boston.py 支持向量机分类测试(波士顿房产)\n# @Software: PyCharm\n\nimport os\n\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns # 更友好的绘图库\nimport warnings\n\nfrom sklearn import datasets\nfrom sklearn import cross_validation\nfrom sklearn.svm import LinearSVR\n\nfrom matplotlib import pyplot as plt\n\npath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# boston = datasets.load_boston()\n# # print(boston.DESCR)\n#\n# feature, target = boston.data, boston.target\n#\n# model = LinearSVR()\n# predictions = cross_validation.cross_val_predict( # 交叉验证\n# model, feature, target, cv=10 # 随机将数据集等分成10份(9份用作训练,1份用作测试,循环验证)\n# )\n#\n# plt.scatter(target, predictions)\n# plt.plot( # 绘制 45 度参考线\n# [target.min(), target.max()], [target.min(), target.max()],\n# \"k--\", lw=4\n# )\n# plt.xlabel(\"true_target\")\n# plt.ylabel(\"prediction\")\n#\n# plt.show()\n\n\ndef null_count(data):\n \"\"\"\n 统计缺失值:删除数目为0的特征,降序排列\n :param data:\n :return: 所有特征的缺失值个数\n \"\"\"\n null_data = data.isnull().sum() # 计算缺失值数量\n print(null_data)\n\n null_data = null_data.drop(null_data[null_data == 0].index).sort_values(ascending=False)\n\n return null_data\n\n\ndef fill_null(data, features, judge_feature, judge_value, replace_value):\n \"\"\"\n 填补缺失值\n :param data:\n :param features: 特征列表\n :param judge_feature: 判断特征\n :param judge_value: 判断值\n :param replace_value: 替换值\n :return:\n \"\"\"\n for feature in features:\n null_index = data[data[feature].isnull()].index # 查找该特征缺失的样本的索引\n sp_index = [i for i in null_index if data[judge_feature][i] != judge_value] # 查找判断特征对应值不合理的样本的索引\n data[feature].fillna(replace_value, inplace=True) # 使用替换值填补缺失值\n for i in sp_index:\n data[feature].iloc[i] = data[feature].mode()[0] # 众数填补缺失值\n return data\n\n\n# 忽略Warnings\ndef ignore(*args, **kwargs):\n pass\n\nwarnings.warn = ignore\n\n\n# 加载数据\ntraining_set = pd.read_csv(os.path.join(path, \"data/boston_train.csv\"))\ntest_set = pd.read_csv(os.path.join(path, \"data/boston_test.csv\"))\ntraining_set.drop(['Id'], axis=1, inplace=True) # 移除Id列,axis=0表示行,axis=1表示列,inplace表示在原DF上修改\ntest_set.drop(['Id'], axis=1, inplace=True)\n\n# 离群点:删除居住面积大于4000的数据\ntraining_set.drop(training_set[training_set['GrLivArea'] > 4000].index, inplace=True)\n\nsns.set(style='darkgrid')\n\n# 打印散点图\n# fig = plt.figure()\n# ax = plt.scatter(training_set['GrLivArea'], training_set['SalePrice'])\n#\n# plt.xlabel('GrLivArea')\n# plt.ylabel('SalePrice')\n\n# plt.show()\n\n# 打印房价分布曲线\n# training_set['SalePrice'] = np.log(training_set['SalePrice']) # Log Transformation(处理右偏态分布)\n# g = sns.distplot( # 绘制柱状图\n# training_set['SalePrice'],\n# kde=True, # 绘制拟合曲线\n# label='skewness:%.2f' % training_set['SalePrice'].skew() # skew()函数计算偏态系数\n# )\n# plt.legend(loc='best', fontsize='large')\n# g.set(xlabel='SalePrice')\n\n# plt.show()\n\n# 计算关联系数\n# plt.subplots(figsize=(20, 16))\n# sns.heatmap(training_set.corr(), square=True)\n# plt.show()\n\n# 合并训练集和测试集\ndata = pd.concat([training_set, test_set], axis=0, ignore_index=True)\n# null_count(data)\n\n# 缺失值处理\nzero_replace_features = [\n 'BsmtHalfBath', 'BsmtHalfBath', 'BsmtFullBath',\n 'BsmtUnfSF', 'TotalBsmtSF', 'BsmtFinSF2', 'BsmtFinSF1'\n] # 使用0填补\nfor feature in zero_replace_features:\n data[feature].fillna(0.0, inplace=True)\n\nfeatures = [\n 'BsmtQual', 'BsmtCond', 'BsmtExposure',\n 'BsmtFinType1', 'BsmtFinType2'\n] # 使用'U'填补\ndata = fill_null(data, features, 'TotalBsmtSF', 0.0, 'U')\ndata = fill_null(data, 
['PoolQC'], 'PoolArea', 0, 'U')\n\n\nmode_inplace = [\n    'MSZoning', 'Utilities', 'Exterior1st', 'Electrical',\n    'Exterior2nd', 'KitchenQual', 'SaleType'\n]    # fill with the mode\nfor feature in mode_inplace:\n    data[feature].fillna(data[feature].mode()[0], inplace=True)\n\ng = sns.factorplot(\n    x='KitchenAbvGr', y='KitchenQual',\n    data=data, kind='box'\n)\n\ndata['Functional'].fillna('Typ', inplace=True)    # fill with 'Typ'\nplt.show()" }, { "alpha_fraction": 0.5443654656410217, "alphanum_fraction": 0.5689340233802795, "avg_line_length": 29.036584854125977, "blob_id": "f2a182a4a262f501f50d82b16b4bcc36f461109", "content_id": "fea80a88708bb3e1987c35c3fee8fb00a93c6c0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5829, "license_type": "no_license", "max_line_length": 108, "num_lines": 164, "path": "/source/logRegres.py", "repo_name": "kyle-ip/machine-learning-in-action", "src_encoding": "UTF-8", "text": "\"\"\"\n    Logistic regression\n\n    Pros: computationally cheap, easy to understand and implement\n    Cons: prone to underfitting; classification accuracy may be low\n    Works with: numeric and nominal values\n\n\"\"\"\n\nfrom numpy import *\nimport matplotlib.pyplot as plt\n\n\ndef loadDataSet():\n    \"\"\" Build the sample data set \"\"\"\n\n    dataMat, labelMat = [], []\n    for line in open('testSet.txt').readlines():    # data set of 100 rows and 3 columns (two features)\n        lineArr = line.strip().split()\n        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])    # first element is the x0 = 1.0 placeholder\n        labelMat.append(int(lineArr[2]))\n    return array(dataMat), array(labelMat)\n\n\ndef sigmoid(inX):\n    \"\"\"\n    Sigmoid function; the argument is an inner product (input vector * weight vector)\n    :param inX: \n    :return: \n    \"\"\"\n\n    return 1.0 / (1 + exp(-inX))\n\n\ndef gradAscent(dataMatIn, classLabels, alpha=0.001, maxCycles=150):\n    \"\"\"\n    Gradient ascent: every weight update scans the whole data set (batch algorithm)\n    :param dataMatIn: \n    :param classLabels:\n    :param alpha: learning rate\n    :return: \n    \"\"\"\n\n    dataMatrix = mat(dataMatIn)    # convert to a 100 * 3 matrix: 3 features (2 usable), 100 rows\n    labelMat = mat(classLabels).transpose()    # convert to a 100 * 1 matrix by transposing\n    m, n = shape(dataMatrix)    # number of rows and columns\n    weights = ones((n, 1))    # initialize the weights as an n * 1 matrix of ones\n    for _ in range(maxCycles):    # TODO adjust the weights by the error, iterating maxCycles times\n        error = labelMat - sigmoid(dataMatrix * weights)    # error between prediction and label\n        weights += alpha * dataMatrix.transpose() * error    # gradient ascent weight update (note the transpose)\n    return weights    # return the weight matrix\n\n\ndef plotBestFit(weights):\n    \"\"\" Plot the data and the decision boundary \"\"\"\n\n    dataMat, labelMat = loadDataSet()\n    dataArr = array(dataMat)\n    n, _ = shape(dataArr)\n\n    xcord1, xcord2 = [], []    # two pairs of lists holding the x/y coordinates of class 0 and class 1\n    ycord1, ycord2 = [], []\n    for i in range(n):\n        if int(labelMat[i]) == 1:\n            xcord1.append(dataArr[i, 1])\n            ycord1.append(dataArr[i, 2])\n        else:\n            xcord2.append(dataArr[i, 1])\n            ycord2.append(dataArr[i, 2])\n\n    fig = plt.figure()    # figure setup\n    ax = fig.add_subplot(111)\n    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')\n    ax.scatter(xcord2, ycord2, s=30, c='green')\n    x = arange(-3.0, 3.0, 0.1)\n    y = (-weights[0] - weights[1] * x) / weights[2]\n    ax.plot(x, y)\n    plt.xlabel('X1')\n    plt.ylabel('X2')\n    plt.show()\n\n\ndef stocGradAscent0(dataMatrix, classLabels, alpha=0.01):\n    \"\"\"\n    Stochastic gradient ascent: incremental updates (one sample, i.e. one row, per weight update), an online algorithm\n    :param dataMatrix: \n    :param classLabels: \n    :return: \n    \"\"\"\n\n    m, n = shape(dataMatrix)\n    weights = ones(n)\n    for i in range(m):\n        error = classLabels[i] - sigmoid(sum(dataMatrix[i] * weights))\n        weights += alpha * error * dataMatrix[i]    # adjust the weights by the error\n    return weights\n\n\ndef stocGradAscent1(dataMatrix, classLabels, numIter=30):\n    \"\"\"\n    Improved stochastic gradient ascent: 1) multiple passes; 2) random sampling; 3) learning rate decays with the iteration count\n    :param dataMatrix: \n    :param classLabels: \n    :param numIter: number of iterations\n    :return: \n    \"\"\"\n\n    m, n = shape(dataMatrix)\n    weights = ones(n)\n    for j in 
range(numIter):    # iterate numIter times\n        dataIndex = list(range(m))\n        random.shuffle(dataIndex)    # shuffle the samples for random picks\n        for i in dataIndex:    # train on one sample at a time\n            alpha = 4 / (1.0 + j + i) + 0.0001    # the learning rate shrinks as iterations and samples grow (not strictly decreasing when j << max(i))\n            error = classLabels[i] - sigmoid(sum(dataMatrix[i] * weights))\n            weights += alpha * error * dataMatrix[i]\n    return weights\n\n\ndef classifyVector(inX, weights):\n    \"\"\" Classifier: threshold the sigmoid output at 0.5 \"\"\"\n    return 1 if sigmoid(sum(inX * weights)) > 0.5 else 0\n\n\n# test: predict horse mortality from colic symptoms\ndef colicTest():\n    \"\"\" Estimate the horse mortality rate \"\"\"\n    trainingSet = []\n    trainingLabels = []\n    for line in open('horseColicTraining.txt').readlines():\n        currLine = line.strip().split('\\t')\n        lineArr = [float(currLine[i]) for i in range(21)]\n        trainingSet.append(lineArr)\n        trainingLabels.append(float(currLine[-1]))\n\n    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 1000)    # fit the logistic regression weights with stochastic gradient ascent\n    errorCount, numTestVec = 0, 0.0\n    for line in open('horseColicTest.txt').readlines():\n        numTestVec += 1.0\n        currLine = line.strip().split('\\t')\n        lineArr = [float(currLine[i]) for i in range(21)]\n        if classifyVector(array(lineArr), trainWeights) != int(currLine[-1]):    # count misclassifications\n            errorCount += 1\n    errorRate = errorCount / numTestVec\n    print(\"the error rate of this source is: {0}\".format(errorRate))\n    return errorRate\n\n\ndef multiTest():\n    \"\"\" Average the error rate over several runs \"\"\"\n\n    numTests, errorSum = 10, 0.0\n    errorSum = sum([colicTest() for _ in range(numTests)])\n    print(\"after {0} iterations the average error rate is: {1}\".format(numTests, errorSum / float(numTests)))\n\n\nif __name__ == \"__main__\":\n\n    # dataArr, labelMat = loadDataSet()\n    # w = stocGradAscent1(dataArr, labelMat)\n\n    # w = gradAscent(dataArr, labelMat)\n    # plotBestFit(w)\n    multiTest()" }, { "alpha_fraction": 0.6589285731315613, "alphanum_fraction": 0.6625000238418579, "avg_line_length": 20.461538314819336, "blob_id": "271a548dd226a3daee89d3b00e42360a4b49b8e7", "content_id": "078d22c2d9d990f6d0b88f2d22263f2ace6ae8f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2324, "license_type": "no_license", "max_line_length": 82, "num_lines": 52, "path": "/ML_share_1.md", "repo_name": "kyle-ip/machine-learning-in-action", "src_encoding": "UTF-8", "text": "# Machine Learning - A (Very) Brief Introduction to Classification Algorithms\n\n## Definition\n- Design algorithms that let a program find patterns in data and \"learn\" automatically, building a model.\n- Unlike programs written with traditional algorithms, a machine learning program **improves its performance by iterating and learning from experience**.\n- The data analysis part draws heavily on statistical theory, so machine learning algorithms are also called **statistical learning methods**.\n\n## How Learning Works\n#### Model: the outcome of learning \n> The conditional probability distribution or decision function that the program learns.\n\n#### Strategy: the criterion for learning \n> By what standard does the learned model become more accurate (**regularized maximum likelihood estimation**, **maximum a posteriori estimation**)? Empirical/structural risk minimization (the **underfitting** and **overfitting** problems).\n\n#### Algorithm: the computational method\n> Solve the optimization problem analytically, or compute the global optimum numerically (**(batch/stochastic) gradient descent**, **Newton / quasi-Newton methods**).\n\n## Categories\n#### Supervised Learning\nProblem | Description\n--- | ---\n**Regression** | Determine dependencies between variables from observations (correlated? in which direction?), fit a function to the existing data and build a prediction model for a continuous variable \n**Classification** | Given an existing data set, the model predicts an output for new inputs; the output is a discrete variable called a \"class\"\n**Tagging** | Input an observation sequence, output a sequence of tags or states (e.g., tagging parts of speech in a sentence with a hidden Markov model)\n\n> Given historical weather data for some place 
\nRegression: predict tomorrow's temperature (how many degrees?) \nClassification: predict tomorrow's weather (rain/cloudy/sunny...)\n\n\n#### Semi-supervised Learning, Unsupervised Learning\n\n## Supervised Classification Algorithms in Brief\n> ... (figures + whiteboard + short code demos)\n#### Perceptron\n#### k-Nearest Neighbor\n#### Decision Tree\n#### Naive Bayes\n#### Logistic Regression\n#### Support Vector Machine\n\n\n## Summary\n...\n\n## References\n- Li Hang, *Statistical Learning Methods* \n- Zhou Zhihua, *Machine Learning* \n---\nRecommended: \n- Sina Weibo - 爱可可-爱生活 (frontier AI journals and papers), 网路冷眼 \n- Bilibili tech channels - 算法时空, 圆桌字幕组, 3Blue1Brown, fly51fly \n" }, { "alpha_fraction": 0.4635544717311859, "alphanum_fraction": 0.49795249104499817, "avg_line_length": 42.64285659790039, "blob_id": "262f9aeab6823a73e12efb5da5bde9b0b80f8e1", "content_id": "c393a4bce00da8452624267efd85ce2ff9a9634c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1459, "license_type": "no_license", "max_line_length": 92, "num_lines": 28, "path": "/source/perceptron.py", "repo_name": "kyle-ip/machine-learning-in-action", "src_encoding": "UTF-8", "text": "from numpy import *\n\n\ndef classify(dataSet, classList, learningRate=1, numIter=20):\n    trainSetSize, dim = len(dataSet), len(dataSet[0])\n    w, b, errorRate = zeros((dim)), 0, 1\n    for _ in range(numIter):    # iterate over the training data\n        flag = True    # convergence flag: training is done once a full pass needs no update\n        errorCount = 0.0\n        for i in range(trainSetSize):    # feed each row into the decision function until every row is classified correctly\n            res = classList[i] * (dot(array(dataSet[i]), w) + b)    # dot: vector inner product\n            if res <= 0:    # opposite signs mean a misclassification: update w and b by gradient descent (see Statistical Learning Methods, p.29)\n                flag = False\n                errorCount += 1.0\n                w += learningRate * classList[i] * array(dataSet[i])\n                b += learningRate * classList[i]\n        errorRate = errorCount / trainSetSize\n        if flag:\n            break\n    f = lambda x: -1 if dot(w, array(x)) + b < 0 else 1\n    return f, errorRate    # return the perceptron model function and the error rate: f(x) = sign(w * x + b)\n\n\nif __name__ == \"__main__\":\n    dataSet = [[3, 3], [4, 3], [3, 1], [0, 0], [2, 0], [0, 2], [3, 0], [2, 5]]\n    classList = [1, 1, -1, 1, -1, 1, -1, 1]\n    f, errorRate = classify(dataSet, classList)\n    print(f([0, -1]), errorRate)" } ]
13
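A minimal, self-contained sketch of the batch gradient-ascent update implemented in logRegres.py of the repo above, assuming only numpy; the four-sample toy data set below is invented for illustration and is not part of the repository.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# toy data: the first column is the x0 = 1.0 placeholder, as in loadDataSet()
X = np.array([[1.0, 2.0, 3.0], [1.0, 1.0, 1.5], [1.0, -1.0, -2.0], [1.0, -2.0, -1.0]])
y = np.array([1.0, 1.0, 0.0, 0.0])

alpha, weights = 0.01, np.ones(3)
for _ in range(150):                  # maxCycles passes over the whole set
    error = y - sigmoid(X @ weights)  # per-sample error, as in gradAscent()
    weights += alpha * (X.T @ error)  # batch gradient-ascent step
print(weights)                        # weights that move toward separating the two toy classes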
PedroRodrigues-dev/ARTIFICIAL-INTELLIGENCE-AND-DATA-SCIENCE
https://github.com/PedroRodrigues-dev/ARTIFICIAL-INTELLIGENCE-AND-DATA-SCIENCE
d13621ef2f2bd36a01c0d5964be29a6098b947af
0b224144cf6cf8def46b71306ea403c53d8c7099
74b71b7fa164c18691c20d004f11d6b4214d4c80
refs/heads/main
2023-08-31T15:55:33.632435
2021-10-17T15:42:10
2021-10-17T15:42:10
418,175,729
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7868421077728271, "alphanum_fraction": 0.8052631616592407, "avg_line_length": 62.33333206176758, "blob_id": "09638cebb871bb202d3d708b6623261a5c63f4da", "content_id": "d5481d909c8291389cbb4c349d7b9461966c7561", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 380, "license_type": "no_license", "max_line_length": 98, "num_lines": 6, "path": "/README.md", "repo_name": "PedroRodrigues-dev/ARTIFICIAL-INTELLIGENCE-AND-DATA-SCIENCE", "src_encoding": "UTF-8", "text": "# ARTIFICIAL-INTELLIGENCE-AND-DATA-SCIENCE\n\nEste repositorio tem como objetivo abordar estudos em Inteligencia Artificial e Ciencia de dados, \nnele vemos a utilizacao de varias bibliotecas como scikit-learn, numpy e pybrain, as ferramentas\nutilizadas foram o Anaconda 3 e o Spyder 3 e 4, as versoes de python utilizadas foram a 3.5 para\no PyBrain e a 3.8 para os demais projetos.\n" }, { "alpha_fraction": 0.6474500894546509, "alphanum_fraction": 0.725055456161499, "avg_line_length": 24.05555534362793, "blob_id": "e404262dd2d29ad96aee604adb2dfe3339315d1c", "content_id": "cb34e433b6878e69aba2d61c476dd2c99057726a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 451, "license_type": "no_license", "max_line_length": 119, "num_lines": 18, "path": "/scikit_learn_iris.py", "repo_name": "PedroRodrigues-dev/ARTIFICIAL-INTELLIGENCE-AND-DATA-SCIENCE", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 13 01:32:05 2021\n\n@author: pedro\n\"\"\"\n\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn import datasets\n\niris = datasets.load_iris()\nentradas = iris.data\nsaidas = iris.target\n\nredeNeural = MLPClassifier(verbose = True, max_iter=1000, tol=0.00001, activation='logistic', learning_rate_init=0.001)\nredeNeural.fit(entradas, saidas)\nredeNeural.predict([[9, 7.2, 5.1, 10]])\n" } ]
2
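A hedged sketch of how the MLPClassifier fitted in scikit_learn_iris.py of the repo above could be evaluated on held-out data: the hyperparameter values mirror the script, while the 70/30 split and random_state are illustrative additions of mine.

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

clf = MLPClassifier(max_iter=1000, tol=0.00001, activation='logistic', learning_rate_init=0.001)
clf.fit(X_train, y_train)         # train on 70% of the samples
print(clf.score(X_test, y_test))  # mean accuracy on the held-out 30%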
invictuscapital/ccxt
https://github.com/invictuscapital/ccxt
e832a63685f7514cb69015b3267067da8f6c29c8
319847767daaa76e95618729ff2cebd4e2723b14
38421db0c83b198506751e33e8776820ada5f3d2
refs/heads/master
2023-03-29T00:40:56.098496
2021-05-20T10:34:26
2021-05-20T10:34:26
290,838,081
0
0
MIT
2020-08-27T17:22:16
2020-08-27T17:22:19
2021-03-31T10:24:11
null
[ { "alpha_fraction": 0.4546660780906677, "alphanum_fraction": 0.5242518186569214, "avg_line_length": 49.61940383911133, "blob_id": "caeb9888ec508568d7e3b6355ab4fbe9d9807dc2", "content_id": "b879a29da73f259720aa62860789b35ed79030d0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6785, "license_type": "permissive", "max_line_length": 162, "num_lines": 134, "path": "/python/ccxt/async_support/binanceusdm.py", "repo_name": "invictuscapital/ccxt", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:\n# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code\n\nfrom ccxt.async_support.binance import binance\n\n\nclass binanceusdm(binance):\n\n def describe(self):\n return self.deep_extend(super(binanceusdm, self).describe(), {\n 'id': 'binanceusdm',\n 'name': 'Binance USDⓈ-M',\n 'urls': {\n 'logo': 'https://user-images.githubusercontent.com/1294454/117738721-668c8d80-b205-11eb-8c49-3fad84c4a07f.jpg',\n },\n 'options': {\n 'defaultType': 'future',\n },\n # https://www.binance.com/en/fee/futureFee\n 'fees': {\n 'trading': {\n 'tierBased': True,\n 'percentage': True,\n 'taker': self.parse_number('0.000400'),\n 'maker': self.parse_number('0.000200'),\n 'tiers': {\n 'taker': [\n [self.parse_number('0'), self.parse_number('0.000400')],\n [self.parse_number('250'), self.parse_number('0.000400')],\n [self.parse_number('2500'), self.parse_number('0.000350')],\n [self.parse_number('7500'), self.parse_number('0.000320')],\n [self.parse_number('22500'), self.parse_number('0.000300')],\n [self.parse_number('50000'), self.parse_number('0.000270')],\n [self.parse_number('100000'), self.parse_number('0.000250')],\n [self.parse_number('200000'), self.parse_number('0.000220')],\n [self.parse_number('400000'), self.parse_number('0.000200')],\n [self.parse_number('750000'), self.parse_number('0.000170')],\n ],\n 'maker': [\n [self.parse_number('0'), self.parse_number('0.000200')],\n [self.parse_number('250'), self.parse_number('0.000160')],\n [self.parse_number('2500'), self.parse_number('0.000140')],\n [self.parse_number('7500'), self.parse_number('0.000120')],\n [self.parse_number('22500'), self.parse_number('0.000100')],\n [self.parse_number('50000'), self.parse_number('0.000080')],\n [self.parse_number('100000'), self.parse_number('0.000060')],\n [self.parse_number('200000'), self.parse_number('0.000040')],\n [self.parse_number('400000'), self.parse_number('0.000020')],\n [self.parse_number('750000'), self.parse_number('0')],\n ],\n },\n },\n },\n })\n\n async def fetch_trading_fees(self, params={}):\n await self.load_markets()\n marketSymbols = list(self.markets.keys())\n fees = {}\n accountInfo = await self.fapiPrivateGetAccount(params)\n # {\n # \"feeTier\": 0, # account commisssion tier\n # \"canTrade\": True, # if can trade\n # \"canDeposit\": True, # if can transfer in asset\n # \"canWithdraw\": True, # if can transfer out asset\n # \"updateTime\": 0,\n # \"totalInitialMargin\": \"0.00000000\", # total initial margin required with current mark price(useless with isolated positions), only for USDT asset\n # \"totalMaintMargin\": \"0.00000000\", # total maintenance margin required, only for USDT asset\n # \"totalWalletBalance\": \"23.72469206\", # total wallet balance, only for USDT asset\n # \"totalUnrealizedProfit\": \"0.00000000\", # total unrealized profit, only for USDT asset\n # \"totalMarginBalance\": \"23.72469206\", # total 
margin balance, only for USDT asset\n        #         \"totalPositionInitialMargin\": \"0.00000000\",    # initial margin required for positions with current mark price, only for USDT asset\n        #         \"totalOpenOrderInitialMargin\": \"0.00000000\",   # initial margin required for open orders with current mark price, only for USDT asset\n        #         \"totalCrossWalletBalance\": \"23.72469206\",      # crossed wallet balance, only for USDT asset\n        #         \"totalCrossUnPnl\": \"0.00000000\",               # unrealized profit of crossed positions, only for USDT asset\n        #         \"availableBalance\": \"23.72469206\",             # available balance, only for USDT asset\n        #         \"maxWithdrawAmount\": \"23.72469206\"             # maximum amount for transfer out, only for USDT asset\n        #         ...\n        #     }\n        feeTier = self.safe_integer(accountInfo, 'feeTier')\n        feeTiers = self.fees['trading']['tiers']\n        maker = feeTiers['maker'][feeTier][1]\n        taker = feeTiers['taker'][feeTier][1]\n        for i in range(0, len(marketSymbols)):\n            symbol = marketSymbols[i]\n            fees[symbol] = {\n                'info': {\n                    'feeTier': feeTier,\n                },\n                'symbol': symbol,\n                'maker': maker,\n                'taker': taker,\n            }\n        return fees\n\n    async def transfer_in(self, code, amount, params={}):\n        # transfer from spot wallet to usdm futures wallet\n        return await self.futuresTransfer(code, amount, 1, params)\n\n    async def transfer_out(self, code, amount, params={}):\n        # transfer from usdm futures wallet to spot wallet\n        return await self.futuresTransfer(code, amount, 2, params)\n\n    async def fetch_funding_rate(self, symbol=None, params={}):\n        await self.load_markets()\n        market = None\n        request = {}\n        if symbol is not None:\n            market = self.market(symbol)\n            request['symbol'] = market['id']\n        response = await self.fapiPublicGetPremiumIndex(self.extend(request, params))\n        #\n        #   {\n        #     \"symbol\": \"BTCUSDT\",\n        #     \"markPrice\": \"45802.81129892\",\n        #     \"indexPrice\": \"45745.47701915\",\n        #     \"estimatedSettlePrice\": \"45133.91753671\",\n        #     \"lastFundingRate\": \"0.00063521\",\n        #     \"interestRate\": \"0.00010000\",\n        #     \"nextFundingTime\": \"1621267200000\",\n        #     \"time\": \"1621252344001\"\n        #   }\n        #\n        if isinstance(response, list):\n            result = []\n            for i in range(0, len(response)):\n                parsed = self.parseFundingRate(response[i])\n                result.append(parsed)\n            return result\n        else:\n            return self.parseFundingRate(response)\n" } ]
1
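A small sketch of the tier lookup that fetch_trading_fees() in binanceusdm.py above performs: the account's feeTier indexes straight into the maker/taker tier tables. The table here is abridged from the class definition, and feeTier = 1 is a hypothetical value rather than a real API response.

# abridged tiers: [volume threshold, fee rate], indexed by feeTier
tiers = {
    'maker': [[0, 0.000200], [250, 0.000160], [2500, 0.000140]],
    'taker': [[0, 0.000400], [250, 0.000400], [2500, 0.000350]],
}
fee_tier = 1                         # hypothetical accountInfo['feeTier']
maker = tiers['maker'][fee_tier][1]  # 0.000160
taker = tiers['taker'][fee_tier][1]  # 0.000400
print(maker, taker)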
lzhzero/SHARDS-C
https://github.com/lzhzero/SHARDS-C
93170b614490c17ea91aa43b27bd43728c931090
6f461ce0b6ea0e7e95c824e6d1c31a0017ec9114
73a3669642b81f205a250dac75a1ec8bb07d91a8
refs/heads/master
2020-03-12T21:37:27.245415
2019-06-10T15:14:49
2019-06-10T15:14:49
130,831,945
0
0
null
2018-04-24T09:45:10
2018-04-23T23:11:44
2018-03-16T20:19:09
null
[ { "alpha_fraction": 0.46851786971092224, "alphanum_fraction": 0.5015918016433716, "avg_line_length": 36.034934997558594, "blob_id": "54f8dfe0ca77c7f717bbc318556f4fa2c642dacf", "content_id": "30e8b05ec6cf3a292baff16b5b9d46476bc25935", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 16962, "license_type": "no_license", "max_line_length": 162, "num_lines": 458, "path": "/src/shards_3d_talus_mini.c", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <stdio.h>\n#include <glib.h>\n#include <time.h>\n#include <stdint.h>\n#include \"SHARDS.h\"\n#include \"minisim.h\"\n\nvoid close_sub_table(gpointer key, gpointer value, gpointer userdata) {\n //fclose(value);\n g_hash_table_destroy(value);\n}\n\nint main(int argc, char** argv) {\n\n /*\targv[1] = length of each object.\n argv[2] = config file#bucket size\n argv[3] = R\n argv[4] = Tracefile\n argv[5] = mrc file\n argv[6] = op flag for 1st layer, e.g. LRU LFU, etc\n argv[7] = op flag for 2nd layer, e.g. LRU LFU\n argv[8] = s_max max tracked object size (related to memory usage and speed(mainly0)\n * argv[9] = resolution\n */\n\n int FACTOR = 262144; // number of 4KB blocks in 1GB\n\n int val;\n char second_layer[3];\n char first_layer[3];\n strcpy(first_layer, argv[6]);\n strcpy(second_layer, argv[7]);\n GHashTable *bucket_table = g_hash_table_new(g_direct_hash, g_direct_equal);\n char filename[sizeof \"0001.csv\" + sizeof argv[4]];\n\n if (strcmp(argv[6], \"LRU\") != 0 && strcmp(argv[6], \"LFU\") != 0 && strcmp(argv[6], \"MQ\") != 0 && strcmp(argv[6], \"FIFO\") != 0 && strcmp(argv[6], \"ARC\") != 0) {\n printf(\"%s\", argv[6]);\n printf(\"Current only support LRU, LFU, MQ, ARC and FIFO in first layer.\\n\");\n return -1;\n };\n if (strcmp(argv[7], \"LRU\") != 0 && strcmp(argv[7], \"LFU\") != 0 && strcmp(argv[7], \"MQ\") != 0 && strcmp(argv[7], \"FIFO\") != 0 && strcmp(argv[7], \"ARC\") != 0) {\n printf(\"%s\", argv[7]);\n printf(\"Current only support LRU, LFU, MQ, ARC and FIFO in second layer.\\n\");\n return -1;\n };\n\n\n GList *bucket_keys = NULL;\n GHashTable *bucket_mrc = NULL;\n bucket_mrc = g_hash_table_new(g_direct_hash, g_direct_equal);\n\n FILE *file;\n char filename_item_count[50];\n strcpy(filename_item_count, argv[4]);\n strcat(filename_item_count, \".count\");\n file = fopen(filename_item_count, \"r\");\n int UNIQUE_ITEM_COUNT = -1;\n\n int resolution = strtol(argv[9], NULL, 10);\n\n\n fscanf(file, \"%d\", &UNIQUE_ITEM_COUNT);\n fclose(file);\n\n printf(\"SHARDS\\n\");\n printf(\"Unique items in trace %s, is %d \\n\", argv[4], UNIQUE_ITEM_COUNT);\n\n int obj_length = strtol(argv[1], NULL, 10);\n int s_max = strtol(argv[8], NULL, 10);\n char* object = (char*) calloc((obj_length + 2), sizeof (char));\n char* shadow_object = NULL;\n\n int bucket = UNIQUE_ITEM_COUNT / resolution;\n printf(\"bucket size is %d\\n\", bucket);\n\n double R = strtod(argv[3], NULL);\n SHARDS *shards = NULL;\n clock_t start_time = clock();\n\n int cnt = 0;\n int bucket_cnt = 0;\n printf(\"compare result is %d\\n\", strcmp(first_layer, \"MQ\") != 0 && strcmp(first_layer, \"ARC\") != 0);\n\n\n //MINISIM\n //int mini_sim_count = 0;\n\n FILE *_mrc_file = fopen(argv[5], \"w\");\n printf(\"herei\\n\");\n\n\n FILE *config_file;\n char config_filename[100];\n strcpy(config_filename, argv[2]);\n config_file = fopen(config_filename, \"r\");\n\n\n\n // need to process cfg file here first\n GHashTable *cfg = NULL;\n GList *t1_list = NULL;\n\n cfg = 
g_hash_table_new(g_direct_hash, g_direct_equal);\n char *line_buf = NULL;\n size_t line_buff_size = 0;\n int line_count = 0;\n ssize_t line_size;\n //line_size =\n char _line[1024];\n fgets(_line, 1024, config_file);\n fgets(_line, 1024, config_file);\n //fgets(_line, 1024, config_file);\n\n while (fgets(_line, 1024, config_file)) {\n\n double cs_t1 = atof(strtok(_line, \", \"));\n double cs_t2 = atof(strtok(NULL, \", \"));\n double talus_expected_mrc = atof(strtok(NULL, \", \"));\n double talus_t1_p1 = atof(strtok(NULL, \", \"));\n double talus_t1_p2 = atof(strtok(NULL, \", \"));\n double talus_t1_s1 = atof(strtok(NULL, \", \"));\n double talus_t1_s2 = atof(strtok(NULL, \", \"));\n double talus_t2_p1 = atof(strtok(NULL, \", \"));\n double talus_t2_p2 = atof(strtok(NULL, \", \"));\n double talus_t2_s1 = atof(strtok(NULL, \", \"));\n double talus_t2_s2 = atof(strtok(NULL, \", \"));\n\n CFG_ELEM *_cfg_elem = (CFG_ELEM *) malloc(sizeof (CFG_ELEM));\n\n\n int cache_size_t1 = (int) (cs_t1 * FACTOR);\n _cfg_elem->t1_s1 = (int) (talus_t1_s1 * FACTOR);\n _cfg_elem->t1_s2 = cache_size_t1 - _cfg_elem->t1_s1;\n _cfg_elem->t1_p1 = talus_t1_p1;\n _cfg_elem->t1_p2 = talus_t1_p2;\n\n int cache_size_t2 = (int) (cs_t2 * FACTOR);\n _cfg_elem->t2_s1 = (int) (talus_t2_s1 * FACTOR);\n _cfg_elem->t2_s2 = cache_size_t2 - _cfg_elem->t2_s1;\n _cfg_elem->t2_p1 = talus_t2_p1;\n _cfg_elem->t2_p2 = talus_t2_p2;\n\n\n if (cs_t2 == 0)\n t1_list = g_list_append(t1_list, GINT_TO_POINTER(cache_size_t1));\n\n GHashTable *_tmp_table = NULL;\n if (g_hash_table_contains(cfg, GINT_TO_POINTER(cache_size_t1))) {\n _tmp_table = g_hash_table_lookup(cfg, GINT_TO_POINTER(cache_size_t1));\n g_hash_table_insert(_tmp_table, GINT_TO_POINTER(cache_size_t2), _cfg_elem);\n g_hash_table_insert(cfg, GINT_TO_POINTER(cache_size_t1), _tmp_table);\n } else {\n _tmp_table = g_hash_table_new(g_direct_hash, g_direct_equal);\n g_hash_table_insert(_tmp_table, GINT_TO_POINTER(cache_size_t2), _cfg_elem);\n g_hash_table_insert(cfg, GINT_TO_POINTER(cache_size_t1), _tmp_table);\n }\n\n }\n fclose(config_file);\n\n\n\n GList *_t1;\n //config_file = fopen(config_filename, \"r\");\n //char line[1023];\n for (_t1 = t1_list; _t1 != NULL; _t1 = _t1->next) {\n //while (fgets(line, 1024, config_file)) {\n //for (int cache_size = 0; cache_size <= UNIQUE_ITEM_COUNT; cache_size += bucket) {\n\n\n\n CFG_ELEM *_ce = g_hash_table_lookup(g_hash_table_lookup(cfg, _t1->data), GINT_TO_POINTER(0));\n\n\n //double cs_t1 = GPOINTER_TO_INT(_t1->data);\n double talus_t1_p1 = _ce->t1_p1;\n double talus_t1_p2 = _ce->t1_p2;\n double talus_t1_s1 = _ce->t1_s1;\n double talus_t1_s2 = _ce->t1_s2;\n\n\n double *mrc_pointer = malloc(sizeof (double));\n\n\n //cache_size for first layer cache\n\n int cache_size_t1 = GPOINTER_TO_INT(_t1->data);\n int cache_size_t1_s1 = _ce->t1_s1;\n int cache_size_t1_s2 = cache_size_t1 - cache_size_t1_s1;\n printf(\"\\n \\n\");\n printf(\"cache size for tier one is %d / %d \\n\", cache_size_t1_s1 + cache_size_t1_s2, UNIQUE_ITEM_COUNT);\n printf(\"cache size for tier one partition 1 is %d \\n\", cache_size_t1_s1);\n printf(\"cache size for tier one partition 2 is %d \\n\", cache_size_t1_s2);\n\n int cache_size_R_t1_s1 = cache_size_t1_s1 * R;\n int cache_size_R_t1_s2 = cache_size_t1_s2 * R;\n\n // if (mini_sim_count > 100) {\n // printf(\"too much mini sim, more than 100\");\n // exit(1);\n // } else {\n // mini_sim_count++;\n // }\n\n MINISIM * _minisim_t1_s1 = NULL;\n MINISIM * _minisim_t1_s2 = NULL;\n if (strcmp(first_layer, \"ARC\") == 0) {\n 
_minisim_t1_s1 = MINISIM_new_init_ARC(cache_size_R_t1_s1);\n _minisim_t1_s2 = MINISIM_new_init_ARC(cache_size_R_t1_s2);\n } else if (strcmp(first_layer, \"LRU\") == 0) {\n _minisim_t1_s1 = MINISIM_new_init_LRU(cache_size_R_t1_s1);\n _minisim_t1_s2 = MINISIM_new_init_LRU(cache_size_R_t1_s2);\n } else exit(1);\n\n sprintf(filename, \"%d%04d.csv\", getpid(), cache_size_t1 / bucket);\n file = fopen(argv[4], \"r\");\n\n //FILE *_mrc_file = fopen(argv[5], \"a\");\n FILE *_cache_miss_file = fopen(filename, \"w\");\n printf(\"file name is %s \\n\", filename);\n cnt = 0;\n //int hit = 0;\n printf(\"t1_p1 is %f \\n\", talus_t1_p1);\n while (fgets(object, obj_length + 2, file) != NULL) {\n cnt++;\n if (cnt % 10000000 == 0)\n printf(\"cnt is %d \\n\", cnt);\n uint64_t _hash[2];\n uint64_t _T_i = 0;\n uint64_t tmp = 1;\n tmp = tmp << 24;\n qhashmurmur3_128(object, obj_length, _hash);\n _T_i = _hash[1] &(tmp - 1);\n //_T_i = 1000000000;\n if (_T_i >= tmp * R) {\n //object = (char*) calloc((obj_length + 2), sizeof (char));\n continue;\n }\n\n if (cache_size_t1 == 0) {\n fprintf(_cache_miss_file, \"%s\", object);\n continue;\n }\n\n unsigned int obj_int = strtol(object, NULL, 10);\n //printf(\"%d\\n\", cnt);\n //if(cnt%1000==0 && cnt<10000) printf(\"%d\\n\", cnt);\n //if (cnt % 10000 == 0) printf(\"%d\\n\", cnt);\n //SHARDS_feed_obj(_shards, object, obj_length, second_layer, 2);\n\n //downsampling here\n //printf(\"t1_p1 is %f\\n\", talus_t1_p1);\n //printf(\"hash1 is %d \\n\", _hash[1]);\n //printf(\"T_i is %d, and t1_p1 * tmp is %f \\n\", _T_i, talus_t1_p1 * tmp);\n\n if (_T_i <= tmp * R * talus_t1_p1) {\n int ret = MINISIM_get_obj(_minisim_t1_s1, obj_int, obj_length, first_layer, 1);\n if (ret == 0) {\n // cache miss\n // if first layer, write obj to file for later processing\n // printf(\"cache miss\\n\")\n fprintf(_cache_miss_file, \"%s\", object);\n if (cache_size_t1_s1 != 0)\n MINISIM_put_obj(_minisim_t1_s1, obj_int, obj_length, first_layer, 2);\n } else if (ret == 1) {\n // cache hit\n //printf(\"cache hit\\n\");\n // hit++;\n }\n } else {\n int ret = MINISIM_get_obj(_minisim_t1_s2, obj_int, obj_length, first_layer, 1);\n if (ret == 0) {\n // cache miss\n // if first layer, write obj to file for later processing\n // printf(\"cache miss\\n\")\n fprintf(_cache_miss_file, \"%s\", object);\n if (cache_size_t1_s2 != 0)\n MINISIM_put_obj(_minisim_t1_s2, obj_int, obj_length, first_layer, 2);\n } else if (ret == 1) {\n // cache hit\n //printf(\"cache hit\\n\");\n // hit++;\n }\n }\n\n\n //object = (char*) calloc((obj_length + 2), sizeof (char));\n\n }\n //printf(\"cached size is %d\\n\", g_hash_table_size(_minisim->ARC_cached));\n fclose(file);\n fclose(_cache_miss_file);\n\n //:#####################worked till here\n\n\n\n printf(\"t1_s1 hitcount/count = %d/%d \\n\", _minisim_t1_s1->hitcount, _minisim_t1_s1->count);\n printf(\"t1_s2 hitcount/count = %d/%d \\n\", _minisim_t1_s2->hitcount, _minisim_t1_s2->count);\n\n if (cache_size_t1 != 0)\n fprintf(_mrc_file, \"%d, %d, %1.7f\\n\", cache_size_t1, 0, \\\n 1.0 - (double) (_minisim_t1_s1->hitcount + _minisim_t1_s2->hitcount) / (double) (_minisim_t1_s1->count + _minisim_t1_s2->count));\n\n if (cache_size_t1 == 0)\n *mrc_pointer = 1.0;\n else\n *mrc_pointer = 1.0 - (double) (_minisim_t1_s1->hitcount + _minisim_t1_s2->hitcount) / (double) (_minisim_t1_s1->count + _minisim_t1_s2->count);\n g_hash_table_insert(bucket_mrc, GINT_TO_POINTER(cache_size_t1 / bucket), mrc_pointer);\n\n\n free(_minisim_t1_s1);\n free(_minisim_t1_s2);\n }\n fclose(_mrc_file);\n 
//fclose(config_file);\n\n\n\n\n\n\n\n\n\n\n    clock_t end_time = clock();\n    printf(\"%ld\\n\", start_time);\n    printf(\"%ld\\n\", end_time);\n    int total_time = ((end_time - start_time)) / CLOCKS_PER_SEC;\n    printf(\"TIME: %d\\n\", total_time);\n    //unsigned int objects_parsed = shards->total_objects;\n\n    //double throughput = objects_parsed / (total_time + 1);\n    //SHARDS_free(shards);\n    //printf(\"Throughput: %f\\n\", throughput);\n\n\n    //Generate the rest MRCs\n    //R=1.0;\n    for (int bucket_c = 0; bucket_c <= UNIQUE_ITEM_COUNT; bucket_c += bucket) {\n        printf(\"current %d / %d \\n\", bucket_c / bucket, resolution);\n        int mini_sim_count = 0;\n\n\n        int t1_size = GPOINTER_TO_INT(g_list_nth_data(t1_list, bucket_c / bucket));\n        GHashTable *_tb = g_hash_table_lookup(cfg, GINT_TO_POINTER(t1_size));\n        GList *_key = g_hash_table_get_keys(_tb);\n        _key = g_list_sort(_key, intcmp_gdirect);\n        while (_key != NULL) {\n            //////////////////////for (int cache_size = bucket; cache_size <= UNIQUE_ITEM_COUNT; cache_size += bucket) {\n            //printf(\"cache size is %d / %d \\n\", cache_size, UNIQUE_ITEM_COUNT);\n            CFG_ELEM *_ce = g_hash_table_lookup(_tb, _key->data);\n\n\n            int cache_size_t2 = GPOINTER_TO_INT(_key->data);\n            int cache_size_t2_s1 = _ce->t2_s1;\n            int cache_size_t2_s2 = cache_size_t2 - cache_size_t2_s1;\n            printf(\"cache size for tier two is %d / %d \\n\", cache_size_t2_s1 + cache_size_t2_s2, UNIQUE_ITEM_COUNT);\n            printf(\"cache size for tier two partition 1 is %d \\n\", cache_size_t2_s1);\n            printf(\"cache size for tier two partition 2 is %d \\n\", cache_size_t2_s2);\n\n            int cache_size_R_t2_s1 = cache_size_t2_s1 * R;\n            int cache_size_R_t2_s2 = cache_size_t2_s2 * R;\n\n            if (mini_sim_count > 100) {\n                printf(\"too many mini sims, more than 100\");\n                exit(1);\n            } else {\n                mini_sim_count++;\n            }\n\n            MINISIM *_minisim_t2_s1 = NULL;\n            MINISIM *_minisim_t2_s2 = NULL;\n            if (strcmp(second_layer, \"ARC\") == 0) {\n                _minisim_t2_s1 = MINISIM_new_init_ARC(cache_size_R_t2_s1);\n                _minisim_t2_s2 = MINISIM_new_init_ARC(cache_size_R_t2_s2);\n            } else if (strcmp(second_layer, \"LRU\") == 0) {\n                _minisim_t2_s1 = MINISIM_new_init_LRU(cache_size_R_t2_s1);\n                _minisim_t2_s2 = MINISIM_new_init_LRU(cache_size_R_t2_s2);\n            } else exit(1);\n\n            sprintf(filename, \"%d%04d.csv\", getpid(), bucket_c / bucket);\n            file = fopen(filename, \"r\");\n\n            FILE *_mrc_file = fopen(argv[5], \"a\");\n\n\n            cnt = 0;\n            //int hit = 0;\n            while (fgets(object, obj_length + 2, file) != NULL) {\n                uint64_t _hash[2];\n                uint64_t _T_i = 0;\n                uint64_t tmp = 1;\n                tmp = tmp << 24;\n                qhashmurmur3_128(object, obj_length, _hash);\n                _T_i = _hash[1] &(tmp - 1);\n\n\n\n                unsigned int obj_int = strtol(object, NULL, 10);\n                //printf(\"%d\\n\", cnt);\n                //if(cnt%1000==0 && cnt<10000) printf(\"%d\\n\", cnt);\n                //if (cnt % 10000 == 0) printf(\"%d\\n\", cnt);\n                //SHARDS_feed_obj(_shards, object, obj_length, second_layer, 2);\n\n                if (_T_i <= _ce->t2_p1 * tmp * R) {\n                    int ret = MINISIM_get_obj(_minisim_t2_s1, obj_int, obj_length, second_layer, 2);\n                    if (ret == 0) {\n                        // cache miss\n                        // if first layer, write obj to file for later processing\n                        // printf(\"cache miss\\n\")\n                        if (cache_size_t2_s1 != 0)\n                            MINISIM_put_obj(_minisim_t2_s1, obj_int, obj_length, second_layer, 2);\n                    } else if (ret == 1) {\n                        // cache hit\n                        //printf(\"cache hit\\n\");\n                        // hit++;\n                    }\n                } else {\n                    int ret = MINISIM_get_obj(_minisim_t2_s2, obj_int, obj_length, second_layer, 2);\n                    if (ret == 0) {\n                        // cache miss\n                        // if first layer, write obj to file for later processing\n                        // printf(\"cache miss\\n\")\n                        if (cache_size_t2_s2 != 
0)\n                            MINISIM_put_obj(_minisim_t2_s2, obj_int, obj_length, second_layer, 2);\n                    } else if (ret == 1) {\n                        // cache hit\n                        //printf(\"cache hit\\n\");\n                        // hit++;\n                    }\n                }\n\n                //object = (char*) calloc((obj_length + 2), sizeof (char));\n                cnt++;\n            }\n            //printf(\"cached size is %d\\n\", g_hash_table_size(_minisim->ARC_cached));\n            fclose(file);\n            fprintf(_mrc_file, \"%d, %d, %1.7f\\n\", bucket_c, cache_size_t2, \\\n                    (1 - (double) (_minisim_t2_s1->hitcount + _minisim_t2_s2->hitcount) / (double) (_minisim_t2_s1->count + _minisim_t2_s2->count)) * \\\n                    *(double*) g_hash_table_lookup(bucket_mrc, GINT_TO_POINTER(bucket_c / bucket)));\n            fclose(_mrc_file);\n\n\n            _key = _key->next;\n        }\n\n    }\n\n\n\n\n\n    return 0;\n\n}\n" }, { "alpha_fraction": 0.5431711077690125, "alphanum_fraction": 0.555404782295227, "avg_line_length": 36.74444580078125, "blob_id": "187d20b67796574a6f825976fa1bb02dd434dbe0", "content_id": "2dc2e7360fcfea321355b13e972dfd7fed47a0db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 10192, "license_type": "no_license", "max_line_length": 213, "num_lines": 270, "path": "/src/shards_3d.c", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <stdio.h>\n#include <glib.h>\n#include <time.h>\n#include <stdint.h>\n#include \"SHARDS.h\"\n\nvoid close_sub_table(gpointer key, gpointer value, gpointer userdata) {\n    //fclose(value);\n    g_hash_table_destroy(value);\n}\n\nint main(int argc, char** argv) {\n\n    /*\targv[1] = length of each object.\n        argv[2] = bucket size\n        argv[3] = R\n        argv[4] = Tracefile\n        argv[5] = mrc file\n        argv[6] = op flag for 1st layer, e.g. LRU LFU, etc\n        argv[7] = op flag for 2nd layer, e.g. LRU LFU\n        argv[8] = s_max max tracked object size (related mainly to memory usage and speed)\n    */\n    int val;\n    char second_layer[5];\n    char first_layer[5];\n    strcpy(first_layer, argv[6]);\n    strcpy(second_layer, argv[7]);\n    GHashTable *bucket_table = g_hash_table_new(g_direct_hash, g_direct_equal);\n    char filename[sizeof \"0001.csv\"];\n\n    if (strcmp(argv[6], \"LRU\") != 0 && strcmp(argv[6], \"LFU\") != 0 && strcmp(argv[6], \"MQ\") != 0 && strcmp(argv[6], \"FIFO\") != 0) {\n        printf(\"%s\", argv[6]);\n        printf(\"Currently only LRU, LFU, MQ and FIFO are supported.\\n\");\n        return -1;\n    };\n    if (strcmp(argv[7], \"LRU\") != 0 && strcmp(argv[7], \"LFU\") != 0 && strcmp(argv[7], \"MQ\") != 0 && strcmp(argv[7], \"FIFO\") != 0) {\n        printf(\"%s\", argv[7]);\n        printf(\"Currently only LRU, LFU, MQ and FIFO are supported.\\n\");\n        return -1;\n    };\n\n\n    printf(\"SHARDS\\n\");\n    int obj_length = strtol(argv[1], NULL, 10);\n    int s_max = strtol(argv[8], NULL, 10);\n    char* object = (char*) calloc((obj_length + 2), sizeof (char));\n    char* shadow_object = NULL;\n    int bucket = strtol(argv[2], NULL, 10);\n\n\n\n    double R = strtod(argv[3], NULL);\n    SHARDS *shards = NULL;\n    if (strcmp(argv[6], \"LRU\") == 0)\n        shards = SHARDS_fixed_size_init_R(s_max, R, (unsigned int) bucket, String);\n    else if (strcmp(argv[6], \"LFU\") == 0)\n        shards = SHARDS_fixed_size_init_R_LFU(s_max, R, (unsigned int) bucket, String);\n    else if (strcmp(argv[6], \"MQ\") == 0)\n        shards = SHARDS_fixed_size_init_R_MQ(s_max, R, (unsigned int) bucket, String);\n\n    FILE *file;\n    file = fopen(argv[4], \"r\");\n    clock_t start_time = clock();\n\n    int cnt = 0;\n    int bucket_cnt = 0;\n\n    while (fgets(object, obj_length + 2, file) != NULL) {\n        shadow_object = (char*) calloc((obj_length + 2), sizeof (char));\n        strncpy(shadow_object, object, obj_length + 2);\n        val = SHARDS_feed_obj(shards, object, 
obj_length, argv[6], 1);\n //printf(\"val is %d \\n\", val);\n if (val != -1) {\n if (!g_hash_table_contains(bucket_table, GINT_TO_POINTER(val))) {\n //sprintf(filename, \"%04d.csv\", val/bucket);\n //FILE *sub_f = fopen(filename,\"w\");\n GHashTable *sub_table = g_hash_table_new(g_direct_hash, g_direct_equal);\n g_hash_table_insert(bucket_table, GINT_TO_POINTER(val), GINT_TO_POINTER(sub_table));\n //g_hash_table_insert(bucket_table, &val, sub_f);\n //fprintf(sub_f, \"%s,%d\\n\", strtok(shadow_object, \"\\n\"), cnt);\n g_hash_table_insert(sub_table, GINT_TO_POINTER(cnt), shadow_object);\n bucket_cnt++;\n } else {\n //fprintf(g_hash_table_lookup(bucket_table, &val), \"%s,%d\\n\", strtok(shadow_object, \"\\n\"), cnt);\n g_hash_table_insert(g_hash_table_lookup(bucket_table, GINT_TO_POINTER(val)), GINT_TO_POINTER(cnt), shadow_object);\n }\n }\n //else{\n //\tprintf(val);\n //}\n\n object = (char*) calloc((obj_length + 2), sizeof (char));\n cnt++;\n /*\n if(cnt==100000){\n break;\n }\n */\n }\n\n\n\n\n //////////////////////////////////////////////////\n // generate sub inputs for 2nd layer calculation\n int index = 0;\n\n GHashTable *out_table = g_hash_table_lookup(bucket_table, GINT_TO_POINTER(index));\n printf(\"g hash table size is :%d\\n\", g_hash_table_size(out_table));\n\n\n //index = 262144;\n //printf(\"g hash table size is :%d\\n\", g_hash_table_size(g_hash_table_lookup(bucket_table, GINT_TO_POINTER(index))));\n\n GList *bucket_keys = g_hash_table_get_keys(bucket_table);\n bucket_keys = g_list_sort(bucket_keys, (GCompareFunc) intcmp_gdirect);\n GList *out_keys = g_hash_table_get_keys(out_table);\n out_keys = g_list_sort(out_keys, (GCompareFunc) intcmp_gdirect);\n sprintf(filename, \"%04d.csv\", GPOINTER_TO_INT(g_list_last(bucket_keys)->data) / bucket);\n printf(\"file name is %s \\n\", filename);\n FILE *out_f = fopen(filename, \"w\");\n while (out_keys != NULL) {\n fprintf(out_f, \"%s\", (char*) g_hash_table_lookup(out_table, out_keys->data));\n //printf(out_keys->data);\n out_keys = out_keys->next;\n }\n fclose(out_f);\n\n for (GList *iter_key = g_list_last(bucket_keys); iter_key->prev != NULL; iter_key = iter_key->prev) {\n index = GPOINTER_TO_INT(iter_key->data);\n\n if (!g_hash_table_contains(bucket_table, GINT_TO_POINTER(index)))\n continue;\n GHashTable *tmp_table = g_hash_table_lookup(bucket_table, GINT_TO_POINTER(index));\n\n printf(\"index is %d, g hash table size is :%d\\n\", index / bucket, g_hash_table_size(out_table));\n if (tmp_table == NULL)\n continue;\n GList *tmp_keys = g_hash_table_get_keys(tmp_table);\n while (tmp_keys != NULL) {\n g_hash_table_insert(out_table, tmp_keys->data, g_hash_table_lookup(tmp_table, tmp_keys->data));\n tmp_keys = tmp_keys->next;\n }\n sprintf(filename, \"%04d.csv\", GPOINTER_TO_INT(iter_key->prev->data) / bucket);\n printf(\"file name is %s \\n\", filename);\n out_f = fopen(filename, \"w\");\n GList *out_keys = g_hash_table_get_keys(out_table);\n out_keys = g_list_sort(out_keys, (GCompareFunc) intcmp_gdirect);\n while (out_keys != NULL) {\n fprintf(out_f, \"%s\", (char*) g_hash_table_lookup(out_table, out_keys->data));\n out_keys = out_keys->next;\n }\n fclose(out_f);\n\n }\n\n\n //close all sub_f handles\n\n //bucket table is still useful during 2nd layer mrc generation\n //g_hash_table_foreach(bucket_table, close_sub_table, NULL);\n //g_hash_table_destroy(bucket_table);\n\n //double bucket_mrc[bucket_cnt];\n //bucket_mrc[0] = 1.0; // when no cache, miss ratio is 100%\n GHashTable *bucket_mrc = g_hash_table_new(g_direct_hash, 
g_direct_equal);\n double *mrc_pointer = malloc(sizeof (double));\n *mrc_pointer = 1.0;\n g_hash_table_insert(bucket_mrc, GINT_TO_POINTER(0), mrc_pointer);\n\n //printf(\"Loop 1 ended.\\n\");\n GHashTable *mrc = MRC(shards);\n\n\n FILE *mrc_file = fopen(argv[5], \"w\");\n GList *keys = g_hash_table_get_keys(mrc);\n keys = g_list_sort(keys, (GCompareFunc) intcmp);\n GList *first = keys;\n while (keys != NULL) {\n //printf(\"%d,%1.7f\\n\",*(int*)keys->data, *(double*)g_hash_table_lookup(mrc, keys->data) );\n fprintf(mrc_file, \"%d, 0, %1.7f\\n\", *(int*) keys->data, *(double*) g_hash_table_lookup(mrc, keys->data));\n mrc_pointer = malloc(sizeof (double));\n *mrc_pointer = *(double*) g_hash_table_lookup(mrc, keys->data);\n printf(\"mrc_pointer value is %f\\n\", *mrc_pointer);\n g_hash_table_insert(bucket_mrc, GINT_TO_POINTER(*(int*) keys->data / bucket), mrc_pointer);\n // bucket_mrc[*(int*) keys->data / bucket] = *(double*) g_hash_table_lookup(mrc, keys->data);\n keys = keys->next;\n }\n\n clock_t end_time = clock();\n\n fclose(mrc_file);\n g_list_free(first);\n\n g_hash_table_destroy(mrc);\n\n printf(\"%ld\\n\", start_time);\n printf(\"%ld\\n\", end_time);\n int total_time = ((end_time - start_time)) / CLOCKS_PER_SEC;\n printf(\"TIME: %d\\n\", total_time);\n unsigned int objects_parsed = shards->total_objects;\n\n double throughput = objects_parsed / (total_time + 1);\n SHARDS_free(shards);\n printf(\"Throughput: %f\\n\", throughput);\n\n\n //Generate the rest MRCs\n //R=1.0;\n for (int bucket_c = 0; bucket_c <= GPOINTER_TO_INT(g_list_last(bucket_keys)->data); bucket_c += bucket) {\n\n //}for (int i = 0; i < bucket_cnt; i++) {\n if (!g_hash_table_contains(bucket_table, GINT_TO_POINTER(bucket_c)))\n continue;\n printf(\"Generating 2nd layer %d / %d: \\n\", bucket_c / bucket, bucket_cnt - 1);\n printf(\"bucket_c = %d \\n\", bucket_c);\n\n SHARDS *_shards = NULL;\n printf(\"before if\\n\");\n if (strcmp(second_layer, \"LRU\") == 0) {\n printf(\"LRU \\n\");\n _shards = SHARDS_fixed_size_init_R(s_max, R, (unsigned int) bucket, String);\n } else if (strcmp(second_layer, \"LFU\") == 0) {\n printf(\"LFU\\n\");\n _shards = SHARDS_fixed_size_init_R_LFU(s_max, R, (unsigned int) bucket, String);\n } else if (strcmp(second_layer, \"MQ\") == 0) {\n printf(\"MQ\\n\");\n _shards = SHARDS_fixed_size_init_R_MQ(s_max, R, (unsigned int) bucket, String);\n }\n sprintf(filename, \"%04d.csv\", bucket_c / bucket);\n file = fopen(filename, \"r\");\n\n cnt = 0;\n\n while (fgets(object, obj_length + 2, file) != NULL) {\n //printf(\"%d\\n\", cnt);\n //if(cnt%1000==0 && cnt<10000) printf(\"%d\\n\", cnt);\n if (cnt % 10000 == 0) printf(\"%d\\n\", cnt);\n SHARDS_feed_obj(_shards, object, obj_length, second_layer, 2);\n\n object = (char*) calloc((obj_length + 2), sizeof (char));\n cnt++;\n }\n\n GHashTable *_mrc = MRC(_shards);\n\n FILE *_mrc_file = fopen(argv[5], \"a\");\n GList *_keys = g_hash_table_get_keys(_mrc);\n _keys = g_list_sort(_keys, (GCompareFunc) intcmp);\n GList *_first = _keys;\n while (_keys != NULL) {\n fprintf(_mrc_file, \"%d, %d, %1.7f\\n\", bucket_c, *(int*) _keys->data, *(double*) g_hash_table_lookup(_mrc, _keys->data) * *(double*) g_hash_table_lookup(bucket_mrc, GINT_TO_POINTER(bucket_c / bucket)));\n\n _keys = _keys->next;\n }\n\n\n fclose(_mrc_file);\n g_list_free(_first);\n\n g_hash_table_destroy(_mrc);\n\n //SHARDS_free(_shards);\n printf(\"Finished\");\n\n }\n return 0;\n\n}\n\n" }, { "alpha_fraction": 0.2771565616130829, "alphanum_fraction": 0.5495207905769348, "avg_line_length": 
25.044445037841797, "blob_id": "98cd218e1ea8070bc267de22833c29b4eb8fbc13", "content_id": "c049ead4898d5048b95f769c724ff6ee31e5b951", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1252, "license_type": "no_license", "max_line_length": 230, "num_lines": 45, "path": "/Exp_M_batch_LRU_ARC.sh", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "size=1000000000\nt1=LRU\nt2=LRU\nopt=minisim\nres=50\nfor i in \\\n    \"web_0 262144 0.001\"\\\n    \"hm_0 65536 0.01\"\\\n    \"hm_0 4096 0.01\"\\\n    \"mds_0 65536 0.01\"\\\n    \"mds_1 262144 0.002\"\\\n    \"prn_0 65536 0.01\"\\\n    \"prn_1 1048576 0.001\"\\\n    \"proj_0 65526 0.02\"\\\n    \"proj_1 4194304 0.0002\"\\\n    \"proj_2 4194304 0.0002\"\\\n    \"proj_3 65536 0.01\"\\\n    \"proj_4 2097152 0.001\"\\\n    \"prxy_0 8192 0.1\"\\\n    \"rsrch_0 2048 0.1\"\\\n    \"rsrch_1 64 1.0\"\\\n    \"rsrch_2 2048 0.1\"\\\n    \"src1_0 4194304 0.0001\"\\\n    \"src1_2 32768 0.01\"\\\n    \"src2_0 8192 0.05\"\\\n    \"src2_1 32768 0.001\"\\\n    \"src2_2 65536 0.002\"\\\n    \"stg_0 65536 0.002\"\\\n    \"stg_1 131072 0.001\"\\\n    \"ts_0 16384 0.01\"\\\n    \"usr_0 65536 0.01\"\\\n    \"usr_2 4194304 0.001\"\\\n    \"wdev_0 4096 0.1\"\\\n    \"web_1 131072 0.01\"\\\n    \"web_2 1048576 0.001\" \\\n    \"web_3 16384 0.01\"\n\n#    \"src1_1 1310720\"\ndo\n    set -- $i\n    [ -f ${HOME}/orca/traces/msr-mrc/${opt}_${1}.${3}.mrc_${t1}_${t2} ] && echo \"${1} found\" && continue\n    eval \"./shards_3d_talus_mini 20 $HOME/orca/traces/${opt}_${1}.${3}.mrc_${t1}_${t2}.cfg ${3} $HOME/orca/traces/msr/$1.csv.gz.out ${HOME}/orca/traces/msr-mrc/${opt}_${1}.${3}.mrc_${t1}_${t2} $t1 $t2 $size $res > /dev/null 2>&1 &\"\n    echo \"processed ${HOME}/orca/traces/$1.csv.gz.out\"\n    #eval \"rm *.csv\"\ndone\n \n\n \n \n \n \n" }, { "alpha_fraction": 0.5309814810752869, "alphanum_fraction": 0.5568092465400696, "avg_line_length": 29.078947067260742, "blob_id": "56b5d4fb761bc72d14fd31f4a828e6f0a3e06e35", "content_id": "fdcbe19c5d31268908b60a67537d6b560f58df11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2211, "license_type": "no_license", "max_line_length": 91, "num_lines": 76, "path": "/src/minisim_test.c", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "/*\n * To change this license header, choose License Headers in Project Properties.\n * To change this template file, choose Tools | Templates\n * and open the template in the editor.\n */\n\n/*\n * File:   minisim_test.c\n * Author: zliu\n *\n * Created on March 7, 2019, 8:32 AM\n */\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <glib.h>\n#include <time.h>\n#include <stdint.h>\n#include \"SHARDS.h\"\n#include \"minisim.h\"\n\n/*\n *\n */\nint main(int argc, char** argv) {\n    /*\targv[1] = algorithm.\n        argv[2] = trace file\n        argv[3] = cache size\n        argv[4] = Tracefile\n        argv[5] = mrc file\n        argv[6] = op flag for 1st layer, e.g. LRU LFU, etc\n        argv[7] = op flag for 2nd layer, e.g. 
LRU LFU\n argv[8] = s_max max tracked object size (related to memory usage and speed(mainly0)\n */\n int cache_size = strtol(argv[3], NULL, 10);\n int obj_length = 20;\n char* object = (char*) calloc((obj_length + 2), sizeof (char));\n\n char algorithm[3];\n strcpy(algorithm, argv[1]);\n\n\n FILE *file = NULL;\n file = fopen(argv[2], \"r\");\n\n MINISIM *_minisim = NULL;\n _minisim = MINISIM_new_init_ARC(cache_size);\n int cnt = 0;\n while (fgets(object, obj_length + 2, file) != NULL) {\n //printf(\"%d\\n\", cnt);\n unsigned int obj_int = strtol(object, NULL, 10);\n\n if (cnt % 1000 == 0 && cnt < 10000) printf(\"%d\\n\", cnt);\n if (cnt % 10000 == 0) printf(\"%d\\n\", cnt);\n //SHARDS_feed_obj(_shards, object, obj_length, second_layer, 2);\n int ret = MINISIM_get_obj(_minisim, obj_int, obj_length, algorithm, 2);\n //printf(\"ret is %d\\n\", ret);\n if (ret == 0) {\n // cache miss\n // if first layer, write obj to file for later processing\n //printf(\"cache miss\\n\");\n MINISIM_put_obj(_minisim, obj_int, obj_length, algorithm, 2);\n } else if (ret == 1) {\n // cache hit\n //printf(\"cache hit\\n\");\n // hit++;\n }\n cnt++;\n //object = (char*) calloc((obj_length + 2), sizeof (char));\n\n }\n printf(\"total hit is %d\\n\", _minisim->ARC_hitcount);\n printf(\"total access is %d\\n\", _minisim->ARC_count);\n\n return (EXIT_SUCCESS);\n}\n\n" }, { "alpha_fraction": 0.6155098080635071, "alphanum_fraction": 0.6305409073829651, "avg_line_length": 33.69767379760742, "blob_id": "8b84e99b029076afe7bdcc97ecf37e89f4bb91ec", "content_id": "ec6fcc9559d00d4e813ed57a5202fc23a5bfb880", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 10445, "license_type": "no_license", "max_line_length": 139, "num_lines": 301, "path": "/src/shards_3d_err.c", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <stdio.h>\n#include <glib.h>\n#include <time.h>\n#include <stdint.h>\n#include \"SHARDS.h\"\n\nvoid close_sub_table(gpointer key, gpointer value, gpointer userdata){\n\t//fclose(value);\n\tg_hash_table_destroy(value);\n}\nint main(int argc, char** argv){\n\n\t/*\targv[1] = length of each object.\n\t\targv[2] = bucket size\n\t\targv[3] = R\n\t\targv[4] = Tracefile\n\t\targv[5] = mrc file\n\t\targv[6] = op flag for 1st layer, e.g. LRU LFU, etc\n\t\targv[7] = op flag for 2nd layer, e.g. 
LRU LFU\n\t\targv[8] = s_max max tracked object size (related to memory usage and speed(mainly0)\n\t*/\n\tint val;\n\tGHashTable *bucket_table_lru = g_hash_table_new(g_direct_hash, g_direct_equal);\n\tGHashTable *bucket_table_lfu = g_hash_table_new(g_direct_hash, g_direct_equal);\n\tGHashTable *bucket_table = NULL;\n\tchar filename[sizeof \"0001.csv\"];\n\n\tif(strcmp(argv[6], \"LRU\") != 0 && strcmp(argv[6], \"LFU\") != 0){\n\t\tprintf(\"%s\",argv[6]);\n\t\tprintf(\"Current only support LRU and LFU.\\n\");\n\t\treturn -1;\n\t};\n\tif(strcmp(argv[7], \"LRU\") != 0 && strcmp(argv[7], \"LFU\") != 0){\n\t\tprintf(\"%s\",argv[7]);\n\t\tprintf(\"Current only support LRU and LFU.\\n\");\n\t\treturn -1;\n\t};\n\n\tprintf(\"SHARDS\\n\");\n\tint obj_length = strtol(argv[1],NULL,10);\n\tint s_max = strtol(argv[8],NULL,10);\n\tchar* object = (char*)calloc((obj_length+2),sizeof(char));\n\tchar* lru_object = (char*)calloc((obj_length+2),sizeof(char));\n\tchar* lfu_object = (char*)calloc((obj_length+2),sizeof(char));\n\tchar* shadow_object = NULL;\n\tint bucket = strtol(argv[2],NULL,10);\n\t\n\t\n\n\tdouble R = strtod(argv[3], NULL);\n\tSHARDS *shards_LRU = NULL;\n\tSHARDS *shards_LFU = NULL;\n\t//if(strcmp(argv[6], \"LRU\") == 0)\n\tshards_LRU = SHARDS_fixed_size_init_R( s_max,R, (unsigned int)bucket, String);\n\t//else if(strcmp(argv[6], \"LFU\") == 0)\n\tshards_LFU = SHARDS_fixed_size_init_R_LFU( s_max,R, (unsigned int)bucket, String);\n\tFILE *file;\n\tfile = fopen(argv[4], \"r\");\n\tclock_t start_time = clock();\n\n\tint cnt = 0;\n\tint bucket_cnt_lru = 0;\n\tint bucket_cnt_lfu = 0;\n\tint bucket_cnt_l1 = 0;\n\tint bucket_cnt_l2 = 0;\n\n\twhile(fgets(object, obj_length+2, file)!=NULL){\n\t\tlru_object = (char*)calloc((obj_length+2),sizeof(char));\n\t\tlfu_object = (char*)calloc((obj_length+2),sizeof(char));\n\t\tstrncpy(lru_object, object, obj_length+2);\n\t\tstrncpy(lfu_object, object, obj_length+2);\n\t\tval = SHARDS_feed_obj(shards_LRU, lru_object, obj_length, \"LRU\");\n\t\t//process lru first\n\t\tif(val != -1){\t\n\t\t\tif (!g_hash_table_contains(bucket_table_lru, GINT_TO_POINTER(val))){\n\t\t\t\t//sprintf(filename, \"%04d.csv\", val/bucket);\n\t\t\t\t//FILE *sub_f = fopen(filename,\"w\");\n\t\t\t\tGHashTable *sub_table = g_hash_table_new(g_direct_hash, g_direct_equal);\n\t\t\t\tg_hash_table_insert(bucket_table_lru, GINT_TO_POINTER(val), GINT_TO_POINTER(sub_table));\n\t\t\t\t//g_hash_table_insert(bucket_table, &val, sub_f);\n\t\t\t\t//fprintf(sub_f, \"%s,%d\\n\", strtok(shadow_object, \"\\n\"), cnt);\n\t\t\t\tg_hash_table_insert(sub_table, GINT_TO_POINTER(cnt), shadow_object);\n\t\t\t\tbucket_cnt_lru++;\n\t\t\t}\n\t\t\telse{\n\t\t\t\t//fprintf(g_hash_table_lookup(bucket_table, &val), \"%s,%d\\n\", strtok(shadow_object, \"\\n\"), cnt);\n\t\t\t\tg_hash_table_insert(g_hash_table_lookup(bucket_table_lru, GINT_TO_POINTER(val)), GINT_TO_POINTER(cnt), shadow_object);\n\t\t\t}\n\t\t}\n\t\t//process lfu now\n\t\tval = SHARDS_feed_obj(shards_LFU, lfu_object, obj_length, \"LFU\");\n\t\tif(val != -1){\t\n\t\t\tif (!g_hash_table_contains(bucket_table_lfu, GINT_TO_POINTER(val))){\n\t\t\t\t//sprintf(filename, \"%04d.csv\", val/bucket);\n\t\t\t\t//FILE *sub_f = fopen(filename,\"w\");\n\t\t\t\tGHashTable *sub_table = g_hash_table_new(g_direct_hash, g_direct_equal);\n\t\t\t\tg_hash_table_insert(bucket_table_lfu, GINT_TO_POINTER(val), GINT_TO_POINTER(sub_table));\n\t\t\t\t//g_hash_table_insert(bucket_table, &val, sub_f);\n\t\t\t\t//fprintf(sub_f, \"%s,%d\\n\", strtok(shadow_object, \"\\n\"), 
cnt);\n\t\t\t\tg_hash_table_insert(sub_table, GINT_TO_POINTER(cnt), shadow_object);\n\t\t\t\tbucket_cnt_lfu++;\n\t\t\t}\n\t\t\telse{\n\t\t\t\t//fprintf(g_hash_table_lookup(bucket_table, &val), \"%s,%d\\n\", strtok(shadow_object, \"\\n\"), cnt);\n\t\t\t\tg_hash_table_insert(g_hash_table_lookup(bucket_table_lfu, GINT_TO_POINTER(val)), GINT_TO_POINTER(cnt), shadow_object);\n\t\t\t}\n\t\t}\n\t\t\n\t\tcnt++;\n\t\t/*\n\t\tif(cnt==100000){\n\t\t\tbreak;\n\t\t}\n\t\t*/\n\t}\n\n\t\n\n\tif(strcmp(argv[6], \"LRU\") == 0){\n\t\tbucket_cnt_l1 = bucket_cnt_lru;\n\t\tbucket_cnt_l2 = bucket_cnt_lfu;\n\t}\n\telse{\n\t\tbucket_cnt_l1 = bucket_cnt_lru;\t\n\t\tbucket_cnt_l2 = bucket_cnt_lru;\n\t}\n\t//////////////////////////////////////////////////\n\t// generate sub inputs for 2nd layer calculation\n\tint index = 0;\n\n\tGHashTable *out_table_lru = g_hash_table_lookup(bucket_table_lru, GINT_TO_POINTER(index));\n\tGHashTable *out_table_lfu = g_hash_table_lookup(bucket_table_lfu, GINT_TO_POINTER(index));\n\tGHashTable *out_table = NULL;\t\t\n\tprintf(\"g hash lru table size is :%d\\n\",g_hash_table_size(out_table_lru));\n\tprintf(\"g hash lfu table size is :%d\\n\",g_hash_table_size(out_table_lfu));\n\n\n\t//index = 262144;\n\t//printf(\"g hash table size is :%d\\n\",g_hash_table_size(g_hash_table_lookup(bucket_table, GINT_TO_POINTER(index))));\n\n\tsprintf(filename, \"%04d.csv\", bucket_cnt_l1-1);\n\tFILE *out_f = fopen(filename, \"w\");\n\tGList *out_keys_lru = g_hash_table_get_keys(out_table_lru);\n\tGList *out_keys_lfu = g_hash_table_get_keys(out_table_lfu);\n\tout_keys_lru = g_list_sort(out_keys_lru, (GCompareFunc) intcmp_gdirect );\n\tout_keys_lfu = g_list_sort(out_keys_lfu, (GCompareFunc) intcmp_gdirect );\n\t\n\tif(strcmp(argv[6],\"LRU\")==0)\t\n\t\twhile(out_keys_lru!=NULL){\n\t\t\tfprintf(out_f, \"%s\", (char*)g_hash_table_lookup(out_table_lru, out_keys_lru->data));\n\t\t\tout_keys_lru = out_keys_lru->next;\n\t\t}\n\telse\n\t\twhile(out_keys_lfu!=NULL){\n\t\t\tfprintf(out_f, \"%s\", (char*)g_hash_table_lookup(out_table_lfu, out_keys_lfu->data));\n\t\t\tout_keys_lfu = out_keys_lfu->next;\n\t\t}\n\n\tfclose(out_f);\n\n\tif(strcmp(argv[6],\"LRU\")==0){\t\n\t\tbucket_table = bucket_table_lru;\n\t\tout_table = out_table_lru;\n\t}\n\telse{\n\t\tbucket_table = bucket_table_lfu;\n\t\tout_table = out_table_lfu;\n\t}\n\tfor(int bucket_c = bucket_cnt_l1 -1; bucket_c > 0; bucket_c --){\n\t\tindex = (bucket_c)*bucket;\n\t\tGHashTable *tmp_table = g_hash_table_lookup(bucket_table, GINT_TO_POINTER(index));\n\t\tprintf(\"index is %d, g hash table size is :%d\\n\", index, g_hash_table_size(out_table));\t\n\t\t\n\t\tGList *tmp_keys = g_hash_table_get_keys(tmp_table);\n\t\twhile(tmp_keys != NULL){\n\t\t\tg_hash_table_insert(out_table,tmp_keys->data, g_hash_table_lookup(tmp_table, tmp_keys->data));\n\t\t\ttmp_keys = tmp_keys->next;\n\t\t}\n\t\tsprintf(filename, \"%04d.csv\", bucket_c-1);\n\t\tout_f = fopen(filename, \"w\");\n\t\tGList *out_keys = g_hash_table_get_keys(out_table);\n\t\tout_keys = g_list_sort(out_keys, (GCompareFunc) intcmp_gdirect );\n\t\twhile(out_keys!=NULL){\n\t \tfprintf(out_f, \"%s\", (char*) g_hash_table_lookup(out_table, out_keys->data));\n\t\t\tout_keys = out_keys->next;\n\t\t}\n\t\tfclose(out_f);\n\n\t}\n\n\n\t//close all sub_f handles\n\tg_hash_table_foreach(bucket_table_lru, close_sub_table, NULL);\n\tg_hash_table_destroy(bucket_table_lru);\n\tg_hash_table_foreach(bucket_table_lfu, close_sub_table, NULL);\n\tg_hash_table_destroy(bucket_table_lfu);\n\tbucket_table = NULL;\n\n\tdouble 
bucket_mrc_lru[bucket_cnt_lru];\t\n\tdouble bucket_mrc_lfu[bucket_cnt_lfu];\t\n\t//printf(\"Loop 1 ended.\\n\");\n\tGHashTable *mrc_lru = MRC(shards_LRU);\n\tGHashTable *mrc_lfu = MRC(shards_LFU);\n\n\t\n\tFILE *mrc_file = fopen(argv[5],\"w\");\n\tGList *keys_lru = g_hash_table_get_keys(mrc_lru);\n\tGList *keys_lfu = g_hash_table_get_keys(mrc_lfu);\n\tkeys_lru = g_list_sort(keys_lru, (GCompareFunc) intcmp);\n\tkeys_lfu = g_list_sort(keys_lfu, (GCompareFunc) intcmp);\n \t//GList *first = keys;\n\twhile(keys_lru!=NULL){\n\t\tif(strcmp(argv[6],\"LRU\")==0)\n\t\t\tfprintf(mrc_file,\"%d, 0, %1.7f\\n\",*(int*)keys_lru->data, *(double*)g_hash_table_lookup(mrc_lru, keys_lru->data) );\n\t\telse\n\t\t\tfprintf(mrc_file,\"0, %d, %1.7f\\n\",*(int*)keys_lru->data, *(double*)g_hash_table_lookup(mrc_lru, keys_lru->data) );\n\t\tbucket_mrc_lru[*(int*)keys_lru->data / bucket -1] = *(double*)g_hash_table_lookup(mrc_lru, keys_lru->data);\n\t\tkeys_lru=keys_lru->next;\n\t}\n\twhile(keys_lfu!=NULL){\n\t\tif(strcmp(argv[6],\"LRU\")==0)\n\t\t\tfprintf(mrc_file,\"%d, 0, %1.7f\\n\",*(int*)keys_lfu->data, *(double*)g_hash_table_lookup(mrc_lfu, keys_lfu->data) );\n\t\telse\n\t\t\tfprintf(mrc_file,\"0, %d, %1.7f\\n\",*(int*)keys_lfu->data, *(double*)g_hash_table_lookup(mrc_lfu, keys_lfu->data) );\n\t\tbucket_mrc_lfu[*(int*)keys_lfu->data / bucket -1] = *(double*)g_hash_table_lookup(mrc_lfu, keys_lfu->data);\n\t\tkeys_lfu=keys_lfu->next;\n\t}\n\n\tclock_t end_time = clock();\n\t\n\tfclose(mrc_file);\n\t//g_list_free(first);\n\t\n\tg_hash_table_destroy(mrc_lru);\n\tg_hash_table_destroy(mrc_lfu);\n\t\n\tprintf(\"%ld\\n\", start_time);\n\tprintf(\"%ld\\n\", end_time);\n\tint total_time = ((end_time - start_time))/CLOCKS_PER_SEC;\n\tprintf(\"TIME: %d\\n\", total_time);\n\tunsigned int objects_parsed = shards_LRU->total_objects;\n \n\tdouble throughput = objects_parsed/(total_time+1);\n\tSHARDS_free(shards_LRU);\n\tSHARDS_free(shards_LFU);\n\tprintf(\"Throughput: %f\\n\", throughput);\n\n\n\t//Generate the rest MRCs\n\tfor(int i = 1; i < bucket_cnt_l1; i++){\n\t\tprintf(\"Generating 2nd layer %d / %d: \\n\", i, bucket_cnt_l1-1);\n\n\t\tSHARDS *_shards = NULL;\n\t\tif(strcmp(argv[7], \"LRU\") == 0)\n\t\t\t_shards = SHARDS_fixed_size_init_R( s_max,R, (unsigned int)bucket, String);\n\t\telse if(strcmp(argv[7], \"LFU\") == 0)\n\t\t\t_shards = SHARDS_fixed_size_init_R_LFU( s_max,R, (unsigned int)bucket, String);\n\t\tsprintf(filename, \"%04d.csv\", i);\n\t\tfile = fopen(filename, \"r\");\n\n\t\tcnt = 0;\n\n\t\twhile(fgets(object, obj_length+2, file)!=NULL){\n\t\t\tprintf(\"%d\\n\", cnt);\t\n\t\t\tif(cnt%1000==0 && cnt<10000) printf(\"%d\\n\", cnt);\n\t\t\tif(cnt%10000==0) printf(\"%d\\n\", cnt);\n\t\t SHARDS_feed_obj(_shards, object, obj_length, argv[7]);\n\n\t\t object = (char*)calloc((obj_length+2),sizeof(char));\n\t\t\tcnt++;\n\t\t}\n \n\t\tGHashTable *_mrc = MRC(_shards);\n\n\t\tFILE *_mrc_file = fopen(argv[5],\"a\");\n \tGList *_keys = g_hash_table_get_keys(_mrc);\n \t_keys = g_list_sort(_keys, (GCompareFunc) intcmp);\n \t\tGList *_first = _keys;\n \twhile(_keys!=NULL){\n \tif(strcmp(argv[6], \"LRU\") == 0)\n\t\t\t\tfprintf(_mrc_file,\"%d, %d, %1.7f\\n\", i*bucket, *(int*)_keys->data, *(double*)g_hash_table_lookup(_mrc, _keys->data)*bucket_mrc_lru[i]);\n\t\t\telse\n\t\t\t\tfprintf(_mrc_file,\"%d, %d, %1.7f\\n\", i*bucket, *(int*)_keys->data, *(double*)g_hash_table_lookup(_mrc, _keys->data)*bucket_mrc_lfu[i]);\n\n\t _keys=_keys->next;\n \t}\n\n\n \tfclose(_mrc_file);\n \tg_list_free(_first);\n\n 
\tg_hash_table_destroy(_mrc);\n\n \t \t//SHARDS_free(_shards);\n \tprintf(\"Finished\");\n\n\t}\n\treturn 0;\n\t\n}\n" }, { "alpha_fraction": 0.5901080369949341, "alphanum_fraction": 0.6498010158538818, "avg_line_length": 34.8979606628418, "blob_id": "1dd688b4a4ea243ad237667fd068e8154900f7e8", "content_id": "d508eb02111947e454c68c5c68047c03f6823832", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1759, "license_type": "no_license", "max_line_length": 130, "num_lines": 49, "path": "/shard_msr.sh", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "size=1000000000\nr=0.1\nfor f in src1_0.mrc usr_2.mrc web_2.mrc\ndo\n    eval \"./shards_3d_err 20 131072 ${r} $HOME/dev/traces/msr.day/$f.trace ${HOME}/dev/traces/msr.day/${f}.${r}.mrc LRU LFU ${size}\"\n    echo \"skip processed $f\"\ndone\n\nfor f in mds_0 proj_2 proj_3 prox_0 prox_1 src1_2 src2_0 src2_2 stg_0 ts_0 wdev_0 web_0 web_1 web_3\ndo\n    eval \"./shards_3d_err 20 1024 ${r} $HOME/dev/traces/msr.day/$f.trace ${HOME}/dev/traces/msr.day/${f}.${r}.mrc LRU LFU $size\"\n    echo \"processed ${HOME}/trace/msr.day/${f}.trace\"\ndone\n\nfor f in hm_0 mds_1\ndo\n    eval \"./shards_3d_err 20 2048 ${r} $HOME/dev/traces/msr.day/$f.trace ${HOME}/dev/traces/msr.day/${f}.${r}.mrc LRU LFU $size\"\n    echo \"processed ${HOME}/trace/msr.day/${f}.trace\"\ndone\n\nfor f in proj_4\ndo\n    eval \"./shards_3d_err 20 4096 ${r} $HOME/dev/traces/msr.day/$f.trace ${HOME}/dev/traces/msr.day/${f}.${r}.mrc LRU LFU $size\"\n    echo \"processed ${HOME}/trace/msr.day/${f}.trace\"\ndone\n\nfor f in prn_1\ndo\n    eval \"./shards_3d_err 20 65536 ${r} $HOME/dev/traces/msr.day/$f.trace ${HOME}/dev/traces/msr.day/${f}.${r}.mrc LRU LFU $size\"\n    echo \"processed ${HOME}/trace/msr.day/${f}.trace\"\ndone\n\nfor f in prn_0 usr_1\ndo\n    eval \"./shards_3d_err 20 32768 ${r} $HOME/dev/traces/msr.day/$f.trace ${HOME}/dev/traces/msr.day/${f}.${r}.mrc LRU LFU $size\"\n    echo \"processed ${HOME}/trace/msr.day/${f}.trace\"\ndone\n\nfor f in proj_0 usr_0 \ndo\n    eval \"./shards_3d_err 20 16384 ${r} $HOME/dev/traces/msr.day/$f.trace ${HOME}/dev/traces/msr.day/${f}.${r}.mrc LRU LFU $size\"\n    echo \"processed ${HOME}/trace/msr.day/${f}.trace\"\ndone\n\nfor f in proj_1 src1_1 \ndo\n    eval \"./shards_3d_err 20 8192 ${r} $HOME/dev/traces/msr.day/$f.trace ${HOME}/dev/traces/msr.day/${f}.${r}.mrc LRU LFU $size\"\n    echo \"processed ${HOME}/trace/msr.day/${f}.trace\"\ndone\n" }, { "alpha_fraction": 0.6149433851242065, "alphanum_fraction": 0.6270188689231873, "avg_line_length": 25.825910568237305, "blob_id": "935be920c861334034a97417791969f9a0d0da9a", "content_id": "2bb45bb565e0cc2fe3e7a09f0c5a2d24b7239859", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 6625, "license_type": "no_license", "max_line_length": 179, "num_lines": 247, "path": "/src/dist.c", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "#include <glib.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <inttypes.h>\n#include <stdint.h>\n#include <stdbool.h>\n#include <string.h>\n#include \"splay.h\"\n\nunsigned int calc_reuse_dist( char *object, unsigned int num_obj, GHashTable **time_table, Tree **tree);\n\t\t\t\t\t\t\nvoid update_dist_table(uint64_t reuse_dist, GHashTable **dist_table);\n\nGHashTable *MRC(GHashTable **dist_table);\n\nint intcmp(const void *x, const void *y);\n\n\n/*\n*\n*\n*\n*/\n\nint main(int argc, char *argv[]){\n \t\n\tGList *keys;\n\t\n\tTree *tree =NULL;\n\tGHashTable *time_table = 
g_hash_table_new_full(g_str_hash, g_str_equal, ( GDestroyNotify )free, ( GDestroyNotify )free);\n\tGHashTable *dist_table = g_hash_table_new_full(g_int_hash, g_int_equal, ( GDestroyNotify )free, ( GDestroyNotify )free);\n\t\n\tint obj_length = (int) strtol(argv[1], NULL, 10);\n\t//timestamp is the value read from the hashtable\t\n\t\n\t\n\t\n\tunsigned int num_obj=1;\n\t\n\t\n\tchar *object= malloc((obj_length+2)*sizeof(char));\n\n\t//char *time_table_value = (char*) malloc(15*sizeof(char));\n\t\n\tunsigned int reuse_dist=0;\t\n\t\n\t\n\tFILE *file;\n\tfile = fopen(argv[2], \"r\");\n \n\twhile(fgets(object, obj_length+2, file)!=NULL){\n\t\tobject[11]='\\0';\t\n\t\t\n\t\t\n\t\t//sprintf(\"Object #%7u: %12s \\n\", num_obj, object);\n\t\n\t\t//Calculate Reuse distance\n\t\treuse_dist = calc_reuse_dist( object, num_obj, &time_table, &tree);\n\n\t\tupdate_dist_table(reuse_dist , &dist_table);\t\n\n\t\t//printf(\"%u \\n\", reuse_dist);\n\t\t\n\t\t\n\t\t\t\n\t\t//tmp_str = (char*) malloc(15*sizeof(char));\n\t\t\n\t\tobject= (char*) malloc((obj_length+2)*sizeof(char));\n\t\t// num_obj++ should only happen when the condition hash(Li)% P <= T holds !!!!!!! \n\t\tnum_obj++;\t\n\t}\n\tprintf(\"\\n\\n\");\t\n\t//sprinttree(tree, 2);\n\n\tprintf(\"\\n\\n\");\t\n\t/*\n\tGHashTableIter iter;\n\tvoid *key, *value;\n\t\n\tg_hash_table_iter_init (&iter, time_table);\n\twhile (g_hash_table_iter_next (&iter, &key, &value))\n \t{\n \t\tprintf(\"Object: %7s; Last timestamp: %5s \\n\", (char*)key, (char*)value);\n \t}\t\n\n\tprintf(\"\\n\\n\");\n\t\n\thistograma = g_hash_table_get_keys(distance_table);\n\t\n\thistograma =g_list_sort (histograma, (GCompareFunc)numerical_strcmp);\n\tGList *iterador = histograma;\n\t\n\t//Print the reuse distance histogram\n\t\t\n\twhile( 1 ){\n\t\t\n\t\tprintf(\"Reuse distance: %s ; Count: %s \\n\", (char*)iterador->data, (char*)g_hash_table_lookup(distance_table, iterador->data));\n\t\titerador = iterador->next;\n\t\tif(iterador==NULL){\n\t\t\tbreak;\n\t\t}\n\t}\t\n\t\n\tprintf(\"%s \\n\\n\\n\", (char*)histograma->data);\n\t*/\n\tFILE *file2;\n\tfile2 = fopen(argv[3], \"w\");\n\n\tGHashTable *miss_rates = MRC(&dist_table);\n\tkeys = g_hash_table_get_keys (miss_rates);\n\tkeys = g_list_sort (keys ,(GCompareFunc) intcmp );\n\n\twhile(1){\t\n\t\t//fprintf(file2, \"%d %d\\n\", *(int*)(keys->data), *(int*)( g_hash_table_lookup(dist_table, keys->data) ) );\n\t\tfprintf(file2, \"%d %f \\n\", *(int*)(keys->data), *(double*)( g_hash_table_lookup(miss_rates, (int*)keys->data) ) );\t\n\t\t//printf( \"%d %f \\n\", *(int*)(keys->data), *(double*)( g_hash_table_lookup(miss_rates, (int*)keys->data) ));\n\t\t//printf( \"%d %d p1: %p p2: %p\\n\", *(int*)(keys->data), *(int*)( g_hash_table_lookup(dist_table, keys->data) ),(keys->data), ( g_hash_table_lookup(dist_table, keys->data) ) );\n\t\tif(keys->next ==NULL){\n\t\t\tbreak;\n\t\t}\n\t\tkeys= keys->next;\t\t\n\t}\n\tfclose(file);\n\tfclose(file2);\n\tfreetree(tree);\n\tkeys = g_list_first(keys);\n\tg_list_free(keys);\n\tg_hash_table_destroy(time_table);\n\tg_hash_table_destroy(dist_table);\n\tg_hash_table_destroy(miss_rates);\n\n\treturn 0;\n\t\n}\n\n\n\nunsigned int calc_reuse_dist(char *object, unsigned int num_obj, GHashTable **time_table, Tree **tree){\n\n\t\t\n\tunsigned int reuse_dist=0;\n\n\tunsigned int *time_table_value =(unsigned int*) g_hash_table_lookup(*time_table, object);\n\tunsigned int* num_obj_ptr = malloc(sizeof(unsigned int));\n\t*num_obj_ptr = num_obj;\n\t//snprintf(num_obj_str,15*sizeof(char), \"%u\", 
num_obj);\n\n\tif(time_table_value==NULL){\n\n\t\tg_hash_table_insert(*time_table, object, num_obj_ptr);\n\t\t*tree = insert(num_obj ,*tree);\n\t\treuse_dist=0;\n\n\t}else{\n\t\t//timestamp = strtol(time_table_value,NULL,10);\n\t\treuse_dist =(uint64_t) calc_distance( *time_table_value,*tree);\n\t\t\t\t//Look up the reuse distance in the distance_table hashtable\n\t\t\t\t//snprintf(reuse_dist_str, 15*sizeof(char), \"%\"PRIu64\"\", reuse_dist);\n\t\t\t\t//printf(\"%u \\n\", reuse_dist);\n\t\t\t\t//delete old timestamp from tree\n\t\t*tree = delete(*time_table_value ,*tree);\n\t\t\t\t\t\n\t\t\t\t//Insert new timestamp from tree\n\t\t*tree = insert(num_obj ,*tree);\n\t\tg_hash_table_insert(*time_table, object, num_obj_ptr);\t\n\t}\n\n\t//printf(\"num_obj_ptr: %u \\n\", *num_obj_ptr);\n\t\n\treturn reuse_dist;\n}\n\nvoid update_dist_table(uint64_t reuse_dist, GHashTable **dist_table){\n\t\t\n\t\tuint64_t *x = (uint64_t*) g_hash_table_lookup(*dist_table, &reuse_dist);\n\t\t\n\t\tif(x == NULL){\n\t\t\t//printf(\"11111\\n\");\n\t\t\tx = (uint64_t*)malloc(sizeof(uint64_t));\n\t\t\t*x = 1;\n\t\t\t\n\t\t\t//printf(\"x[0]: %\"PRIu64\" x[1]: %\"PRIu64\"\\n\",x[0],x[1]);\n\t\t\tint *dist = (int*)malloc(sizeof(uint64_t));\n\t\t\t*dist = reuse_dist;\n\t\t\tg_hash_table_insert(*dist_table, dist, x);\n\t\t\t//printf(\"hashtable value: %d\\n\", *(int*)g_hash_table_lookup(*dist_table, &reuse_dist));\n\n\t\t}else{\n\t\t\t*x= *x + 1;\n\t\t}\n}\n\nGHashTable *MRC(GHashTable **dist_table){\n\n\tGList *keys = g_hash_table_get_keys(*dist_table);\n\tGHashTable *tabla = g_hash_table_new_full(g_int_hash, g_int_equal, (GDestroyNotify)free, (GDestroyNotify)free);\n\tkeys = g_list_sort(keys, (GCompareFunc) intcmp );\n\t\n\n\tdouble *missrate = NULL;\n\tint *cache_size = NULL;\n\tunsigned int total_sum = *(int*)(g_hash_table_lookup(*dist_table, keys->data) );\n\t//printf(\"TOTAL SUM: %u \\n\", total_sum);\n\tunsigned int part_sum = 0;\n\tkeys = keys->next;\n\twhile(1){\t\n\t\tcache_size = malloc(sizeof(int));\t\t\t\n\t\tmissrate = malloc(sizeof(double));\n\t\tpart_sum = part_sum + *(int*)(g_hash_table_lookup(*dist_table, keys->data) );\n\t\t//printf(\"PART SUM: %u \\n\", part_sum);\n\t\t*missrate = (double) part_sum;\n\t\t*cache_size = *(int*)(keys->data);\n\t\t//printf(\"%d %f\\n\", *cache_size, *missrate);\n\t\tg_hash_table_insert(tabla, cache_size, missrate);\n\n\t\tif(keys->next ==NULL){\n\t\t\tbreak;\n\t\t}\n\t\tkeys= keys->next;\t\t\n\t}\n\tkeys = g_list_first(keys);\n\ttotal_sum = total_sum + part_sum;\n\t//printf(\"TOTAL SUM: %u \\n\", total_sum);\n\tkeys= keys->next; //ignoring the zero (infinity) reuse dist\n\tmissrate = NULL;\n\twhile(1){\t\n\t\t\n\t\tmissrate = g_hash_table_lookup(tabla, keys->data);\n\t\t*missrate = 1.0 - (*missrate/total_sum);\n\t\t//printf(\"%d %f\\n\", *(int*)keys->data, *missrate);\n\t\t\n\t\tif(keys->next ==NULL){\n\t\t\tbreak;\n\t\t}\n\t\tkeys= keys->next;\t\t\n\t}\t\n\tkeys = g_list_first(keys);\n\tg_list_free (keys);\n\n\treturn tabla;\n}\n\nint intcmp(const void *x, const void *y){\n\tconst int a = *(int*)x;\n\tconst int b = *(int*)y;\n\treturn (a < b) ? 
-1 : (a > b);\n}" }, { "alpha_fraction": 0.6372932195663452, "alphanum_fraction": 0.6550375819206238, "avg_line_length": 32.25, "blob_id": "65077504ebfd30c7c6c23e30ea4260c4c24def51", "content_id": "151d28ee7a0c8d95495bc72eace98e3ff527f560", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 3325, "license_type": "no_license", "max_line_length": 147, "num_lines": 100, "path": "/Makefile", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "CC=gcc\nCFLAGS= -m64 -g -Wall -I./include `pkg-config --cflags --libs glib-2.0`\nVERSION=-std=c11\nODIR=obj\nAR=ar\nARFLGAS=rcs\n.DEFAULT_GOAL := shards_3d_minisim2\n\t\ndynamic: lib/libSHARDS.so\n\nlib/libSHARDS.so: obj/SHARDS.o obj/shard_utils.o\n\t$(CC) -g -fPIC $(VERSION) -Wextra -pedantic $^ -shared $(CFLAGS) -o lib/libSHARDS.so\n\nstatic: lib/libSHARDS.a\n\nlib/libSHARDS.a: obj/SHARDS.o obj/shard_utils.o\n\t$(AR) $(ARFLAGS) lib/libSHARDS.a $^\n\n\ndynamic: lib/libMINISIM.so\n\nlib/libMINISIM.so: obj/minisim.o\n\t$(CC) -g -fPIC $(VERSION) -Wextra -pedantic $^ -shared $(CFLAGS) -o lib/libMINISIM.so\n\nstatic: lib/libMINISIM.a\n\nlib/libMINISIM.a: obj/minisim.o\n\t$(AR) $(ARFLAGS) lib/libMINISIM.a $^\n\n# The -I./include from the CFLAGS is not really needed in thi step, as the header file for libSHARDS.so was already included en the shards_test.o .\n# However it won't hurt the compiling process, which is why we leave that flag in the CFLAGS variable and use it here.\n\nshards_test: obj/shards_test.o lib/libSHARDS.a\n\t$(CC) -g $^ $(CFLAGS) $(VERSION) -o $@\n\nshards_3d_layer1: obj/shards_3d_layer1.o lib/libSHARDS.a\n\t$(CC) -g $^ $(CFLAGS) $(VERSION) -o $@\n\nshards_3d: obj/shards_3d.o lib/libSHARDS.a\n\t$(CC) -g $^ $(CFLAGS) $(VERSION) -o $@\n\nshards_3d_err: obj/shards_3d_err.o lib/libSHARDS.a\n\t$(CC) -g $^ $(CFLAGS) $(VERSION) -o $@\n\nshards_test2: obj/shards_test.o lib/libSHARDS.so\n\t$(CC) -g $^ $(CFLAGS) $(VERSION) -o $@\n\nshards_3d_minisim: obj/shards_3d_minisim.o lib/libSHARDS.so lib/libMINISIM.so\n\t$(CC) -g $^ $(CFLAGS) $(VERSION) -o $@\n\nshards_3d_minisim2: obj/shards_3d_minisim2.o lib/libSHARDS.so lib/libMINISIM.so\n\t$(CC) -g $^ $(CFLAGS) $(VERSION) -o $@\n\nshards_3d_talus_mini2: obj/shards_3d_talus_mini2.o lib/libSHARDS.so lib/libMINISIM.so\n\t$(CC) -g $^ $(CFLAGS) $(VERSION) -o $@\n\nshards_3d_talus_mini: obj/shards_3d_talus_mini.o lib/libSHARDS.so lib/libMINISIM.so\n\t$(CC) -g $^ $(CFLAGS) $(VERSION) -o $@\n\nminisim_test: obj/minisim_test.o lib/libSHARDS.so lib/libMINISIM.so\n\t$(CC) -g $^ $(CFLAGS) $(VERSION) -o $@\n\n\n\nobj/shards_test.o: src/shards_test.c\n\t$(CC) -g -c src/shards_test.c $(CFLAGS) $(VERSION) -o $@\nobj/shards_3d_layer1.o: src/shards_3d_layer1.c\n\t$(CC) -g -c src/shards_3d_layer1.c $(CFLAGS) $(VERSION) -o $@\n\nobj/shards_3d.o: src/shards_3d.c\n\t$(CC) -g -c src/shards_3d.c $(CFLAGS) $(VERSION) -o $@\n\nobj/shards_3d_minisim.o: src/shards_3d_minisim.c\n\t$(CC) -g -c src/shards_3d_minisim.c $(CFLAGS) $(VERSION) -o $@\n\nobj/shards_3d_minisim2.o: src/shards_3d_minisim2.c\n\t$(CC) -g -c src/shards_3d_minisim2.c $(CFLAGS) $(VERSION) -o $@\n\nobj/shards_3d_talus_mini2.o: src/shards_3d_talus_mini2.c\n\t$(CC) -g -c src/shards_3d_talus_mini2.c $(CFLAGS) $(VERSION) -o $@\n\nobj/shards_3d_talus_mini.o: src/shards_3d_talus_mini.c\n\t$(CC) -g -c src/shards_3d_talus_mini.c $(CFLAGS) $(VERSION) -o $@\n\nobj/shards_3d_err.o: src/shards_3d_err.c\n\t$(CC) -g -c src/shards_3d_err.c $(CFLAGS) $(VERSION) -o $@\n\nobj/SHARDS.o : src/SHARDS.c\n\t$(CC) -g -fPIC 
-c $(CFLAGS) $(LFLAGS) src/SHARDS.c -o $@\n\nobj/minisim.o : src/minisim.c\n\t$(CC) -g -fPIC -c $(CFLAGS) $(LFLAGS) src/minisim.c -o $@\n\nobj/shard_utils.o: src/shards_utils.c\n\t$(CC) -g -fPIC -c $(CFLAGS) $(LFLAGS) src/shards_utils.c -o $@\n\nobj/minisim_test.o: src/minisim_test.c\n\t$(CC) -g -fPIC -c $(CFLAGS) $(LFLAGS) src/minisim_test.c -o $@\nclean:\n\trm ./obj/*.o\n" }, { "alpha_fraction": 0.752444863319397, "alphanum_fraction": 0.7612655758857727, "avg_line_length": 49.63106918334961, "blob_id": "2cd8c475c9cd5fa367e7948b9e1085bd57adcd22", "content_id": "2a0489c1bf8d0643e6b2c8f92ae1085afb5bfd22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5215, "license_type": "no_license", "max_line_length": 450, "num_lines": 103, "path": "/README.md", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "See [README-Z.md](https://github.com/lzhzero/SHARDS-C/blob/master/README-Z.md) for details on how to process block traces.\n\n# SHARDS-C\n\nThis is an implementation of the SHARDS algorithm written in C. It is used to estimate the Miss Rate Curve for a cache using a stack algorithm (LRU, LFU, MRU but this implementation works only with LRU) as an eviction policy. To read more on SHARDS, check out this [link](https://www.usenix.org/system/files/conference/fast15/fast15-paper-waldspurger.pdf).\n\nIt uses [GLib](https://developer.gnome.org/glib/stable/) for most of the data structures.\n\nIt uses an implementation of the top-down splaying binary tree with sizes ( D. Sleator <[email protected]>, January 1994.), taken from the [PARDA repository](https://bitbucket.org/trauzti/parda). I modified it further so that it could calculate the sum of the sizes from the search node to the rightmost node. It also uses the [qLibc](https://github.com/wolkykim/qlibc) implementation of the 128-bit murmurhash3 function.\n\nThis work was done as part of a larger project for [Dr. Cristina Abad](https://sites.google.com/site/cristinaabad/). \n\n### TODO\n\n- [ ] Implement the SHARDS_adj, to deal with the error produced by vertical shifts of the estimated curve.\n- [ ] Implement a function that returns the updated reuse distance distribution from the SHARDS_fixed_size version, in case someone cares about reuse distances and not the MRC.\n- [X] Restructure the project as a shared and a static library.\n\n## Installation\nSHARDS-C needs gcc 5 or greater to compile and [GLib](https://developer.gnome.org/glib/stable/) as a dependency.\n\nSHARDS-C can be compiled as a static and a dynamic library. To get either version write in the command line:\n\n```\n\tmake dynamic\n```\nor\n```\n\tmake static\n```\nThe appropriate library file (libSHARDS.so or libSHARDS.a) will reside in the lib/ directory. Afterwards you can either copy the library file and the SHARDS.h to the standard path:\n\n```\n\tsudo cp lib/libSHARDS.* /usr/local/lib\n\tsudo cp include/SHARDS.h /usr/local/include\n\tsudo cp include/shards_utils.h /usr/local/include\n```\n\nAlternatively, just put the files in a folder (or leave them in /lib/) and add that directory to LIBRARY_PATH (for the static library, libSHARDS.a) and/or to LD_LIBRARY_PATH (for the shared library, libSHARDS.so). 
With this method you will have to include the SHARDS.h and shards_utils.h files in the directory of the project that will use this library.\n\n## Instructions on how to use SHARDS\n\nTo construct a Miss Rate Curve using SHARDS, you first need to declare a SHARDS element and initialize it using one of the three initialization functions. There are two versions of SHARDS: Fixed-Rate and Fixed-Size. Generally speaking you want to use the Fixed-Size version, as it has a limit on the amount of memory it uses; the other version I implemented in order to understand the algorithm. To initialize a SHARDS Fixed-Size structure you write:\n```\n\tSHARDS* shards = SHARDS_fixed_size_init(16000, 10, String);\n```\n\nThis gives you a SHARDS structure that allows a maximum of 16000 unique objects to be accepted before triggering eviction, has a bucket size of 10 for the reuse distance histogram and works with objects of type String (so char* basically). This version sets the value of R_initial to 0.1. If you want to use a different value (for example 0.5), you can use the alternative initialization function:\n\n```\n\tSHARDS* shards = SHARDS_fixed_size_init_R(16000, 0.5, 10, String);\t\n```\n\nNow that the SHARDS element is initialized, you just need to feed it the objects that go into the cache. To do this, you initialize a pointer of the type that the SHARDS structure accepts (in this case String, meaning char* ), with the value of the object. Afterwards you use SHARDS_feed_obj to have the SHARDS structure analyze the object.\n\n```\n\tint length = ...;//Length of the object in bytes\n\tchar * obj = malloc(length* sizeof(char)); \n\n\t... // Here you set the object's value\n\n\tSHARDS_feed_obj(shards, obj, length);\n```\n\nOnce you have finished feeding the objects of the workload, you can construct the MRC by using the following function: \n\n```\n\tMRC(shards);\n```\n\nor\n\n```\n\tMRC_empty(shards);\n```\n\nThe first one returns a GHashTable with the key-value pairs [size : MissRate]. The second function does the same, but in addition it erases all the data from the internal data structures, such that you can start analyzing another workload in order to estimate its MRC.\n\nOnce finished, you can free the SHARDS structure with \n\n```\t\n\tSHARDS_free(shards);\n```\n\nThe file shards_test.c has an example of a program that reads a trace from another file and constructs the MRC for it.\n\n\n## Referencing our work\n\nIf you found our tool useful and use it in research, please cite our work as follows:\n\nInstrumenting cloud caches for online workload monitoring \nJorge R. Murillo, Gustavo Totoy, Cristina L. Abad \n16th Workshop on Adaptive and Reflective Middleware (ARM), co-located with ACM/IFIP/USENIX Middleware, 2017. 
\nCode available at: https://github.com/jorgermurillo/SHARDS-C\n\n## Acknowledgements\n\nThis work was funded in part by a Google Faculty Research Award.\n\nThis work was possible thanks to the Amazon Web Services Cloud Credits for Research program.\n\nmiau :cat:\n" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7330935001373291, "avg_line_length": 28.4255313873291, "blob_id": "92e9595650de7048ddcf35e1eba7242a32139964", "content_id": "0300d3ee19bb4ab33d6508e72f1f0829a7fe413f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 171, "num_lines": 47, "path": "/README-Z.md", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "# Run the SHARDS algorithm on block trace files.\n\nTrace files can be obtained here: http://iotta.snia.org/traces/388\n\n## TODO:\nProcess write entries in the blktrace file. Currently only read entries are processed.\n\n\n## Installation\nglib-2.0 is required\npandas is required for python\n```\nmake\nmake shards_test\nmake shards_3d\n```\n\n## Convert block trace file\n\nA read record in a block trace may span multiple cache blocks. In order for the SHARDS program to work, we first need to convert read IOs into the list of cache blocks accessed.\n```\nconvert_data.py blktrace list.cacheblock\n```\n\n## Feed the list into the shards program\n```\n./shards_test 20 1000 1.0 list.cacheblock output.mrc.lru LRU\n```\nFollowing are the definitions of the argvs:\n```\n/*\targv[1] = length of each object. \"trivial argv, just make sure the number is larger than the length of each line of the input file\"\n\targv[2] = bucket size \"defines the granularity of the MRC chart\"\n\targv[3] = R \"down-sampling factor for faster execution, the key idea of the SHARDS paper.\"\n\targv[4] = Tracefile \"input file\"\n\targv[5] = mrc file \"output file\"\n    argv[6] = eviction policy flag e.g. \"LRU\" \"LFU\"\n*/\n```\nshards_3d:\nCurrently the first layer is implemented, with input generation for the 2nd layer.\nThe next step is to process the input file format and feed it to the 2nd layer.\n\nshards_3d_layer1 20 131072 1.0 input.prn_0.csv o1 LRU\n```\n131072 means the bucket size is 512MB. \n```\nZhang Liu\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.589893639087677, "alphanum_fraction": 0.6035904288291931, "avg_line_length": 37.3724479675293, "blob_id": "9821a4f4ec6e47e4151348e77b625dc72c17a64b", "content_id": "6dd858487925720367d569ee04b225b72b45891", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 7520, "license_type": "no_license", "max_line_length": 216, "num_lines": 196, "path": "/src/minisim.c", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "#include \"minisim.h\"\n/*\n * To change this license header, choose License Headers in Project Properties.\n * To change this template file, choose Tools | Templates\n * and open the template in the editor.\n */\n\n#define new_max(x,y) (((x) >= (y)) ? (x) : (y))\n#define new_min(x,y) (((x) < (y)) ? (x) : (y))\n\nMINISIM * MINISIM_new_init_LRU(unsigned int cache_size) {\n    MINISIM *minisim = (MINISIM *) malloc(sizeof (MINISIM));\n    minisim->LRU_queue = g_queue_new();\n    minisim->LRU_cache_size = cache_size;\n    minisim->LRU_list_size = 0;\n    minisim->hitcount = 0;\n    minisim->count = 0;\n    return minisim;\n}\n\nunsigned int LRU_get_obj(MINISIM *minisim, void* object) {\n    // printf(object);\n    minisim->count++;\n    if (minisim->count == 0)\n        printf(\"count is 0!!!!!!!!!!!!!!!!\\n\");\n    if (g_queue_remove(minisim->LRU_queue, object)) {\n        g_queue_push_head(minisim->LRU_queue, object);\n        minisim->hitcount++;\n        return 1;\n    }\n    //printf(\"in ARC_get_obj \");\n    //printf(object);\n    return 0;\n\n}\n\nunsigned int LRU_put_obj(MINISIM *minisim, void* object) {\n    g_queue_push_head(minisim->LRU_queue, object);\n    minisim->LRU_list_size++;\n\n    if (minisim->LRU_list_size > minisim->LRU_cache_size) {\n        g_queue_pop_tail(minisim->LRU_queue);\n        minisim->LRU_list_size--;\n    }\n    return 0;\n}\n\nMINISIM * MINISIM_new_init_ARC(unsigned int cache_size) {\n    MINISIM *minisim = (MINISIM *) malloc(sizeof (MINISIM));\n    minisim->ARC_cache_size = cache_size;\n    minisim->ARC_items_cached = 0;\n    minisim->ARC_cached = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, NULL);\n    minisim->hitcount = 0;\n    minisim->count = 0;\n    //minisim->ARC_p = 0.0;\n    minisim->ARC_p = 0;\n\n    minisim->ARC_t1 = g_queue_new();\n    minisim->ARC_t2 = g_queue_new();\n    minisim->ARC_b1 = g_queue_new();\n    minisim->ARC_b2 = g_queue_new();\n\n    return minisim;\n}\n\nunsigned int ARC_replace_obj(MINISIM *minisim, void* object) {\n    unsigned int old;\n    if (g_queue_get_length(minisim->ARC_t1) != 0 && ((g_queue_index(minisim->ARC_b2, object) != -1 && g_queue_get_length(minisim->ARC_t1) == minisim->ARC_p) || g_queue_get_length(minisim->ARC_t1) > minisim->ARC_p)) {\n        //if (((g_queue_index(minisim->ARC_b2, object) != -1 && g_queue_get_length(minisim->ARC_t1) == minisim->ARC_p) || g_queue_get_length(minisim->ARC_t1) > minisim->ARC_p)) {\n        old = GPOINTER_TO_UINT(g_queue_pop_head(minisim->ARC_t1));\n        g_queue_push_tail(minisim->ARC_b1, GUINT_TO_POINTER(old));\n    } else {\n        old = GPOINTER_TO_UINT(g_queue_pop_head(minisim->ARC_t2));\n        g_queue_push_tail(minisim->ARC_b2, GUINT_TO_POINTER(old));\n    }\n    
g_hash_table_remove(minisim->ARC_cached, GUINT_TO_POINTER(old));\n return 0;\n}\n\nunsigned int ARC_get_obj(MINISIM *minisim, void* object) {\n // printf(object);\n\n minisim->count++;\n if (g_queue_remove(minisim->ARC_t1, object)) {\n g_queue_push_tail(minisim->ARC_t2, object);\n minisim->hitcount++;\n return 1;\n } else if (g_queue_remove(minisim->ARC_t2, object)) {\n g_queue_push_tail(minisim->ARC_t2, object);\n minisim->hitcount++;\n return 1;\n }\n //printf(\"in ARC_get_obj \");\n //printf(object);\n return 0;\n}\n\nunsigned int ARC_put_obj(MINISIM *minisim, void* object) {\n int _max;\n if (g_hash_table_contains(minisim->ARC_cached, object))\n return 0;\n //printf(\"arc put here, hash table size is %d \\n\", g_hash_table_size(minisim->ARC_cached));\n //printf(\"arc put here, t1 size is %d \\n\", g_queue_get_length(minisim->ARC_t1));\n\n g_hash_table_insert(minisim->ARC_cached, object, GUINT_TO_POINTER(1));\n\n if (g_queue_index(minisim->ARC_b1, object) != -1) {\n //printf(\"old p is %d\\n\", minisim->ARC_p);\n //minisim->ARC_p = new_min(minisim->ARC_cache_size, minisim->ARC_p + new_max((double) g_queue_get_length(minisim->ARC_b2) / (double) g_queue_get_length(minisim->ARC_b1), 1.0));\n _max = new_max(g_queue_get_length(minisim->ARC_b2) / g_queue_get_length(minisim->ARC_b1), 1);\n minisim->ARC_p = new_min(minisim->ARC_cache_size, minisim->ARC_p + _max);\n //printf(\"new p is %d\\n\", minisim->ARC_p);\n ARC_replace_obj(minisim, object);\n g_queue_remove(minisim->ARC_b1, object);\n g_queue_push_tail(minisim->ARC_t2, object);\n return 0;\n }\n if (g_queue_index(minisim->ARC_b2, object) != -1) {\n //printf(\"old p is %d\\n\", minisim->ARC_p);\n _max = new_max(g_queue_get_length(minisim->ARC_b1) / g_queue_get_length(minisim->ARC_b2), 1);\n //minisim->ARC_p = new_max(0, minisim->ARC_p - new_max((double) g_queue_get_length(minisim->ARC_b1) / (double) g_queue_get_length(minisim->ARC_b2), 1.0));\n minisim->ARC_p = new_max(0, (minisim->ARC_p - _max));\n //printf(\"p - max(b1/b2, 1)\\n\");\n //printf(\"b2 size is %d\\n\", g_queue_get_length(minisim->ARC_b2));\n //printf(\"b1 size is %d\\n\", g_queue_get_length(minisim->ARC_b1));\n //printf(\"new p is %d\\n\", minisim->ARC_p);\n ARC_replace_obj(minisim, object);\n g_queue_remove(minisim->ARC_b2, object);\n g_queue_push_tail(minisim->ARC_t2, object);\n return 0;\n }\n if (g_queue_get_length(minisim->ARC_t1) + g_queue_get_length(minisim->ARC_b1) == minisim->ARC_cache_size) {\n if (g_queue_get_length(minisim->ARC_t1) < minisim->ARC_cache_size) {\n g_queue_pop_head(minisim->ARC_b1);\n ARC_replace_obj(minisim, object);\n } else {\n g_hash_table_remove(minisim->ARC_cached, g_queue_pop_head(minisim->ARC_t1));\n //ARC_replace_obj(minisim, object);\n }\n } else {\n int total = g_queue_get_length(minisim->ARC_t1) + g_queue_get_length(minisim->ARC_t2) + g_queue_get_length(minisim->ARC_b1) + g_queue_get_length(minisim->ARC_b2);\n if (total >= minisim->ARC_cache_size) {\n if (total == 2 * minisim->ARC_cache_size)\n g_queue_pop_head(minisim->ARC_b2);\n ARC_replace_obj(minisim, object);\n }\n }\n //printf(\"push tail t1\\n\");\n g_queue_push_tail(minisim->ARC_t1, object);\n //printf(\"t1 find obj is %d\\n\", g_queue_remove(minisim->ARC_t1, object));\n\n return 0;\n}\n\nunsigned int MQ_get_obj(MINISIM *minisim, void* object) {\n\n return 0;\n}\n\nunsigned int MQ_put_obj(MINISIM *minisim, void* object) {\n\n return 0;\n}\n\nunsigned int MINISIM_get_obj(MINISIM *minisim, unsigned obj_int, size_t nbytes, char* algo, int level) {\n if (strcmp(algo, \"ARC\") == 0)\n 
return ARC_get_obj(minisim, GUINT_TO_POINTER(obj_int));\n else if (strcmp(algo, \"MQ\") == 0)\n return MQ_get_obj(minisim, GUINT_TO_POINTER(obj_int));\n else if (strcmp(algo, \"LRU\") == 0)\n return LRU_get_obj(minisim, GUINT_TO_POINTER(obj_int));\n else {\n\n printf(\"no matching algorithm\");\n exit(0);\n }\n}\n\nunsigned int MINISIM_put_obj(MINISIM *minisim, unsigned int obj_int, size_t nbytes, char* algo, int level) {\n if (strcmp(algo, \"ARC\") == 0)\n return ARC_put_obj(minisim, GUINT_TO_POINTER(obj_int));\n else if (strcmp(algo, \"MQ\") == 0)\n return MQ_put_obj(minisim, GUINT_TO_POINTER(obj_int));\n else if (strcmp(algo, \"LRU\") == 0)\n return LRU_put_obj(minisim, GUINT_TO_POINTER(obj_int));\n else {\n printf(\"no matching algorithm\");\n exit(0);\n }\n}" }, { "alpha_fraction": 0.699999988079071, "alphanum_fraction": 0.7330935001373291, "avg_line_length": 28.4255313873291, "blob_id": "92e9595650de7048ddcf35e1eba7242a32139964", "content_id": "0300d3ee19bb4ab33d6508e72f1f0829a7fe413f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 171, "num_lines": 47, "path": "/README-Z.md", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "#Run SHARDS algorithm on block trace files.\n\nTrace files can be obtained here http://iotta.snia.org/traces/388\n\n## TODO:\nprocess write entries in blktrace file.Currently only read entries is processed.\n\n\n## Installation\nglib-2.0 is required\npandas is required for python\n```\nmake\nmake shards_test\nmake shards_3d\n```\n\n## Convert block trace file\n\nA read record in block trace may span over multiple cache blocks. In order for SHARDS program to work, we need to first convert read IOs into list ofcache blocks accessed.\n```\nconvert_data.py blktrace list.cacheblock\n```\n\n## Feed the list into shard program\n```\n./shards_test 20 1000 1.0 list.cacheblock output.mrc.lru LRU\n```\nFollowing is the definition of the argvs\n```\n/*\targv[1] = length of each object. \"trival argv, just make sure the number is larger than length of each line of input file\"\n\targv[2] = bucket size \"defines the granularity of mrc chart\"\n\targv[3] = R \"down sampling factor for faster execution, key idea of the SHARDS paper.\"\n\targv[4] = Tracefile \"input file\"\n\targv[5] = mrc file \"output file\"\n argv[6] = eviction policy flag e.g. \"LRU\" \"LFU\"\n*/\n```\nshards_3d:\nCurrently implemented first layer with input generation for 2nd layer.\nNext step is to process the input file format and feed to 2nd layer.\n\nshards_3d_layer1 20 131072 1.0 input.prn_0.csv o1 LRU\n'''\n131072 means bucket size is 512MB. \n'''\nZhang Liu\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5379644632339478, "alphanum_fraction": 0.5638126134872437, "avg_line_length": 28.428571701049805, "blob_id": "879c36ffb3f2966329fabdbe90702a52766e7126", "content_id": "e81dc9d542476605f64c60c5114a52e99aa0f69b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 619, "license_type": "no_license", "max_line_length": 94, "num_lines": 21, "path": "/convert_data_batch.py", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport sys\n\nif len(sys.argv) != 3:\n print(\"require 2 args, e.g. 
convert_data.py input.csv output.csv\")\n    exit()\nelse:\n    in_file = sys.argv[1]\n    out_file = sys.argv[2]\ndf = pd.read_csv(in_file, compression='gzip', usecols=[3,4,5], names=['io','location','size'])\nf = open(out_file,\"w\")\nfor index, row in df.iterrows():\n    if row['io'] == \"Read\":\n        begin = int(row['location']/4096)\n        end = int((row['location']+row['size'])/4096)+1\n        temp = []\n        for i in range(begin, end):\n            temp.append(str(i))\n        f.write('\\n'.join(temp) )\nf.write(\"\\n\")\nf.close()\n\n" }, { "alpha_fraction": 0.5532984733581543, "alphanum_fraction": 0.5633761286735535, "avg_line_length": 32.37490463256836, "blob_id": "49ddbe70e1052f4199d5439e01754960be9de8f9", "content_id": "6f969ca2b174c34f2123eb8c48f3f9100f2f273c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 44157, "license_type": "no_license", "max_line_length": 223, "num_lines": 1323, "path": "/src/SHARDS.c", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "#include \"SHARDS.h\"\n\n/*\n    This function calculates the reuse distance of a reference to an object in the trace/workload. Reuse distance is the number of unique objects in a trace between\n    the new reference and the last reference to the same object. Objects appearing for the first time have a reuse distance of infinity (here represented by zero).\n    All reuse distances are positive.\n\n    For example:\n\n    a \tb \tc \ta \td \td \tb\n    0(inf)\t0(inf)\t0(inf)\t3\t\t0(inf)\t1\t\t4\n\n    The characters above represent a workload/trace; the values below are the reuse distances of each of the references.\n\n */\n#define new_max(x,y) ((x) >= (y)) ? (x) : (y)\n\nunsigned int calc_reuse_dist(void *object, unsigned int num_obj, GHashTable **time_table, Tree **tree, shards_version version) {\n\n\n    unsigned int reuse_dist = 0;\n\n    unsigned int *time_table_value = (unsigned int*) g_hash_table_lookup(*time_table, object);\n    unsigned int* num_obj_ptr = (unsigned int*) malloc(sizeof (unsigned int));\n    *num_obj_ptr = num_obj;\n    //snprintf(num_obj_str,15*sizeof(char), \"%u\", num_obj);\n\n    if (time_table_value == NULL) {\n\n        g_hash_table_insert(*time_table, object, num_obj_ptr);\n        *tree = insert(num_obj, *tree);\n        reuse_dist = 0;\n\n    } else {\n        //timestamp = 
strtol(time_table_value,NULL,10);\n reuse_dist = (uint64_t) calc_distance(*time_table_value, *tree);\n\n //Busquemos la distancia de reuso en la hashtable distance_table\n //snprintf(reuse_dist_str, 15*sizeof(char), \"%\"PRIu64\"\", reuse_dist);\n //printf(\"%u \\n\", reuse_dist);\n //delete old timestamp from tree\n *tree = deletetree(*time_table_value, *tree);\n\n //Insert new timestamp from tree\n *tree = insert(num_obj, *tree);\n g_hash_table_insert(*time_table, object, num_obj_ptr);\n if (version == FIXED_RATE) {\n free(object);\n }\n\n }\n\n //printf(\"num_obj_ptr: %u \\n\", *num_obj_ptr);\n\n return reuse_dist;\n}\n\n/*LFU implementation*/\nunsigned int calc_reuse_dist_LFU(void *object, uint64_t T_i, GHashTable **freq_table, Tree_lfu **freq_tree, shards_version version) {\n\n\n unsigned int reuse_dist = 0;\n\n unsigned int *freq_table_value = (unsigned int*) g_hash_table_lookup(*freq_table, object);\n unsigned int* freq_obj_ptr = (unsigned int*) malloc(sizeof (unsigned int));\n *freq_obj_ptr = 1;\n //snprintf(num_obj_str,15*sizeof(char), \"%u\", num_obj);\n\n if (freq_table_value == NULL) {\n\n g_hash_table_insert(*freq_table, object, freq_obj_ptr);\n *freq_tree = insert_lfu(*freq_obj_ptr, *freq_tree, T_i); // first var is frequency of 1 for new objs\n reuse_dist = 0;\n\n } else {\n //timestamp = strtol(time_table_value,NULL,10);\n reuse_dist = (uint64_t) calc_distance_lfu(*freq_table_value, *freq_tree, T_i);\n //Busquemos la distancia de reuso en la hashtable distance_table\n //snprintf(reuse_dist_str, 15*sizeof(char), \"%\"PRIu64\"\", reuse_dist);\n //printf(\"%u \\n\", reuse_dist);\n //delete old timestamp from tree\n *freq_tree = deletetree_lfu(*freq_table_value, *freq_tree, T_i);\n\n *freq_obj_ptr = *freq_table_value + 1;\n\n //Insert new timestamp from tree\n *freq_tree = insert_lfu(*freq_obj_ptr, *freq_tree, T_i);\n g_hash_table_insert(*freq_table, object, freq_obj_ptr);\n if (version == FIXED_RATE) {\n free(object);\n }\n\n }\n\n //printf(\"num_obj_ptr: %u \\n\", *num_obj_ptr);\n\n return reuse_dist;\n}\n\n/*MQ implementation*/\nunsigned int calc_reuse_dist_MQ(void *object___, SHARDS ** shards, TNODE **RT_i, GHashTable **mq_tick_table, GHashTable **mq_freq_table, Tree_mq **dist_tree_mq, GList **mq_out, T tick, T life_time, shards_version version) {\n\n //(*shards)->tick++;\n\n unsigned int reuse_dist = 0;\n\n gpointer gp1 = g_hash_table_lookup(*mq_freq_table, GUINT_TO_POINTER((*RT_i)->T));\n //unsigned int *freq_table_value = (unsigned int*) g_hash_table_lookup(*mq_freq_table, (gpointer) (*RT_i)->T);\n unsigned int freq_table_value = GPOINTER_TO_UINT(gp1);\n\n unsigned int freq_obj_value = 1;\n //*freq_obj_ptr = 1;\n //(*RT_i)->freq = 1; already set to 1 in feedobj\n uint64_t tick_obj_value = (*shards)->tick + (*shards)->life_time;\n //snprintf(num_obj_str,15*sizeof(char), \"%u\", num_obj);\n //printf(\"hash table size is %d \\n\", g_hash_table_size(*mq_freq_table));\n //printf(\"T_i =%\" PRId64 \"\\n\", (*RT_i)->T);\n //printf(\"Obj = %s\\n\", (*RT_i)->obj);\n\n if (gp1 == NULL) {\n\n g_hash_table_insert(*mq_freq_table, GUINT_TO_POINTER((*RT_i)->T), GUINT_TO_POINTER(freq_obj_value));\n g_hash_table_insert(*mq_tick_table, GUINT_TO_POINTER((*RT_i)->T), GUINT_TO_POINTER(tick_obj_value));\n *dist_tree_mq = insert_mq(freq_obj_value, *dist_tree_mq, (*RT_i)->T); // first var is frequency of 1 for new objs\n reuse_dist = 0;\n // printf(\"here\\n\");\n } else {\n //printf(\"freq table value is %d \\n\", freq_table_value);\n //timestamp = strtol(time_table_value,NULL,10);\n 
reuse_dist = (uint64_t) calc_distance_mq(freq_table_value, *dist_tree_mq, (*RT_i)->T);\n // reuse_dist = (uint64_t) calc_distance_mq(1, *dist_tree_mq, (*RT_i)->T);\n //Busquemos la distancia de reuso en la hashtable distance_table\n //snprintf(reuse_dist_str, 15*sizeof(char), \"%\"PRIu64\"\", reuse_dist);\n //printf(\"%u \\n\", reuse_dist);\n //delete old timestamp from tree\n *dist_tree_mq = deletetree_mq(freq_table_value, *dist_tree_mq, (*RT_i)->T);\n\n freq_table_value += 1;\n (*shards)->max_f_mq = new_max((*shards)->max_f_mq, freq_table_value);\n //Insert new timestamp from tree\n *dist_tree_mq = insert_mq(freq_table_value, *dist_tree_mq, (*RT_i)->T);\n g_hash_table_insert(*mq_freq_table, GUINT_TO_POINTER((*RT_i)->T), GUINT_TO_POINTER(freq_table_value));\n g_hash_table_insert(*mq_tick_table, GUINT_TO_POINTER((*RT_i)->T), GUINT_TO_POINTER(tick_obj_value));\n if (version == FIXED_RATE) {\n\n free((*RT_i)->obj);\n }\n\n }\n\n //demote objs\n unsigned int i;\n uint64_t temp_T;\n for (i = 2; i <= (*shards)->max_f_mq; i++) {\n //while (false) {\n temp_T = get_first_from_freq_mq(i, *dist_tree_mq);\n if (temp_T == 0)\n continue;\n //else\n // printf(\"temp_T non zero \\n\");\n uint64_t temp_t = GPOINTER_TO_UINT(g_hash_table_lookup(*mq_tick_table, GUINT_TO_POINTER(temp_T)));\n if (temp_T != (*RT_i)->T && temp_t < (*shards)->tick) {\n //evict to lower tier\n //unsigned int *temp_f = (unsigned int*) g_hash_table_lookup(*mq_freq_table, (gpointer) temp_T);\n *dist_tree_mq = deletetree_mq(i, *dist_tree_mq, temp_T);\n\n g_hash_table_insert(*mq_freq_table, GUINT_TO_POINTER(temp_T), GUINT_TO_POINTER(i - 1));\n *dist_tree_mq = insert_mq(i - 1, *dist_tree_mq, temp_T);\n g_hash_table_insert(*mq_tick_table, GUINT_TO_POINTER(temp_T), GUINT_TO_POINTER(tick_obj_value));\n }\n }\n\n\n //printf(\"num_obj_ptr: %u \\n\", *num_obj_ptr);\n\n return reuse_dist;\n}\n\n/*\n This function updates the reuse distance hashtable for the Fixed-Rate version of SHARDS.\n */\nvoid update_dist_table(uint64_t reuse_dist, GHashTable **dist_table) {\n\n uint64_t *x = (uint64_t*) g_hash_table_lookup(*dist_table, &reuse_dist);\n\n if (x == NULL) {\n //printf(\"11111\\n\");\n x = (uint64_t*) malloc(sizeof (uint64_t));\n *x = 1;\n uint64_t *dist = (uint64_t*) malloc(sizeof (uint64_t));\n *dist = reuse_dist;\n g_hash_table_insert(*dist_table, dist, x);\n //printf(\"hashtable value: %d\\n\", *(int*)g_hash_table_lookup(*dist_table, &reuse_dist));\n\n } else {\n\n *x = *x + 1;\n\n }\n}\n\n/*\n This function updates the reuse distance hashtable in the Fixed-Size version of SHARDS. 
The difference with the above is that it stores the current value\n of T with the reuse distance.\n */\nvoid update_dist_table_fixed_size(uint64_t reuse_dist, GHashTable **dist_table, uint64_t T_new) {\n\n uint64_t *x = (uint64_t*) g_hash_table_lookup(*dist_table, &reuse_dist);\n uint64_t T_old = 0;\n double tmp = 0;\n if (x == NULL) {\n //printf(\"11111\\n\");\n x = (uint64_t*) malloc(2 * sizeof (uint64_t));\n x[0] = 1;\n x[1] = T_new;\n //printf(\"x[0]: %\"PRIu64\" x[1]: %\"PRIu64\"\\n\",x[0],x[1]);\n int *dist = (int*) malloc(sizeof (uint64_t));\n *dist = reuse_dist;\n g_hash_table_insert(*dist_table, dist, x);\n //printf(\"hashtable value: %d\\n\", *(int*)g_hash_table_lookup(*dist_table, &reuse_dist));\n } else {\n T_old = x[1];\n if (T_old != T_new) {\n\n tmp = ((double) T_new) / T_old;\n x[0] = (uint64_t) x[0] * tmp + 1;\n x[1] = T_new;\n }\n\n *x = *x + 1;\n //printf(\"x[0]: %\"PRIu64\" x[1]: %\"PRIu64\"\\n\",x[0],x[1]);\n\n }\n}\n\nint intcmp_gdirect(const void *x, const void *y) {\n const int a = GPOINTER_TO_INT(x);\n const int b = GPOINTER_TO_INT(y);\n\n return (a < b) ? -1 : (a > b);\n}\n\nint intcmp(const void *x, const void *y) {\n const int a = *(int*) x;\n const int b = *(int*) y;\n\n return (a < b) ? -1 : (a > b);\n}\n\nint uint64cmp(const void *x, const void *y) {\n const uint64_t a = *(uint64_t*) x;\n const uint64_t b = *(uint64_t*) y;\n\n if (a > b) {\n return 1;\n } else if (a < b) {\n return -1;\n } else {\n\n return 0;\n }\n}\n\nguint g_uint64_hash(gconstpointer v) {\n\n return (guint) *(const guint64*) v;\n}\n\ngboolean g_uint64_equal(gconstpointer v1, gconstpointer v2) {\n\n return *((const guint64*) v1) == *((const guint64*) v2);\n}\n\nint doublecmp(const void *x, const void *y) {\n const double a = *(double*) x;\n const double b = *(double*) y;\n\n return (a < b) ? 
-1 : (a > b);\n}\n\nbool dummy(void* x) {\n\n return true;\n\n}\n\n/*LFU implementation*/\nSHARDS * SHARDS_new_init_LFU(double R_init, unsigned int bucket_size, object_Type type, unsigned int max_setsize, shards_version version) {\n SHARDS *shards = (SHARDS *) malloc(sizeof (SHARDS));\n if (shards == NULL) {\n return NULL;\n }\n shards->version = version;\n shards->dataType = type;\n shards->initial_R_value = R_init; // Not really used in the fixed_rate version.\n shards->R = R_init;\n\n uint64_t tmp = 1;\n tmp = tmp << 24;\n shards->P = tmp;\n\n shards->T = R_init*tmp;\n\n shards->bucket_size = bucket_size;\n switch (type) {\n case String:\n shards->freq_table = g_hash_table_new_full(g_str_hash, g_str_equal, NULL, (GDestroyNotify) free);\n break;\n case Int:\n shards->freq_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, (GDestroyNotify) free);\n break;\n case Uint64:\n shards->freq_table = g_hash_table_new_full(g_uint64_hash, g_uint64_equal, NULL, (GDestroyNotify) free);\n break;\n case Double:\n shards->freq_table = g_hash_table_new_full(g_double_hash, g_double_equal, NULL, (GDestroyNotify) free);\n break;\n\n }\n\n shards->dist_histogram = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, (GDestroyNotify) free);\n shards->set_size = 0;\n shards->evic_obj = 0;\n\n shards->total_objects = 0;\n shards->num_obj = 0;\n shards->fraction = 0;\n shards->S_max = max_setsize;\n\n\n shards->dist_tree = NULL;\n shards->set_tree = NULL;\n shards->set_list = NULL;\n shards->set_list_search = NULL;\n\n shards->dist_tree_lfu = NULL;\n\n if (shards->version == FIXED_SIZE) {\n shards->set_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, (GDestroyNotify) g_list_free);\n\n } else {\n\n shards->set_table = NULL;\n }\n\n return shards;\n};\n\n/*MQ implementation*/\nSHARDS * SHARDS_new_init_MQ(double R_init, unsigned int bucket_size, object_Type type, unsigned int max_setsize, shards_version version) {\n SHARDS *shards = (SHARDS *) malloc(sizeof (SHARDS));\n if (shards == NULL) {\n return NULL;\n }\n shards->version = version;\n shards->dataType = type;\n shards->initial_R_value = R_init; // Not really used in the fixed_rate version.\n shards->R = R_init;\n\n uint64_t tmp = 1;\n tmp = tmp << 24;\n shards->P = tmp;\n\n shards->T = R_init*tmp;\n\n shards->bucket_size = bucket_size;\n switch (type) {\n case String:\n shards->mq_freq_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, NULL);\n shards->mq_tick_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, NULL);\n //shards->mq_freq_table = g_hash_table_new_full(g_str_hash, g_str_equal, NULL, (GDestroyNotify) free);\n break;\n case Int:\n shards->mq_freq_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, (GDestroyNotify) free);\n break;\n case Uint64:\n shards->mq_freq_table = g_hash_table_new_full(g_uint64_hash, g_uint64_equal, NULL, (GDestroyNotify) free);\n break;\n case Double:\n shards->mq_freq_table = g_hash_table_new_full(g_double_hash, g_double_equal, NULL, (GDestroyNotify) free);\n break;\n\n }\n // shards->mq_tick_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, (GDestroyNotify) free);\n\n\n shards->dist_histogram = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, (GDestroyNotify) free);\n shards->set_size = 0;\n shards->evic_obj = 0;\n\n shards->total_objects = 0;\n shards->num_obj = 0;\n shards->fraction = 0;\n shards->S_max = max_setsize;\n\n\n shards->dist_tree = NULL;\n shards->set_tree = NULL;\n shards->set_list = NULL;\n shards->set_list_search = 
NULL;\n\n shards->dist_tree_mq = NULL;\n shards->tick = 0;\n shards->life_time = 10000 * R_init;\n shards->max_f_mq = 0;\n\n if (shards->version == FIXED_SIZE) {\n shards->set_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, (GDestroyNotify) g_list_free);\n\n } else {\n\n shards->set_table = NULL;\n }\n\n return shards;\n};\n\n/*original implementation*/\nSHARDS * SHARDS_new_init(double R_init, unsigned int bucket_size, object_Type type, unsigned int max_setsize, shards_version version) {\n SHARDS *shards = (SHARDS *) malloc(sizeof (SHARDS));\n if (shards == NULL) {\n return NULL;\n }\n shards->version = version;\n shards->dataType = type;\n shards->initial_R_value = R_init; // Not really used in the fixed_rate version.\n shards->R = R_init;\n\n uint64_t tmp = 1;\n tmp = tmp << 24;\n shards->P = tmp;\n\n shards->T = R_init*tmp;\n\n shards->bucket_size = bucket_size;\n\n switch (type) {\n case String:\n shards->time_table = g_hash_table_new_full(g_str_hash, g_str_equal, NULL, (GDestroyNotify) free);\n break;\n case Int:\n shards->time_table = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, (GDestroyNotify) free);\n break;\n case Uint64:\n shards->time_table = g_hash_table_new_full(g_uint64_hash, g_uint64_equal, NULL, (GDestroyNotify) free);\n break;\n case Double:\n shards->time_table = g_hash_table_new_full(g_double_hash, g_double_equal, NULL, (GDestroyNotify) free);\n break;\n\n }\n\n shards->dist_histogram = g_hash_table_new_full(g_int_hash, g_int_equal, NULL, (GDestroyNotify) free);\n shards->set_size = 0;\n shards->evic_obj = 0;\n\n shards->total_objects = 0;\n shards->num_obj = 0;\n shards->fraction = 0;\n shards->S_max = max_setsize;\n\n\n shards->dist_tree = NULL;\n shards->set_tree = NULL;\n shards->set_list = NULL;\n shards->set_list_search = NULL;\n\n if (shards->version == FIXED_SIZE) {\n shards->set_table = g_hash_table_new_full(g_direct_hash, g_direct_equal, NULL, (GDestroyNotify) g_list_free);\n\n } else {\n\n shards->set_table = NULL;\n }\n\n return shards;\n}\n\nSHARDS* SHARDS_fixed_rate_init(double R_init, unsigned int bucket_size, object_Type type) {\n\n //\tValidation\n if (R_init <= 0 || R_init > 1) {\n printf(\"Value of R must be in the range (0,1].\\n\");\n return NULL;\n }\n\n SHARDS *shards = SHARDS_new_init(R_init, bucket_size, type, 0, FIXED_RATE);\n\n return shards;\n}\n\nSHARDS* SHARDS_fixed_size_init(unsigned int max_setsize, unsigned int bucket_size, object_Type type) {\n\n if (max_setsize <= 0) {\n printf(\"The maximum size of the working set must be greater then 0.\\n\");\n return NULL;\n }\n\n SHARDS *shards = SHARDS_new_init(0.1, bucket_size, type, max_setsize, FIXED_SIZE);\n\n return shards;\n\n\n}\n\nSHARDS* SHARDS_fixed_size_init_R(unsigned int max_setsize, double R_init, unsigned int bucket_size, object_Type type) {\n\n if (R_init <= 0 || R_init > 1 || max_setsize <= 0) {\n return NULL;\n }\n\n SHARDS *shards = SHARDS_new_init(R_init, bucket_size, type, max_setsize, FIXED_SIZE);\n\n return shards;\n\n}\n\n/* LFU implementation*/\nSHARDS* SHARDS_fixed_size_init_R_LFU(unsigned int max_setsize, double R_init, unsigned int bucket_size, object_Type type) {\n\n if (R_init <= 0 || R_init > 1 || max_setsize <= 0) {\n return NULL;\n }\n\n SHARDS *shards = SHARDS_new_init_LFU(R_init, bucket_size, type, max_setsize, FIXED_SIZE);\n\n return shards;\n\n}\n\n/* MQ implementation*/\nSHARDS* SHARDS_fixed_size_init_R_MQ(unsigned int max_setsize, double R_init, unsigned int bucket_size, object_Type type) {\n\n if (R_init <= 0 || R_init > 1 || 
max_setsize <= 0) {\n return NULL;\n }\n\n SHARDS *shards = SHARDS_new_init_MQ(R_init, bucket_size, type, max_setsize, FIXED_SIZE);\n\n return shards;\n\n}\n\n/*\nThis function inserts an object into SHARDS for it to be processed.\n */\nint SHARDS_feed_obj(SHARDS *shards, void* object, size_t nbytes, char* algo, int level) {\n\n shards->total_objects++;\n\n uint64_t hash[2];\n uint64_t T_i = 0;\n unsigned int reuse_dist = 0;\n unsigned int bucket = 0;\n unsigned int bucket_size = shards->bucket_size;\n unsigned int eviction_key = 0;\n\n qhashmurmur3_128(object, nbytes, hash);\n T_i = hash[1] &((shards->P) - 1);\n\n //printf(\"Entered the feed_obj procedure.\\n\");\n\n\n if (shards->version == FIXED_SIZE) {\n //printf(\"T_i: %\"PRIu64 \" ? T: %\"PRIu64\"\\n\", T_i, shards->T);\n if (T_i < shards->T || level > 1) {\n //printf(\"T_i =%\" PRId64 \"\\n\"), T_i;\n //printf(\"########\\nObject accepted!\\n########\\n\");\n shards->num_obj++;\n shards->tick++;\n TNODE *RT_i = (TNODE *) malloc(sizeof (TNODE));\n RT_i->T = T_i;\n RT_i->freq = 1;\n RT_i->ref = 0;\n RT_i->obj = object;\n\n //printf(\"num_obj: %u\\n\", num_obj);\n if (strcmp(algo, \"LRU\") == 0)\n reuse_dist = calc_reuse_dist(object, shards->num_obj, &(shards->time_table), &(shards->dist_tree), shards->version);\n else if (strcmp(algo, \"LFU\") == 0)\n reuse_dist = calc_reuse_dist_LFU(object, T_i, &(shards->freq_table), &(shards->dist_tree_lfu), shards->version);\n else if (strcmp(algo, \"MQ\") == 0)\n reuse_dist = calc_reuse_dist_MQ(object, &shards, &RT_i, &(shards->mq_tick_table), &(shards->mq_freq_table), &(shards->dist_tree_mq), &(shards->mq_out), shards->tick, shards->life_time, shards->version);\n else if (strcmp(algo, \"FIFO\") == 0)\n reuse_dist = calc_reuse_dist_FIFO(object, shards->num_obj, &(shards->time_table), &(shards->dist_tree), shards->version);\n\n reuse_dist = (unsigned int) (reuse_dist / shards->R);\n //printf(\"Reuse dist:%u\\n\", reuse_dist);\n if (reuse_dist != 0) {\n\n bucket = ((reuse_dist - 1) / bucket_size) * bucket_size + bucket_size;\n //printf(\"b: %u\\n\", bucket);\n } else {\n bucket = 0;\n //printf(\"B: %u\\n\", bucket);\n }\n //printf(\"Reuse distance: %5u\\n\", reuse_dist);\n //printf(\"Bucket: %u\\n\",bucket );\n update_dist_table_fixed_size(bucket, &(shards->dist_histogram), shards->T);\n\n //Insert <object, T_i> into Set S\n\n shards->set_tree = insert(T_i, shards->set_tree);\n //printf(\"%d %p\\n\", set_tree->key, set_tree );\n //Lookup the list associated with the value T_i\n shards->set_list = (GList*) g_hash_table_lookup(shards->set_table, shards->set_tree);\n //If the search returns NULL (e.g the list doesnt exist), create a list and insert it\n if (shards->set_list == NULL) {\n shards->set_list = g_list_append(shards->set_list, object);\n g_hash_table_insert(shards->set_table, shards->set_tree, shards->set_list);\n shards->set_size++;\n\n } else {\n //If the search returns a list, search the object in the list\n\n if (shards->dataType == String) {\n shards->set_list_search = g_list_find_custom(shards->set_list, object, (GCompareFunc) strcmp);\n } else if (shards->dataType == Int) {\n shards->set_list_search = g_list_find_custom(shards->set_list, object, (GCompareFunc) intcmp);\n } else if (shards->dataType == Double) {\n shards->set_list_search = g_list_find_custom(shards->set_list, object, (GCompareFunc) doublecmp);\n } else {\n shards->set_list_search = g_list_find_custom(shards->set_list, object, (GCompareFunc) uint64cmp);\n }\n\n\n //If the object is not on the list, add it\n if 
(shards->set_list_search == NULL) {\n shards->set_list = g_list_append(shards->set_list, object);\n shards->set_list_search = NULL;\n shards->set_size += 1;\n\n } else {\n free(object);\n //object = NULL;\n }\n\n }\n //printf(\"Set_size: %u\\n\\n\", set_size);\n shards->set_list = NULL;\n\n if (shards->set_size > shards->S_max && FALSE) {\n //Eviction\n printf(\"EVICTION!!\\n\\n \");\n\n\n\n shards->evic_tree = find_rank((shards->set_tree->size) - 1, shards->set_tree);\n shards->evic_tree = splay(shards->evic_tree->key, shards->evic_tree);\n\n eviction_key = shards->evic_tree -> key;\n\n shards->set_list = (GList*) g_hash_table_lookup(shards->set_table, shards->evic_tree);\n\n\n while (1) {\n if (strcmp(algo, \"LRU\") == 0) {\n shards->dist_tree = deletetree(*(unsigned int *) (g_hash_table_lookup(shards->time_table, (char*) shards->set_list->data)), shards->dist_tree);\n\n g_hash_table_remove(shards->time_table, (char*) shards->set_list->data);\n } else if (strcmp(algo, \"LFU\") == 0) {\n qhashmurmur3_128((char*) shards->set_list->data, nbytes, hash);\n uint64_t evic_T_i = hash[1] &((shards->P) - 1);\n shards->dist_tree_lfu = deletetree_lfu(*(unsigned int *) (g_hash_table_lookup(shards->freq_table, (char*) shards->set_list->data)), shards->dist_tree_lfu, evic_T_i);\n\n g_hash_table_remove(shards->freq_table, (char*) shards->set_list->data);\n }\n\n free(shards->set_list->data);\n shards->set_size -= 1;\n shards->evic_obj += 1;\n\n if (shards->set_list->next == NULL) {\n\n break;\n }\n\n shards->set_list = shards->set_list->next;\n }\n //remove and free value from set_table\n g_hash_table_remove(shards->set_table, shards->evic_tree);\n\n\n\n //remove and free the eviction_key from set_tree, whose node is also the key for set_table\n shards->set_tree = deletetree(eviction_key, shards->set_tree);\n shards->evic_tree = NULL;\n\n shards->T = eviction_key;\n shards->R = ((double) shards->T) / shards->P;\n\n //printf(\"NEW R: %f T: %\"PRIu64\"\\n\", shards->R,shards->T);\n }\n\n } else {\n free(object);\n object = NULL;\n }\n\n } else {\n //FIXED RATE CODE GOES HERE\n\n if (T_i < shards->T) {\n shards->num_obj++;\n\n //printf(\"num_obj: %u\\n\", num_obj);\n\n reuse_dist = calc_reuse_dist(object, shards->num_obj, &(shards->time_table), &(shards->dist_tree), shards->version);\n reuse_dist = (unsigned int) (reuse_dist / shards->R);\n\n if (reuse_dist != 0) {\n\n bucket = ((reuse_dist - 1) / bucket_size) * bucket_size + bucket_size;\n\n } else {\n bucket = 0;\n }\n //printf(\"Reuse distance: %5u\\n\", reuse_dist);\n //printf(\"Bucket: %u\\n\",bucket );\n update_dist_table(bucket, &(shards->dist_histogram));\n\n\n\n } else {\n free(object);\n }\n\n }\n if (object == NULL) {\n return -1;\n } else {\n\n return bucket;\n }\n\n}\n\n/*\n This function frees the SHARDS data structure\n */\nvoid SHARDS_free(SHARDS* shards) {\n\n if (shards->version == FIXED_SIZE) {\n GList *keys = NULL;\n\n if (shards->dist_histogram != NULL) {\n keys = g_hash_table_get_keys(shards->dist_histogram);\n\n if (keys != NULL) {\n g_list_free_full(keys, (GDestroyNotify) free);\n }\n g_hash_table_destroy(shards->dist_histogram);\n }\n\n\n if (shards->time_table != NULL) {\n keys = g_hash_table_get_keys(shards->time_table);\n if (keys != NULL) {\n g_list_free_full(keys, (GDestroyNotify) free);\n }\n g_hash_table_destroy(shards->time_table);\n\n }\n\n if (shards->set_table != NULL) {\n g_hash_table_destroy(shards->set_table);\n }\n\n\n freetree(shards->set_tree);\n shards->set_tree = NULL;\n\n } else {\n\n if (shards->dist_histogram 
!= NULL) {\n g_hash_table_destroy(shards->dist_histogram);\n\n }\n GList *keys = g_hash_table_get_keys(shards->time_table);\n if (keys != NULL) {\n g_list_free_full(keys, (GDestroyNotify) free);\n }\n\n if (shards->time_table != NULL) {\n\n g_hash_table_destroy(shards->time_table);\n\n }\n\n }\n\n freetree(shards->dist_tree);\n shards->dist_tree = NULL;\n free(shards);\n\n\n\n\n}\n\n/*\n Creates MRC for the Fixed-Rate version of SHARDS.\n */\n\nGHashTable *MRC_fixed_rate(SHARDS *shards) {\n\n GList *keys = g_hash_table_get_keys(shards->dist_histogram);\n keys = g_list_sort(keys, (GCompareFunc) intcmp);\n GList *tmp_keys = keys;\n GHashTable *tabla = g_hash_table_new_full(g_int_hash, g_int_equal, (GDestroyNotify) free, (GDestroyNotify) free);\n\n\n double *missrate = NULL;\n int *cache_size = NULL;\n unsigned int part_sum = 0;\n unsigned int total_sum = *(int*) (g_hash_table_lookup(shards->dist_histogram, keys->data));\n //printf(\"TOTAL SUM: %u \\n\", total_sum);\n\n keys = keys->next;\n while (1) {\n cache_size = (int*) malloc(sizeof (int));\n missrate = (double*) malloc(sizeof (double));\n part_sum = part_sum + *(int*) (g_hash_table_lookup(shards->dist_histogram, keys->data));\n //printf(\"PART SUM: %u \\n\", part_sum);\n *missrate = (double) part_sum;\n *cache_size = *(int*) (keys->data);\n //printf(\"%d %f\\n\", *cache_size, *missrate);\n g_hash_table_insert(tabla, cache_size, missrate);\n\n if (keys->next == NULL) {\n break;\n }\n keys = keys->next;\n }\n keys = tmp_keys;\n total_sum = total_sum + part_sum;\n //printf(\"TOTAL SUM: %u \\n\", total_sum);\n\n keys = keys->next; //ignoring the zero (infinity) reuse dist\n missrate = NULL;\n\n while (keys != NULL) {\n\n missrate = (double*) g_hash_table_lookup(tabla, keys->data);\n *missrate = 1.0 - (*missrate / total_sum);\n //printf(\"%d %f\\n\", *(int*)keys->data, *missrate);\n\n\n keys = keys->next;\n }\n g_list_free(tmp_keys);\n\n return tabla;\n}\n\n/*\n Creates MRC for the Fixed-Rate version of SHARDS and empities (but does not free) the internal data structures.\n */\n\nGHashTable *MRC_fixed_rate_empty(SHARDS* shards) {\n\n GList *keys = g_hash_table_get_keys(shards->dist_histogram);\n keys = g_list_sort(keys, (GCompareFunc) intcmp);\n GList *tmp_keys = keys;\n GList* remove_link = NULL;\n\n GHashTable *tabla = g_hash_table_new_full(g_int_hash, g_int_equal, (GDestroyNotify) free, (GDestroyNotify) free);\n\n\n\n double *missrate = NULL;\n int *cache_size = NULL;\n unsigned int part_sum = 0;\n unsigned int total_sum = *(int*) (g_hash_table_lookup(shards->dist_histogram, keys->data));\n //printf(\"TOTAL SUM: %u \\n\", total_sum);\n int hist_size = g_hash_table_size(shards->dist_histogram);\n\n\n\n\n\n if (hist_size > 1) {\n //keys = keys->next;\n remove_link = keys;\n\n keys = g_list_remove_link(keys, remove_link);\n g_hash_table_remove(shards->dist_histogram, remove_link->data);\n free(remove_link->data);\n g_list_free(remove_link);\n while (keys != NULL) {\n //cache_size = malloc(sizeof(int));\n missrate = (double*) malloc(sizeof (double));\n part_sum = part_sum + *(int*) (g_hash_table_lookup(shards->dist_histogram, keys->data));\n //printf(\"PART SUM: %u \\n\", part_sum);\n *missrate = (double) part_sum;\n cache_size = (int*) (keys->data);\n //printf(\"%d %f\\n\", *cache_size, *missrate);\n g_hash_table_insert(tabla, cache_size, missrate);\n //keys= keys->next;\n remove_link = keys;\n keys = g_list_remove_link(keys, remove_link);\n\n g_hash_table_remove(shards->dist_histogram, remove_link->data);\n g_list_free(remove_link);\n }\n 
keys = g_hash_table_get_keys(tabla);\n        keys = g_list_sort(keys, (GCompareFunc) intcmp);\n        total_sum = total_sum + part_sum;\n\n        //printf(\"TOTAL SUM: %u \\n\", total_sum);\n\n        missrate = NULL;\n        tmp_keys = keys;\n        while (keys != NULL) {\n\n            missrate = (double*) g_hash_table_lookup(tabla, keys->data);\n            *missrate = 1.0 - (*missrate / total_sum);\n            //printf(\"%d  %f\\n\", *(int*)keys->data, *missrate);\n            keys = keys->next;\n\n        }\n\n        g_list_free(tmp_keys);\n\n    } else if (hist_size == 1) {\n\n\n\n\n\n        //\tif hist_size == 1\n        //printf(\"WHATSUP\\n\");\n        cache_size = (int*) malloc(sizeof (int));\n        missrate = (double*) malloc(sizeof (double));\n        *cache_size = *(int*) (keys->data);\n        *missrate = 1.0;\n        g_hash_table_insert(tabla, cache_size, missrate);\n        //printf(\"GETOUT\\n\");\n        remove_link = keys;\n        keys = g_list_remove_link(keys, remove_link); //First parameter is the list, second is the node we wish to remove\n\n        //Then we remove that key and its associated value from dist_histogram (this frees the value, not the key)\n        g_hash_table_remove(shards->dist_histogram, remove_link->data);\n\n\n\n        //free the data inside remove_link\n        free(remove_link->data);\n        //Now that the data is freed, we free the GList node itself.\n        g_list_free(remove_link); //free the GList node\n\n        //We don't need to do a keys=keys->next, g_list_remove_link() did that for us already\n        //keys = keys->next;\n        remove_link = NULL;\n\n    } else {\n        //printf(\"The reuse distance histogram (dist_histogram) is empty\");\n        return NULL;\n\n    }\n\n\n\n    // Free the keys from time_table (object) which are also the data for the set_list that act as values for set_table\n    keys = g_hash_table_get_keys(shards->time_table);\n    //remove all the entries in time_table (object, t_i), where the time t_i is freed. Object is not\n    g_hash_table_remove_all(shards->time_table);\n    g_list_free_full(keys, (GDestroyNotify) free);\n    keys = NULL;\n\n\n\n    freetree(shards->dist_tree);\n    shards->dist_tree = NULL;\n\n    shards->total_objects = 0;\n    shards->num_obj = 0;\n\n    return tabla;\n}\n\n/*\n Creates MRC for the Fixed-Size version of SHARDS.\n */\n\nGHashTable *MRC_fixed_size(SHARDS *shards) {\n\n    GList *keys = g_hash_table_get_keys(shards->dist_histogram);\n    keys = g_list_sort(keys, (GCompareFunc) intcmp);\n    GList *tmp_keys = keys;\n    GHashTable *tabla = g_hash_table_new_full(g_int_hash, g_int_equal, (GDestroyNotify) free, (GDestroyNotify) free);\n\n    uint64_t T_new = shards->T;\n    double tmp = 0.0;\n\n\n\n    double *missrate = NULL;\n    int *cache_size = NULL;\n    uint64_t total_sum = 0;\n    uint64_t part_sum = 0;\n\n    uint64_t *hist_value = (uint64_t*) (g_hash_table_lookup(shards->dist_histogram, keys->data));\n\n    unsigned int hist_size = g_hash_table_size(shards->dist_histogram);\n\n    if (hist_value[1] != T_new) {\n        tmp = hist_value[0]*(((double) T_new) / hist_value[1]) + 1;\n        total_sum = (uint64_t) tmp;\n        /*z*/ printf(\"T_new T_old different\\n\");\n        printf(\"T_new: %\"PRIu64\", T_old: %\"PRIu64\", hist_value[0]: %\"PRIu64\"\\n\", T_new, hist_value[1], hist_value[0]);\n    } else {\n        total_sum = *hist_value;\n    }\n    //printf(\"TOTAL SUM: %u \\n\", total_sum);\n\n    //printf(\"cache size: %d total_sum: %\"PRIu64\" T: %\"PRIu64\" T_new %\"PRIu64\" \\n\", *(int*)keys->data, total_sum, hist_value[1], T_new);\n    if (hist_size > 1) {\n        keys = keys->next;\n        while (1) {\n\n            missrate = (double*) malloc(sizeof (double));\n\n            hist_value = (uint64_t*) g_hash_table_lookup(shards->dist_histogram, keys->data);\n            if (hist_value[1] != T_new) {\n                tmp = hist_value[0]*(((double) T_new) / hist_value[1]) 
+ 1;\n                part_sum = part_sum + (uint64_t) tmp;\n                /*z*/ printf(\"T_new T_old different\\n\");\n                printf(\"T_new: %\"PRIu64\", T_old: %\"PRIu64\", hist_value[0]: %\"PRIu64\", tmp: %f, part_sum: %\"PRIu64\"\\n\", T_new, hist_value[1], hist_value[0], tmp, part_sum);\n            } else {\n\n                part_sum = part_sum + hist_value[0];\n            }\n\n\n            *missrate = (double) part_sum;\n            cache_size = (int*) malloc(sizeof (int));\n            *cache_size = *(int*) (keys->data);\n            //printf(\"cache size: %d part_sum: %f T: %\"PRIu64\" T_new %\"PRIu64\" \\n\", *cache_size, *missrate, hist_value[1], T_new);\n            g_hash_table_insert(tabla, cache_size, missrate);\n\n            if (keys->next == NULL) {\n                break;\n            }\n            keys = keys->next;\n        }\n\n\n        //keys = g_list_first(keys);\n        keys = tmp_keys;\n\n        total_sum = total_sum + part_sum;\n        //printf(\"TOTAL SUM: %u \\n\", total_sum);\n\n        keys = keys->next; //ignoring the zero (infinity) reuse dist\n        missrate = NULL; //We are going to use miss_rate again as a temp variable\n        while (keys != NULL) {\n\n            missrate = (double*) g_hash_table_lookup(tabla, keys->data);\n            *missrate = 1.0 - (*missrate / total_sum);\n            //printf(\"%d  %f\\n\", *(int*)keys->data, *missrate);\n            /*\n            if(keys->next ==NULL){\n                    break;\n            }\n             */\n            keys = keys->next;\n        }\n    } else {\n\n        //\tif hist_size == 1\n\n        cache_size = (int*) malloc(sizeof (int));\n        missrate = (double*) malloc(sizeof (double));\n        *cache_size = *(int*) (keys->data);\n        *missrate = 1.0;\n        g_hash_table_insert(tabla, cache_size, missrate);\n\n    }\n    //keys = g_list_first(keys);\n    g_list_free(tmp_keys);\n\n    return tabla;\n}\n\n/*\n Creates MRC for the Fixed-Size version of SHARDS and empties (but does not free) the internal data structures.\n */\n\nGHashTable *MRC_fixed_size_empty(SHARDS *shards) {\n\n\n    GList *keys = g_hash_table_get_keys(shards->dist_histogram);\n    keys = g_list_sort(keys, (GCompareFunc) intcmp);\n    GList *tmp_keys = keys;\n    GHashTable *tabla = g_hash_table_new_full(g_int_hash, g_int_equal, (GDestroyNotify) free, (GDestroyNotify) free);\n\n    uint64_t T_new = shards->T;\n    double tmp = 0.0;\n\n    GList* remove_link = NULL;\n\n    double *missrate = NULL;\n    int *cache_size = NULL;\n    uint64_t total_sum = 0;\n    uint64_t part_sum = 0;\n\n    unsigned int hist_size = g_hash_table_size(shards->dist_histogram);\n\n    if (hist_size > 1) {\n\n        uint64_t *hist_value = (uint64_t*) (g_hash_table_lookup(shards->dist_histogram, keys->data));\n\n\n        if (hist_value[1] != T_new) {\n            tmp = hist_value[0]*(((double) T_new) / hist_value[1]) + 1;\n            total_sum = (uint64_t) tmp;\n        } else {\n            total_sum = *hist_value;\n        }\n        //We are going to remove the key, value pair for the key=0 in dist_histogram\n        //First we remove the node in keys that has the value 0\n        remove_link = keys;\n        keys = g_list_remove_link(keys, remove_link); //First parameter is the list, second is the node we wish to remove\n\n        //Then we remove that key and its associated value from dist_histogram (this frees the value, not the key)\n        g_hash_table_remove(shards->dist_histogram, remove_link->data);\n        //free the data inside remove_link\n        free(remove_link->data);\n        //Now that the data is freed, we free the GList node itself.\n        g_list_free(remove_link); //free the GList node\n\n        //We don't need to do a keys=keys->next, g_list_remove_link() did that for us already\n        //keys = keys->next;\n        remove_link = NULL;\n        while (1) {\n            //cache_size = malloc(sizeof(int));\n            missrate = (double*) malloc(sizeof (double));\n\n            hist_value = (uint64_t*) g_hash_table_lookup(shards->dist_histogram, keys->data);\n            if (hist_value[1] != T_new) {\n                tmp = hist_value[0]*(((double) 
T_new) / hist_value[1]) + 1;\n                part_sum = part_sum + (uint64_t) tmp;\n            } else {\n\n                part_sum = part_sum + hist_value[0];\n            }\n\n\n            *missrate = (double) part_sum;\n            cache_size = (int*) (keys->data);\n            //printf(\"cache size: %d part_sum: %f T: %\"PRIu64\" T_new %\"PRIu64\" \\n\", *cache_size, *missrate, hist_value[1], T_new);\n            g_hash_table_insert(tabla, cache_size, missrate);\n\n            if (keys->next == NULL) {\n                //Repeated code to be able to get out of the while loop\n                //Maybe if the condition is while(keys!=NULL); don't know if g_list_remove_link() returns NULL\n                remove_link = keys;\n                keys = g_list_remove_link(keys, remove_link);\n                g_hash_table_remove(shards->dist_histogram, remove_link->data);\n                g_list_free(remove_link);\n                remove_link = NULL;\n                break;\n            }\n            remove_link = keys;\n            keys = g_list_remove_link(keys, remove_link);\n            g_hash_table_remove(shards->dist_histogram, remove_link->data);\n            g_list_free(remove_link);\n            remove_link = NULL;\n            //keys= keys->next;\n        }\n\n        keys = g_hash_table_get_keys(tabla);\n        keys = g_list_sort(keys, (GCompareFunc) intcmp);\n        tmp_keys = keys;\n        //keys = g_list_first(keys);\n        total_sum = total_sum + part_sum;\n        //printf(\"TOTAL SUM: %u \\n\", total_sum);\n\n\n        missrate = NULL; //We are going to use miss_rate again as a temp variable\n        while (keys != NULL) {\n\n            missrate = (double*) g_hash_table_lookup(tabla, keys->data);\n            *missrate = 1.0 - (*missrate / total_sum);\n            //printf(\"%d  %f\\n\", *(int*)keys->data, *missrate);\n            /*\n            if(keys->next ==NULL){\n                    break;\n            }\n             */\n            keys = keys->next;\n        }\n\n        //keys = g_list_first(keys);\n        g_list_free(tmp_keys);\n        tmp_keys = NULL;\n    } else if (hist_size == 1) {\n\n\n        cache_size = (int*) malloc(sizeof (int));\n        missrate = (double*) malloc(sizeof (double));\n        *cache_size = *(int*) (keys->data);\n        *missrate = 1.0;\n        g_hash_table_insert(tabla, cache_size, missrate);\n\n        remove_link = keys;\n        keys = g_list_remove_link(keys, remove_link); //First parameter is the list, second is the node we wish to remove\n\n        //Then we remove that key and its associated value from dist_histogram (this frees the value, not the key)\n        g_hash_table_remove(shards->dist_histogram, remove_link->data);\n\n\n\n\n\n        //free the data inside remove_link\n        free(remove_link->data);\n        //Now that the data is freed, we free the GList node itself.\n        g_list_free(remove_link); //free the GList node\n\n        //We don't need to do a keys=keys->next, g_list_remove_link() did that for us already\n        //keys = keys->next;\n        remove_link = NULL;\n\n    } else {\n        //printf(\"The reuse distance histogram (dist_histogram) is empty\");\n        return NULL;\n\n    }\n\n    // Free the keys from time_table (object) which are also the data for the set_list that act as values for set_table\n    keys = g_hash_table_get_keys(shards->time_table);\n    g_list_free_full(keys, (GDestroyNotify) free);\n    keys = NULL;\n    //remove all the entries in time_table (object, t_i), where the time t_i is freed. 
Object is not\n g_hash_table_remove_all(shards->time_table);\n //remove every entry in set_table, freeing all the values, which are GLists, using g_list_free\n g_hash_table_foreach_remove(shards->set_table, (GHRFunc) dummy, NULL);\n\n\n //Freeing dist_tree\n freetree(shards->dist_tree);\n shards->dist_tree = NULL;\n //freeing set_tree, whose nodes act as the keys in set_table\n freetree(shards->set_tree);\n shards->set_tree = NULL;\n //shards->dist_histogram==NULL;\n\n //If you are using this function, it means that you want to reuse the SHARDS struct, so we need to reset\n //the value of R and T to their initial values.\n\n shards->R = shards->initial_R_value;\n shards->T = (shards->R)*(shards->P);\n shards->set_size = 0;\n shards->total_objects = 0;\n shards->num_obj = 0;\n\n return tabla;\n\n\n}\n\nGHashTable *MRC(SHARDS* shards) {\n\n if (shards->version == FIXED_RATE) {\n return MRC_fixed_rate(shards);\n } else {\n\n return MRC_fixed_size(shards);\n }\n\n}\n\nGHashTable *MRC_empty(SHARDS* shards) {\n\n if (shards->version == FIXED_RATE) {\n return MRC_fixed_rate_empty(shards);\n } else {\n return MRC_fixed_size_empty(shards);\n }\n}\n\n\n" }, { "alpha_fraction": 0.649479866027832, "alphanum_fraction": 0.6630483865737915, "avg_line_length": 17.711864471435547, "blob_id": "689dcc90db092df5aa81846f70f256c466b48e95", "content_id": "d285d017e65ecbe44fc7a27002b03e79ee67b788", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2211, "license_type": "no_license", "max_line_length": 107, "num_lines": 118, "path": "/include/minisim.h", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "/*\n * To change this license header, choose License Headers in Project Properties.\n * To change this template file, choose Tools | Templates\n * and open the template in the editor.\n */\n\n/*\n * File: minisim.h\n * Author: zliu\n *\n * Created on March 5, 2019, 8:22 PM\n */\n\n#ifndef __MINISIM_H\n#define __MINISIM_H\n\n#include <stdlib.h>\n#include <stdio.h>\n#include <glib.h>\n#include <string.h>\n#include <stdint.h>\n#include <inttypes.h>\n#include \"shards_utils.h\"\n#include \"priority_queue.h\"\n\n\n\ntypedef struct minisim_elem MINISIM;\ntypedef struct cfg_elem CFG_ELEM;\n\nstruct cfg_elem {\n int t1_s1;\n int t1_s2;\n double t1_p1;\n double t1_p2;\n int t2_s1;\n int t2_s2;\n double t2_p1;\n double t2_p2;\n\n};\n\nstruct minisim_elem {\n unsigned int hitcount;\n unsigned int count;\n\n //LRU object\n GQueue *LRU_queue;\n unsigned int LRU_cache_size;\n unsigned int LRU_list_size;\n\n\n\n unsigned int ARC_cache_size;\n unsigned int ARC_items_cached;\n\n GHashTable *ARC_cached;\n\n unsigned int ARC_hitcount;\n unsigned int ARC_count;\n //double ARC_p;\n int ARC_p;\n\n\n //Double-ended queue for ARC\n GQueue *ARC_t1;\n GQueue *ARC_t2;\n GQueue *ARC_b1;\n GQueue *ARC_b2;\n\n\n //data structure for LeCar\n //total cache hit\n unsigned int lecar_hitcount;\n //total cache access\n unsigned int lecar_count;\n // size of the cache\n unsigned int lecar_N;\n //history size\n unsigned int lecar_H;\n double lecar_initial_weight;\n double lecar_discount_rate;\n\n GList *lecar_CacheRecency;\n\n GList *lecar_Hist1;\n GList *lecar_Hist2;\n\n GList *lecar_PQ;\n GHashTable *lecar_freq;\n\n double lecar_learning_rate;\n double lecar_learning_rate_lfu;\n double lecar_learning_rate_lru;\n\n};\n\nMINISIM * MINISIM_new_init_LeCaR(unsigned int cache_size);\n\n\nMINISIM * MINISIM_new_init_ARC(unsigned int cache_size);\nunsigned int MINISIM_get_obj(MINISIM 
*minisim, unsigned int obj_int, size_t nbytes, char* algo, int level);\nunsigned int MINISIM_put_obj(MINISIM *minisim, unsigned int obj_int, size_t nbytes, char* algo, int level);\n\nint ARC_put();\nint ARC_get();\n\nint LeCar_put();\nint LeCar_get();\n\n\nint MQ_put();\nint MQ_get();\n\n\n\n\n#endif /* __MINISIM_H */\n\n\n\n" }, { "alpha_fraction": 0.6028971076011658, "alphanum_fraction": 0.6648351550102234, "avg_line_length": 22.821428298950195, "blob_id": "a6714845ca40da98c70c02fc3c7c4d064ad9afa5", "content_id": "b99514101f1581a737ed45b3216d08f6cf4a3fce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 2002, "license_type": "no_license", "max_line_length": 122, "num_lines": 84, "path": "/Exp1.2.sh", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "#!/bin/bash\necho \"Exp1 is about run time analysis\"\n\ntrace='prn_0.csv.gz.out'\ncfg='minisim_minisim_prn_0.0.01.mrc_LRU_LRU.cfg'\n#cfg is not used here\nR='0.02'\nstep_size='19921'\nres='50'\npolicy='LRU'\noutput='test_0'\necho \"doing for trace $trace \" \necho \"\\n\"\necho \"R=$R, step = $res\"\necho \"time for single tier cache LRU is \"\nmytime=$(time ./shards_test 20 $step_size $R ../traces/msr/$trace $output $policy 2>&1 1>/dev/null )\necho \"$mytime\" | head -n 1 | cut -d' ' -f1\n\n\necho \"calculate time for 2 tier cache\"\nrm *.csv\nrm *.count\necho \"generating input for tier 2\"\n./shards_3d_talus_mini2 20 ../traces/$cfg $R ../traces/msr/$trace $output $policy $policy 10000000 $res 2>&1 1>/dev/null \n\necho \"calculate time using shards_test \"\nstart=`date +%s`\nfor i in *.csv\ndo\n    ./shards_test 20 19921 1.0 ./$i $output $policy 2>&1 1>/dev/null\ndone\nend=`date +%s`\nruntime=$((end - start))\necho \"TIER2 runtime is : $runtime \\n\"\n\n\necho \"calculate time for 3 tier caches\"\necho \"update .count\"\nfor i in *.csv\ndo\n\tcp ../traces/msr/$trace.count $i.count 2>&1 1>/dev/null\ndone\necho \"generating input for tier 3\"\nfor i in *.csv\ndo\n\techo $i\n\t./shards_3d_talus_mini2 20 ../traces/$cfg 1.0 $i $output $policy $policy 10000000 $res 2>&1 1>/dev/null \ndone\n\n\necho \"calculate time using shards_test\"\nstart=`date +%s`\nfor i in *.csv\ndo\n    ./shards_test 20 19921 1.0 ./$i $output $policy 2>&1 1>/dev/null\ndone\nend=`date +%s`\nruntime=$((end - start))\necho \"TIER3 runtime is :$runtime \\n\"\n\n\necho \"calculate time for 4 tier caches\"\necho \"update .count\"\nfor i in *.csv\ndo\n\tcp ../traces/msr/$trace.count $i.count 2>&1 1>/dev/null\ndone\necho \"generating input for tier 4\"\nfor i in *.csv\ndo\n\techo $i\n\t./shards_3d_talus_mini2 20 ../traces/$cfg 1.0 $i $output $policy $policy 10000000 $res 2>&1 1>/dev/null \ndone\n\n\necho \"calculate time using shards_test\"\nstart=`date +%s`\nfor i in *.csv\ndo\n    ./shards_test 20 19921 1.0 ./$i $output $policy 2>&1 1>/dev/null\ndone\nend=`date +%s`\nruntime=$((end - start))\necho \"TIER4 runtime is :$runtime \\n\"\n\n" }, { "alpha_fraction": 0.4560667872428894, "alphanum_fraction": 0.47853779792785645, "avg_line_length": 29.90220069885254, "blob_id": "d8e0cae2e5c75ad3ac4aa308712a5e4433e5f928", "content_id": "1591d80860e9a7a727b25fc504ce9f3c758365f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 25277, "license_type": "no_license", "max_line_length": 172, "num_lines": 818, "path": "/src/shards_utils.c", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "#include \"shards_utils.h\"\n\n#include 
<stdint.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <stdbool.h>\n#include <string.h>\n#include <unistd.h>\n#include <fcntl.h>\n#include <errno.h>\n#include <sys/types.h>\n#include <sys/stat.h>\n\n//murmurhash3.c COMES FROM wolkykim/qlibc\n\n\n//bool qhashmurmur3_128(const void *data, size_t nbytes, void *retbuf);\n\n/*\nint main(){\n uint64_t buffer[2];\n\n char *x = \"874454515nbcfcvgjdtssryfyu68481455\";\n bool one = qhashmurmur3_128( x , 1 ,buffer);\n uint64_t c0 = buffer[0];\n uint64_t c1 = buffer[1];\n printf(\"%6\" PRIu64 \" \\n\",c0);\n printf(\"%6\" PRIu64 \" \\n\",c1);\n}\n */\n\nbool qhashmurmur3_128(const void *data, size_t nbytes, void *retbuf) {\n if (data == NULL || nbytes == 0)\n return false;\n\n const uint64_t c1 = 0x87c37b91114253d5ULL;\n const uint64_t c2 = 0x4cf5ad432745937fULL;\n\n const int nblocks = nbytes / 16;\n const uint64_t *blocks = (const uint64_t *) (data);\n const uint8_t *tail = (const uint8_t *) ((const uint8_t *) data + (nblocks * 16));\n\n uint64_t h1 = 0;\n uint64_t h2 = 0;\n\n int i;\n uint64_t k1, k2;\n for (i = 0; i < nblocks; i++) {\n k1 = blocks[i * 2 + 0];\n k2 = blocks[i * 2 + 1];\n\n k1 *= c1;\n k1 = (k1 << 31) | (k1 >> (64 - 31));\n k1 *= c2;\n h1 ^= k1;\n\n h1 = (h1 << 27) | (h1 >> (64 - 27));\n h1 += h2;\n h1 = h1 * 5 + 0x52dce729;\n\n k2 *= c2;\n k2 = (k2 << 33) | (k2 >> (64 - 33));\n k2 *= c1;\n h2 ^= k2;\n\n h2 = (h2 << 31) | (h2 >> (64 - 31));\n h2 += h1;\n h2 = h2 * 5 + 0x38495ab5;\n }\n\n k1 = k2 = 0;\n switch (nbytes & 15) {\n case 15:\n k2 ^= (uint64_t) (tail[14]) << 48;\n case 14:\n k2 ^= (uint64_t) (tail[13]) << 40;\n case 13:\n k2 ^= (uint64_t) (tail[12]) << 32;\n case 12:\n k2 ^= (uint64_t) (tail[11]) << 24;\n case 11:\n k2 ^= (uint64_t) (tail[10]) << 16;\n case 10:\n k2 ^= (uint64_t) (tail[9]) << 8;\n case 9:\n k2 ^= (uint64_t) (tail[8]) << 0;\n k2 *= c2;\n k2 = (k2 << 33) | (k2 >> (64 - 33));\n k2 *= c1;\n h2 ^= k2;\n\n case 8:\n k1 ^= (uint64_t) (tail[7]) << 56;\n case 7:\n k1 ^= (uint64_t) (tail[6]) << 48;\n case 6:\n k1 ^= (uint64_t) (tail[5]) << 40;\n case 5:\n k1 ^= (uint64_t) (tail[4]) << 32;\n case 4:\n k1 ^= (uint64_t) (tail[3]) << 24;\n case 3:\n k1 ^= (uint64_t) (tail[2]) << 16;\n case 2:\n k1 ^= (uint64_t) (tail[1]) << 8;\n case 1:\n k1 ^= (uint64_t) (tail[0]) << 0;\n k1 *= c1;\n k1 = (k1 << 31) | (k1 >> (64 - 31));\n k1 *= c2;\n h1 ^= k1;\n };\n\n //----------\n // finalization\n\n h1 ^= nbytes;\n h2 ^= nbytes;\n\n h1 += h2;\n h2 += h1;\n\n h1 ^= h1 >> 33;\n h1 *= 0xff51afd7ed558ccdULL;\n h1 ^= h1 >> 33;\n h1 *= 0xc4ceb9fe1a85ec53ULL;\n h1 ^= h1 >> 33;\n\n h2 ^= h2 >> 33;\n h2 *= 0xff51afd7ed558ccdULL;\n h2 ^= h2 >> 33;\n h2 *= 0xc4ceb9fe1a85ec53ULL;\n h2 ^= h2 >> 33;\n\n h1 += h2;\n h2 += h1;\n\n ((uint64_t *) retbuf)[0] = h1;\n ((uint64_t *) retbuf)[1] = h2;\n\n return true;\n}\n\n\n/*\n An implementation of top-down splaying with sizes\n D. Sleator <[email protected]>, January 1994.\nModified a little by Qingpeng Niu for tracing the global chunck library memory use. Just add a compute sum of size from search node to the right most node.\n */\n\n/*\n An implementation of top-down splaying with sizes\n D. Sleator <[email protected]>, January 1994.\n\n This extends top-down-splay.c to maintain a size field in each node.\n This is the number of nodes in the subtree rooted there. This makes\n it possible to efficiently compute the rank of a key. (The rank is\n the number of nodes to the left of the given key.) It it also\n possible to quickly find the node of a given rank. 
Both of these\n operations are illustrated in the code below. The remainder of this\n introduction is taken from top-down-splay.c.\n\n \"Splay trees\", or \"self-adjusting search trees\" are a simple and\n efficient data structure for storing an ordered set. The data\n structure consists of a binary tree, with no additional fields. It\n allows searching, insertion, deletion, deletemin, deletemax,\n splitting, joining, and many other operations, all with amortized\n logarithmic performance. Since the trees adapt to the sequence of\n requests, their performance on real access patterns is typically even\n better. Splay trees are described in a number of texts and papers\n [1,2,3,4].\n\n The code here is adapted from simple top-down splay, at the bottom of\n page 669 of [2]. It can be obtained via anonymous ftp from\n spade.pc.cs.cmu.edu in directory /usr/sleator/public.\n\n The chief modification here is that the splay operation works even if the\n item being splayed is not in the tree, and even if the tree root of the\n tree is NULL. So the line:\n\n t = splay(i, t);\n\n causes it to search for item with key i in the tree rooted at t. If it's\n there, it is splayed to the root. If it isn't there, then the node put\n at the root is the last one before NULL that would have been reached in a\n normal binary search for i. (It's a neighbor of i in the tree.) This\n allows many other operations to be easily implemented, as shown below.\n\n [1] \"Data Structures and Their Algorithms\", Lewis and Denenberg,\n Harper Collins, 1991, pp 243-251.\n [2] \"Self-adjusting Binary Search Trees\" Sleator and Tarjan,\n JACM Volume 32, No 3, July 1985, pp 652-686.\n [3] \"Data Structure and Algorithm Analysis\", Mark Weiss,\n Benjamin Cummins, 1992, pp 119-130.\n [4] \"Data Structures, Algorithms, and Performance\", Derick Wood,\n Addison-Wesley, 1993, pp 367-375\n */\nTree_mq * splay_mq(T f, Tree_mq *t) {\n Tree_mq N, *l, *r, *y;\n T comp, l_size, r_size;\n if (t == NULL) return t;\n N.left = N.right = NULL;\n // N.list = NULL;\n l = r = &N;\n l_size = r_size = 0;\n\n for (;;) {\n comp = compare(f, t->key);\n if (comp < 0) {\n if (t->left == NULL) break;\n if (compare(f, t->left->key) < 0) {\n y = t->left; /* rotate right */\n t->left = y->right;\n y->right = t;\n t->size = node_size(t->left) + node_size(t->right) + t->list_size;\n t = y;\n if (t->left == NULL) break;\n }\n r->left = t; /* link right */\n r = t;\n t = t->left;\n r_size += r->list_size + node_size(r->right); /* TODO: NEED A SECOND LOOK, POTENTIAL DEBUG*/\n } else if (comp > 0) {\n if (t->right == NULL) break;\n if (compare(f, t->right->key) > 0) {\n y = t->right; /* rotate left */\n t->right = y->left;\n y->left = t;\n t->size = node_size(t->left) + node_size(t->right) + t->list_size;\n t = y;\n if (t->right == NULL) break;\n }\n l->right = t; /* link left */\n l = t;\n t = t->right;\n l_size += l->list_size + node_size(l->left); /* TODO: NEED A SECOND LOOK, POTENTIAL DEBUG*/\n } else {\n break;\n }\n }\n l_size += node_size(t->left); /* Now l_size and r_size are the sizes of */\n r_size += node_size(t->right); /* the left and right trees we just built.*/\n\n t->size = l_size + r_size + t->list_size; /* TODO: NEED A SECOND LOOK, POTENTIAL DEBUG*/\n\n l->right = r->left = NULL;\n\n /* The following two loops correct the size fields of the right path */\n /* from the left child of the root and the right path from the left */\n /* child of the root. 
*/\n for (y = N.right; y != NULL; y = y->right) {\n y->size = l_size;\n l_size -= y->list_size + node_size(y->left); /* TODO: NEED A SECOND LOOK, POTENTIAL DEBUG*/\n }\n for (y = N.left; y != NULL; y = y->left) {\n y->size = r_size;\n r_size -= y->list_size + node_size(y->right); /* TODO: NEED A SECOND LOOK, POTENTIAL DEBUG*/\n }\n\n l->right = t->left; /* assemble */\n r->left = t->right;\n t->left = N.right;\n t->right = N.left;\n\n return t;\n}\n\nTree_lfu * splay_lfu(T f, Tree_lfu *t) {\n Tree_lfu N, *l, *r, *y;\n T comp, l_size, r_size;\n if (t == NULL) return t;\n N.left = N.right = NULL;\n // N.list = NULL;\n l = r = &N;\n l_size = r_size = 0;\n\n for (;;) {\n comp = compare(f, t->key);\n if (comp < 0) {\n if (t->left == NULL) break;\n if (compare(f, t->left->key) < 0) {\n y = t->left; /* rotate right */\n t->left = y->right;\n y->right = t;\n t->size = node_size(t->left) + node_size(t->right) + t->list_size;\n t = y;\n if (t->left == NULL) break;\n }\n r->left = t; /* link right */\n r = t;\n t = t->left;\n r_size += r->list_size + node_size(r->right); /* TODO: NEED A SECOND LOOK, POTENTIAL DEBUG*/\n } else if (comp > 0) {\n if (t->right == NULL) break;\n if (compare(f, t->right->key) > 0) {\n y = t->right; /* rotate left */\n t->right = y->left;\n y->left = t;\n t->size = node_size(t->left) + node_size(t->right) + t->list_size;\n t = y;\n if (t->right == NULL) break;\n }\n l->right = t; /* link left */\n l = t;\n t = t->right;\n l_size += l->list_size + node_size(l->left); /* TODO: NEED A SECOND LOOK, POTENTIAL DEBUG*/\n } else {\n break;\n }\n }\n l_size += node_size(t->left); /* Now l_size and r_size are the sizes of */\n r_size += node_size(t->right); /* the left and right trees we just built.*/\n\n t->size = l_size + r_size + t->list_size; /* TODO: NEED A SECOND LOOK, POTENTIAL DEBUG*/\n\n l->right = r->left = NULL;\n\n /* The following two loops correct the size fields of the right path */\n /* from the left child of the root and the right path from the left */\n /* child of the root. */\n for (y = N.right; y != NULL; y = y->right) {\n y->size = l_size;\n l_size -= y->list_size + node_size(y->left); /* TODO: NEED A SECOND LOOK, POTENTIAL DEBUG*/\n }\n for (y = N.left; y != NULL; y = y->left) {\n y->size = r_size;\n r_size -= y->list_size + node_size(y->right); /* TODO: NEED A SECOND LOOK, POTENTIAL DEBUG*/\n }\n\n l->right = t->left; /* assemble */\n r->left = t->right;\n t->left = N.right;\n t->right = N.left;\n\n return t;\n}\n\nTree * splay(T i, Tree *t)\n/* Splay using the key i (which may or may not be in the tree.) 
*/\n/* The starting root is t, and the tree used is defined by rat */\n/* size fields are maintained */ {\n Tree N, *l, *r, *y;\n T comp, l_size, r_size;\n if (t == NULL) return t;\n N.left = N.right = NULL;\n l = r = &N;\n l_size = r_size = 0;\n\n for (;;) {\n comp = compare(i, t->key);\n if (comp < 0) {\n if (t->left == NULL) break;\n if (compare(i, t->left->key) < 0) {\n y = t->left; /* rotate right */\n t->left = y->right;\n y->right = t;\n t->size = node_size(t->left) + node_size(t->right) + 1;\n t = y;\n if (t->left == NULL) break;\n }\n r->left = t; /* link right */\n r = t;\n t = t->left;\n r_size += 1 + node_size(r->right);\n } else if (comp > 0) {\n if (t->right == NULL) break;\n if (compare(i, t->right->key) > 0) {\n y = t->right; /* rotate left */\n t->right = y->left;\n y->left = t;\n t->size = node_size(t->left) + node_size(t->right) + 1;\n t = y;\n if (t->right == NULL) break;\n }\n l->right = t; /* link left */\n l = t;\n t = t->right;\n l_size += 1 + node_size(l->left);\n } else {\n break;\n }\n }\n l_size += node_size(t->left); /* Now l_size and r_size are the sizes of */\n r_size += node_size(t->right); /* the left and right trees we just built.*/\n t->size = l_size + r_size + 1;\n\n l->right = r->left = NULL;\n\n /* The following two loops correct the size fields of the right path */\n /* from the left child of the root and the right path from the left */\n /* child of the root. */\n for (y = N.right; y != NULL; y = y->right) {\n y->size = l_size;\n l_size -= 1 + node_size(y->left);\n }\n for (y = N.left; y != NULL; y = y->left) {\n y->size = r_size;\n r_size -= 1 + node_size(y->right);\n }\n\n l->right = t->left; /* assemble */\n r->left = t->right;\n t->left = N.right;\n t->right = N.left;\n\n return t;\n}\n\nTree_lfu * insert_lfu(T f, Tree_lfu * t, uint64_t T_i) {\n /* f is the frequency of T_i*/\n /* Each node of the Tree is identified by frequency f, the node also contains list of keys such as T_i */\n /* Insert key T_i into the tree t, if it is not already there. */\n /* Return a pointer to the resulting tree. */\n Tree_lfu * newtree; //Changed the name of this variable from \"new\" to \"newtree\" to allow compiling in C++\n\n if (t != NULL) {\n t = splay_lfu(f, t);\n if (compare(f, t->key) == 0) {\n t->list = g_list_append(t->list, GUINT_TO_POINTER(T_i)); /* Add T_i into t-> */\n t->size = t->size + 1;\n t->list_size = t->list_size + 1;\n return t;\n }\n }\n newtree = (Tree_lfu *) malloc(sizeof (Tree_lfu));\n if (newtree == NULL) {\n printf(\"Ran out of space\\n\");\n exit(1);\n }\n if (t == NULL) {\n newtree->left = newtree->right = NULL;\n newtree->list = NULL;\n } else if (compare(f, t->key) < 0) {\n newtree->left = t->left;\n newtree->right = t;\n t->left = NULL;\n t->size = t->list_size + node_size(t->right);\n } else {\n newtree->right = t->right;\n newtree->left = t;\n t->right = NULL;\n t->size = t->list_size + node_size(t->left);\n }\n newtree->key = f;\n newtree->list = NULL;\n newtree->list = g_list_append(newtree->list, GUINT_TO_POINTER(T_i));\n newtree->list_size = 1;\n newtree->size = 1 + node_size(newtree->left) + node_size(newtree->right);\n return newtree;\n}\n\nTree_mq * insert_mq(T f, Tree_mq * t, uint64_t T_i) {\n /* f is the frequency of T_i*/\n /* Each node of the Tree is identified by frequency f, the node also contains list of keys such as T_i */\n /* Insert key T_i into the tree t, if it is not already there. */\n /* Return a pointer to the resulting tree. 
*/\n Tree_mq * newtree; //Changed the name of this variable from \"new\" to \"newtree\" to allow compiling in C++\n //printf(\"in insert function %d\\n\", GPOINTER_TO_UINT(GUINT_TO_POINTER(T_i)));\n\n if (t != NULL) {\n t = splay_mq(f, t);\n if (compare(f, t->key) == 0) {\n t->list = g_list_append(t->list, GUINT_TO_POINTER(T_i)); /* Add T_i into t-> */\n t->size = t->size + 1;\n t->list_size = t->list_size + 1;\n return t;\n }\n }\n newtree = (Tree_mq *) malloc(sizeof (Tree_mq));\n if (newtree == NULL) {\n printf(\"Ran out of space\\n\");\n exit(1);\n }\n if (t == NULL) {\n newtree->left = newtree->right = NULL;\n newtree->list = NULL;\n } else if (compare(f, t->key) < 0) {\n newtree->left = t->left;\n newtree->right = t;\n t->left = NULL;\n t->size = t->list_size + node_size(t->right);\n } else {\n newtree->right = t->right;\n newtree->left = t;\n t->right = NULL;\n t->size = t->list_size + node_size(t->left);\n }\n newtree->key = f;\n newtree->list = NULL;\n newtree->list = g_list_append(newtree->list, GUINT_TO_POINTER(T_i));\n newtree->list_size = 1;\n newtree->size = 1 + node_size(newtree->left) + node_size(newtree->right);\n\n return newtree;\n}\n\nTree * insert(T i, Tree * t) {\n /* Insert key i into the tree t, if it is not already there. */\n /* Return a pointer to the resulting tree. */\n Tree * newtree; //Changed the name of this variable from \"new\" to \"newtree\" to allow compiling in C++\n\n if (t != NULL) {\n t = splay(i, t);\n if (compare(i, t->key) == 0) {\n return t; /* it's already there */\n }\n }\n newtree = (Tree *) malloc(sizeof (Tree));\n if (newtree == NULL) {\n printf(\"Ran out of space\\n\");\n exit(1);\n }\n if (t == NULL) {\n newtree->left = newtree->right = NULL;\n } else if (compare(i, t->key) < 0) {\n newtree->left = t->left;\n newtree->right = t;\n t->left = NULL;\n t->size = 1 + node_size(t->right);\n } else {\n newtree->right = t->right;\n newtree->left = t;\n t->right = NULL;\n t->size = 1 + node_size(t->left);\n }\n newtree->key = i;\n newtree->size = 1 + node_size(newtree->left) + node_size(newtree->right);\n\n return newtree;\n}\n\nTree_lfu * deletetree_lfu(T f, Tree_lfu * t, uint64_t T_i) {\n /* f is the frequency of T_i*/\n /* Each node of the Tree is identified by frequency f, the node also contains list of keys such as T_i */\n /* Deletes T_i from the tree if it's there. */\n /* Return a pointer to the resulting tree. */\n Tree_lfu * x;\n //T tsize;\n\n if (t == NULL) return NULL;\n //tsize = t->size;\n t = splay_lfu(f, t);\n if (compare(f, t->key) == 0) { /* found it */\n if (t->list_size == 1) { /*T_i is the only element in this node, this node will be deleted */\n if (t->left == NULL) {\n x = t->right;\n } else {\n x = splay_lfu(f, t->left);\n x->right = t->right;\n }\n free(t);\n if (x != NULL) {\n x->size = x->list_size + node_size(x->left) + node_size(x->right);\n }\n return x;\n } else {\n t->list = g_list_remove(t->list, GUINT_TO_POINTER(T_i));\n t->list_size = t->list_size - 1;\n t->size = t->size - 1;\n return t;\n }\n\n } else {\n\n return t; /* It wasn't there */\n }\n}\n\nTree_mq * deletetree_mq(T f, Tree_mq * t, uint64_t T_i) {\n /* f is the frequency of T_i*/\n /* Each node of the Tree is identified by frequency f, the node also contains list of keys such as T_i */\n /* Deletes T_i from the tree if it's there. */\n /* Return a pointer to the resulting tree. 
*/\n    Tree_mq * x;\n    //T tsize;\n\n    if (t == NULL) return NULL;\n    //tsize = t->size;\n    t = splay_mq(f, t);\n    if (compare(f, t->key) == 0) { /* found it */\n        if (t->list_size == 1) { /*T_i is the only element in this node, this node will be deleted */\n            if (t->left == NULL) {\n                x = t->right;\n            } else {\n                x = splay_mq(f, t->left);\n                x->right = t->right;\n            }\n            free(t);\n            if (x != NULL) {\n                x->size = x->list_size + node_size(x->left) + node_size(x->right);\n            }\n            return x;\n        } else {\n            t->list = g_list_remove(t->list, GUINT_TO_POINTER(T_i));\n            t->size = t->size - 1;\n            return t;\n        }\n\n    } else {\n\n        return t; /* It wasn't there */\n    }\n}\n\nTree * deletetree(T i, Tree *t) {\n    /* Deletes i from the tree if it's there. */\n    /* Return a pointer to the resulting tree. */\n    Tree * x;\n    T tsize;\n\n    if (t == NULL) return NULL;\n    tsize = t->size;\n    t = splay(i, t);\n    if (compare(i, t->key) == 0) { /* found it */\n        if (t->left == NULL) {\n            x = t->right;\n        } else {\n            x = splay(i, t->left);\n            x->right = t->right;\n        }\n        free(t);\n        if (x != NULL) {\n            x->size = tsize - 1;\n        }\n        return x;\n    } else {\n\n        return t; /* It wasn't there */\n    }\n}\n\nTree *find_rank(T r, Tree *t) {\n    /* Returns a pointer to the node in the tree with the given rank. */\n    /* Returns NULL if there is no such node. */\n    /* Does not change the tree. To guarantee logarithmic behavior, */\n    /* the node found here should be splayed to the root. */\n    T lsize;\n    if ((r < 0) || (r >= node_size(t))) return NULL;\n    for (;;) {\n        lsize = node_size(t->left);\n        if (r < lsize) {\n            t = t->left;\n        } else if (r > lsize) {\n            r = r - lsize - 1;\n            t = t->right;\n        } else {\n\n            return t;\n        }\n    }\n}\n\nvoid freetree(Tree* t) {\n\n    if (t == NULL) return;\n    freetree(t->right);\n    freetree(t->left);\n    free(t);\n}\n\nvoid printtree(Tree * t, int d) {\n    //printf(\"%p\\n\",t);\n    int i;\n    if (t == NULL) return;\n    printtree(t->right, d + 1);\n\n    for (i = 0; i < d; i++) printf(\" \");\n    printf(\"%d(%d)\\n\", t->key, t->size);\n    printtree(t->left, d + 1);\n}\n\n/*\ncalc_distance() implemented by Jorge Murillo, 2017\n\nThis function takes a key and returns the number of keys that have a greater or equal value than that given.\nFor SHARDS, the key is the Timestamp of the reference to an object in the trace (1 for the first object, 2 for the second, etc.) and the returned value\nis the reuse distance of that reference.\nFor example: If we have a trace with the following objects\n\n a b c a d d b\n       ^\nFor the 5th object read in the trace (the second instance of 'a'), we call calc_distance(5, dist_tree), and the returned value will be 3. 
Because the tree would look like:\n\n c\n /\n b\n /\n a\n\nMeaning that there are at least three values equal or greater than a.\n */\nint calc_distance(T timestamp, Tree *t) {\n\n int d = 1;\n int current_key = 0;\n for (;;) {\n current_key = t->key;\n if (timestamp > current_key) {\n t = t->right;\n } else if (timestamp < current_key) {\n\n d++;\n Tree *tmp = t->right;\n if (tmp != NULL) {\n d = d + tmp->size;\n };\n t = t->left;\n\n } else {\n Tree *tmp = t->right;\n if (tmp != NULL) {\n\n d = d + tmp->size;\n };\n return d;\n }\n\n }\n\n}\n\nint calc_distance_lfu(T f, Tree_lfu *t, uint64_t T_i) {\n\n int d = 1;\n int current_key = 0;\n for (;;) {\n current_key = t->key;\n if (f > current_key) {\n t = t->right;\n } else if (f < current_key) {\n\n d = d + t->list_size + node_size(t->right);\n //Tree_lfu *tmp = t->right;\n //if(tmp != NULL){\n //\td = d + tmp->size;\n //};\n t = t->left;\n\n } else {\n //Tree_lfu *tmp = t->right;\n //if (tmp != NULL) {\n // d = d + tmp->size;\n //};\n //return 1;\n d = d + node_size(t->right) + (t->list_size - g_list_index(t->list, GUINT_TO_POINTER(T_i)) - 1);\n\n return d;\n }\n\n }\n\n}\n\nint calc_distance_mq(T f, Tree_mq *t, uint64_t T_i) {\n\n int d = 1;\n int current_key = 0;\n for (;;) {\n current_key = t->key;\n if (f > current_key) {\n t = t->right;\n } else if (f < current_key) {\n\n d = d + t->list_size + node_size(t->right);\n //Tree_lfu *tmp = t->right;\n //if(tmp != NULL){\n //\td = d + tmp->size;\n //};\n t = t->left;\n\n } else {\n //Tree_lfu *tmp = t->right;\n //if (tmp != NULL) {\n // d = d + tmp->size;\n //};\n //return 1;\n d = d + node_size(t->right) + (t->list_size - g_list_index(t->list, GUINT_TO_POINTER(T_i)) - 1);\n\n return d;\n }\n\n }\n\n}\n\nuint64_t get_first_from_freq_mq(T f, Tree_mq *t) {\n\n int current_key = 0;\n gpointer p;\n while (t != NULL) {\n current_key = t->key;\n if (f > current_key) {\n if (t->right == NULL)\n return 0;\n else\n t = t->right;\n } else if (f < current_key) {\n if (t->left == 0)\n return 0;\n else\n t = t->left;\n } else {\n //Tree_lfu *tmp = t->right;\n //if (tmp != NULL) {\n // d = d + tmp->size;\n //};\n //return 1;\n if (g_list_length(t->list) == 0)\n return 0;\n\n p = g_list_nth_data(t->list, 0);\n // printf(\"in get function %d\\n\", GPOINTER_TO_UINT(p));\n return GPOINTER_TO_UINT(p);\n }\n }\n return 0;\n}" }, { "alpha_fraction": 0.5733813047409058, "alphanum_fraction": 0.638129472732544, "avg_line_length": 23.821428298950195, "blob_id": "bf95d40fc784823396ceab3f11fb98f9d06e0509", "content_id": "5d81573caab48fa690f347cd0c3922317dc7f668", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 135, "num_lines": 56, "path": "/Exp1.sh", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "#!/bin/bash\necho \"Exp1 is about run time analysis\"\n\ntrace='prn_0.csv.gz.out'\ncfg='minisim_minisim_prn_0.0.01.mrc_LRU_LRU.cfg'\n#cfg is not used here\nR='0.02'\nstep_size='19921'\nres='50'\npolicy='LRU'\noutput='test_0'\necho \"doing for trace $trace \" \necho \"\\n\"\necho \"R=$R, step = $res\"\necho \"time for single tier cache LRU is \"\nmytime=$(time ./shards_test 20 $step_size $R ../traces/msr/$trace $output $policy 2>&1 1>/dev/null )\necho \"$mytime\" | head -n 1 | cut -d' ' -f1\n\necho \"\"\necho \"time for 2 tier cache LRU is \"\nmytime=$(time ./shards_3d_minisim2 20 ../traces/$cfg $R ../traces/msr/$trace $output $policy $policy 10000000 $res 2>&1 1>/dev/null )\necho \"$mytime\" | 
head -n 1 | cut -d' ' -f1\n\necho \"\"\necho \"time for 3 tier cache LRU is \"\n\nfor i in *.csv\ndo\n\tpython count_unique.py $i $i.count 2>&1 1>/dev/null\ndone\nstart=`date +%s`\nfor i in *.csv\ndo \n    echo \"$i\"\n    ./shards_3d_minisim2.1 20 1 1.0 ./$i $output $policy $policy 10000000 $res 2>&1 1>/dev/null\ndone\nend=`date +%s`\nruntime=$((end - start))\necho \"$runtime\"\n#echo \"$mytime\" | head -n 1 | cut -d' ' -f1\necho \"\"\necho \"time for 4 tier cache LRU is \"\n\nfor i in *.csv\ndo\n\tpython count_unique.py $i $i.count 2>&1 1>/dev/null\ndone\nstart=`date +%s`\nfor i in *.csv\ndo \n    echo \"$i\"\n    ./shards_3d_minisim2.1 20 1 1.0 ./$i $output $policy $policy 10000000 $res 2>&1 1>/dev/null\ndone\nend=`date +%s`\nruntime=$((end - start))\necho \"$runtime\"\n" }, { "alpha_fraction": 0.6247308254241943, "alphanum_fraction": 0.6398061513900757, "avg_line_length": 27.569231033325195, "blob_id": "ba2cb0d84ac87db0ec78b0ae8160356b08608976", "content_id": "6694c42c2c1ad4619b45af6241533c6d0d78c47d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 5572, "license_type": "no_license", "max_line_length": 118, "num_lines": 195, "path": "/src/shards_3d_layer1.c", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <stdio.h>\n#include <glib.h>\n#include <time.h>\n#include <stdint.h>\n#include \"SHARDS.h\"\n\nvoid close_sub_table(gpointer key, gpointer value, gpointer userdata){\n\t//fclose(value);\n\tg_hash_table_destroy(value);\n}\nint main(int argc, char** argv){\n\n\t/*\targv[1] = length of each object.\n\t\targv[2] = bucket size\n\t\targv[3] = R\n\t\targv[4] = Tracefile\n\t\targv[5] = mrc file\n\t\targv[6] = op flag e.g. LRU LFU, etc\n\t*/\n\tint val;\n\tGHashTable *bucket_table = g_hash_table_new(g_direct_hash, g_direct_equal);\n\tchar filename[sizeof \"0001.csv\"];\n\n\tif(strcmp(argv[6], \"LRU\") != 0 && strcmp(argv[6], \"LFU\") != 0){\n\t\tprintf(\"%s\",argv[6]);\n\t\tprintf(\"Currently only LRU and LFU are supported.\\n\");\n\t\treturn -1;\n\t};\n\tprintf(\"SHARDS\\n\");\n\tint obj_length = strtol(argv[1],NULL,10);\n\n\tchar* object = (char*)calloc((obj_length+2),sizeof(char));\n\tchar* shadow_object = NULL;\n\tint bucket = strtol(argv[2],NULL,10);\n\t\n\t\n\n\tdouble R = strtod(argv[3], NULL);\n\tSHARDS *shards = NULL;\n\tif(strcmp(argv[6], \"LRU\") == 0)\n\t\tshards = SHARDS_fixed_size_init_R( 320000,R, (unsigned int)bucket, String);\n\telse if(strcmp(argv[6], \"LFU\") == 0)\n\t\tshards = SHARDS_fixed_size_init_R_LFU( 320000,R, (unsigned int)bucket, String);\n\tFILE *file;\n\tfile = fopen(argv[4], \"r\");\n\tclock_t start_time = clock();\n\n\tint cnt = 0;\n\tint bucket_cnt = 0;\n\n\twhile(fgets(object, obj_length+2, file)!=NULL){\n\t\tshadow_object = (char*)calloc((obj_length+2),sizeof(char));\n\t\tstrncpy(shadow_object, object, obj_length+2);\n\t\tval = SHARDS_feed_obj(shards, object, obj_length, argv[6], 1); /* level 1: SHARDS_feed_obj() takes a level argument, see SHARDS.h and shards_test.c */\n\t\t//printf(\"val is %d \\n\", val);\n\t\tif(val != -1){\t\n\t\t\tif (!g_hash_table_contains(bucket_table, GINT_TO_POINTER(val))){\n\t\t\t\t//sprintf(filename, \"%04d.csv\", val/bucket);\n\t\t\t\t//FILE *sub_f = fopen(filename,\"w\");\n\t\t\t\tGHashTable *sub_table = g_hash_table_new(g_direct_hash, g_direct_equal);\n\t\t\t\tg_hash_table_insert(bucket_table, GINT_TO_POINTER(val), GINT_TO_POINTER(sub_table));\n\t\t\t\t//g_hash_table_insert(bucket_table, &val, sub_f);\n\t\t\t\t//fprintf(sub_f, \"%s,%d\\n\", strtok(shadow_object, \"\\n\"), cnt);\n\t\t\t\tg_hash_table_insert(sub_table, 
GINT_TO_POINTER(cnt), shadow_object);\n\t\t\t\tbucket_cnt++;\n\t\t\t}\n\t\t\telse{\n\t\t\t\t//fprintf(g_hash_table_lookup(bucket_table, &val), \"%s,%d\\n\", strtok(shadow_object, \"\\n\"), cnt);\n\t\t\t\tg_hash_table_insert(g_hash_table_lookup(bucket_table, GINT_TO_POINTER(val)), GINT_TO_POINTER(cnt), shadow_object);\n\t\t\t}\n\t\t}\n\t\t//else{\n\t\t//\tprintf(val);\n\t\t//}\n\n\t\tobject = (char*)calloc((obj_length+2),sizeof(char));\n\t\tcnt++;\n\t\t/*\n\t\tif(cnt==100000){\n\t\t\tbreak;\n\t\t}\n\t\t*/\n\t}\n\t// generate sub inputs for 2nd layer calculation\n\tint index = 0;\n\n\tGHashTable *out_table = g_hash_table_lookup(bucket_table, GINT_TO_POINTER(index));\n\tprintf(\"g hash table size is :%d\\n\",g_hash_table_size(out_table));\n\n\n\tindex = 262144;\n\tprintf(\"g hash table size is :%d\\n\",g_hash_table_size(g_hash_table_lookup(bucket_table, GINT_TO_POINTER(index))));\n\n\tbucket_cnt--;\t\n\tsprintf(filename, \"%04d.csv\", bucket_cnt);\n\tFILE *out_f = fopen(filename, \"w\");\n\tGList *out_keys = g_hash_table_get_keys(out_table);\n\tout_keys = g_list_sort(out_keys, (GCompareFunc) intcmp_gdirect );\n\twhile(out_keys!=NULL){\n\t\tfprintf(out_f, \"%s\", (char*)g_hash_table_lookup(out_table, out_keys->data));\n\t\t//printf(out_keys->data);\n\t\tout_keys = out_keys->next;\n\t}\n\tfclose(out_f);\n\n\tfor(int bucket_c = bucket_cnt -1; bucket_c >= 0; bucket_c --){\n\t\tindex = (bucket_c+1)*bucket;\n\t\tGHashTable *tmp_table = g_hash_table_lookup(bucket_table, GINT_TO_POINTER(index));\n\t\t\n\t\tprintf(\"index is %d, g hash table size is :%d\\n\", index, g_hash_table_size(out_table));\t\n\t\t\n\t\tGList *tmp_keys = g_hash_table_get_keys(tmp_table);\n\t\twhile(tmp_keys != NULL){\n\t\t\tg_hash_table_insert(out_table,tmp_keys->data, g_hash_table_lookup(tmp_table, tmp_keys->data));\n\t\t\ttmp_keys = tmp_keys->next;\n\t\t}\n\t\tsprintf(filename, \"%04d.csv\", bucket_c);\n\t\tout_f = fopen(filename, \"w\");\n\t\tGList *out_keys = g_hash_table_get_keys(out_table);\n\t\tout_keys = g_list_sort(out_keys, (GCompareFunc) intcmp_gdirect );\n\t\twhile(out_keys!=NULL){\n\t    \tfprintf(out_f, \"%s\", (char*) g_hash_table_lookup(out_table, out_keys->data));\n\t\t\tout_keys = out_keys->next;\n\t\t}\n\t\tfclose(out_f);\n\n\t}\n\n\n\t//close all sub_f handles\n\tg_hash_table_foreach(bucket_table, close_sub_table, NULL);\n\tg_hash_table_destroy(bucket_table);\n\t\n\n    //fclose(file);\n\n\t//printf(\"Loop 1 ended.\\n\");\n\tGHashTable *mrc = MRC(shards);\n\n\t\n\tFILE *mrc_file = fopen(argv[5],\"w\");\n\tGList *keys = g_hash_table_get_keys(mrc);\n\tkeys = g_list_sort(keys, (GCompareFunc) intcmp);\n    GList *first = keys;\n\twhile(keys!=NULL){\n\t\t//printf(\"%d,%1.7f\\n\",*(int*)keys->data, *(double*)g_hash_table_lookup(mrc, keys->data) );\n\t\tfprintf(mrc_file,\"%7d,%1.7f\\n\",*(int*)keys->data, *(double*)g_hash_table_lookup(mrc, keys->data) );\n\n\n\t\tkeys=keys->next;\n\t}\n\n\t/*\n\tg_hash_table_destroy(mrc);\n\n\n\t//HERE STARTS THE SECOND LOOP, READING WHAT'S LEFT OF THE TRACE FILE\n\twhile(fgets(object, obj_length+2, file)!=NULL){\n\t\t\n\t\n\t\tSHARDS_feed_obj(shards, object, obj_length);\n\t\t\n\t\tobject = (char*)calloc((obj_length+2),sizeof(char));\n\t\tcnt++;\n\t\t\n\t}\n\n\tfree(object);\n\tprintf(\"Loop 2 ended \\n\");\n\tmrc = MRC(shards);\n\t\n\tprintf(\"MRC created.\\n\");\n\t*/\n\tclock_t end_time = clock();\n\t\n\tfclose(mrc_file);\n\tg_list_free(first);\n\t\n\tg_hash_table_destroy(mrc);\n\t\n\tprintf(\"%ld\\n\", start_time);\n\tprintf(\"%ld\\n\", end_time);\n\tint total_time = ((end_time - 
start_time))/CLOCKS_PER_SEC;\n\tprintf(\"TIME: %d\\n\", total_time);\n\tunsigned int objects_parsed = shards->total_objects;\n \n\tdouble throughput = objects_parsed/(total_time+1);\n    SHARDS_free(shards);\n\tprintf(\"Throughput: %f\\n\", throughput);\n\n\t//SHARDS_free(shards);\n    return 0;\n\n}\n\n" }, { "alpha_fraction": 0.6803653240203857, "alphanum_fraction": 0.689497709274292, "avg_line_length": 17.25, "blob_id": "380418a4ed3eeb5d2c2f9e5f68c1416fba223c2e", "content_id": "7e16125629cfe26186d73d79504a7e2d30c7408f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 35, "num_lines": 12, "path": "/count_unique.py", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "import sys\nfrom collections import Counter\nf = open(sys.argv[1], \"r\")\nlines = f.readlines()\n\nprint(len(Counter(lines)))\n\nout = open(sys.argv[2], \"w\")\nout.write(str(len(Counter(lines))))\nout.write(\"\\n\")\nout.close()\nf.close()\n" }, { "alpha_fraction": 0.5242984294891357, "alphanum_fraction": 0.5448322892189026, "avg_line_length": 23.350000381469727, "blob_id": "767d489f4c04cb4923dd1a0578bc0b399133e060", "content_id": "1aa78f0a00af420721827ddc8227915db6c58691", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 2922, "license_type": "no_license", "max_line_length": 110, "num_lines": 120, "path": "/src/shards_test.c", "repo_name": "lzhzero/SHARDS-C", "src_encoding": "UTF-8", "text": "#include <stdlib.h>\n#include <stdio.h>\n#include <glib.h>\n#include <time.h>\n#include <stdint.h>\n#include \"SHARDS.h\"\n\nint main(int argc, char** argv) {\n\n    /*\targv[1] = length of each object.\n        argv[2] = bucket size\n        argv[3] = R\n        argv[4] = Tracefile\n        argv[5] = mrc file\n        argv[6] = op flag e.g. 
LRU LFU, etc\n     */\n    if (strcmp(argv[6], \"LRU\") != 0 && strcmp(argv[6], \"LFU\") != 0) {\n        printf(\"%s\", argv[6]);\n        printf(\"Currently only LRU and LFU are supported.\\n\");\n        return -1;\n    };\n    printf(\"SHARDS\\n\");\n    int obj_length = strtol(argv[1], NULL, 10);\n\n    char* object = (char*) calloc((obj_length + 2), sizeof (char));\n\n    int bucket = strtol(argv[2], NULL, 10);\n\n\n\n    double R = strtod(argv[3], NULL);\n    SHARDS *shards = NULL;\n    if (strcmp(argv[6], \"LRU\") == 0)\n        shards = SHARDS_fixed_size_init_R(320000, R, (unsigned int) bucket, String);\n    else if (strcmp(argv[6], \"LFU\") == 0)\n        shards = SHARDS_fixed_size_init_R_LFU(320000, R, (unsigned int) bucket, String);\n    FILE *file;\n    file = fopen(argv[4], \"r\");\n    clock_t start_time = clock();\n\n    int cnt = 0;\n\n    while (fgets(object, obj_length + 2, file) != NULL) {\n\n\n        SHARDS_feed_obj(shards, object, obj_length, argv[6], 1);\n\n        object = (char*) calloc((obj_length + 2), sizeof (char));\n        cnt++;\n        /*\n        if(cnt==100000){\n                break;\n        }\n         */\n    }\n\n\n\n\n\n    //fclose(file);\n\n    //printf(\"Loop 1 ended.\\n\");\n    GHashTable *mrc = MRC(shards);\n\n\n    FILE *mrc_file = fopen(argv[5], \"w\");\n    GList *keys = g_hash_table_get_keys(mrc);\n    keys = g_list_sort(keys, (GCompareFunc) intcmp);\n    GList *first = keys;\n    while (keys != NULL) {\n        //printf(\"%d,%1.7f\\n\",*(int*)keys->data, *(double*)g_hash_table_lookup(mrc, keys->data) );\n        fprintf(mrc_file, \"%7d,%1.7f\\n\", *(int*) keys->data, *(double*) g_hash_table_lookup(mrc, keys->data));\n\n\n        keys = keys->next;\n    }\n\n    /*\n    g_hash_table_destroy(mrc);\n\n\n    //HERE STARTS THE SECOND LOOP, READING WHAT'S LEFT OF THE TRACE FILE\n    while(fgets(object, obj_length+2, file)!=NULL){\n\n\n        SHARDS_feed_obj(shards, object, obj_length);\n\n        object = (char*)calloc((obj_length+2),sizeof(char));\n        cnt++;\n\n    }\n\n    free(object);\n    printf(\"Loop 2 ended \\n\");\n    mrc = MRC(shards);\n\n    printf(\"MRC created.\\n\");\n     */\n    clock_t end_time = clock();\n\n    fclose(mrc_file);\n    g_list_free(first);\n\n    g_hash_table_destroy(mrc);\n\n    printf(\"%ld\\n\", start_time);\n    printf(\"%ld\\n\", end_time);\n    int total_time = ((end_time - start_time)) / CLOCKS_PER_SEC;\n    printf(\"TIME: %d\\n\", total_time);\n    unsigned int objects_parsed = shards->total_objects;\n\n    double throughput = objects_parsed / (total_time + 1);\n    SHARDS_free(shards);\n    printf(\"Throughput: %f\\n\", throughput);\n\n    //SHARDS_free(shards);\n    return 0;\n\n}\n" }, { "alpha_fraction": 0.6815849542617798, "alphanum_fraction": 0.6912770867347717, "avg_line_length": 20.648147583007812, "blob_id": "ae2d58292e926999413b6255c20036b4f355b499", "content_id": "aca97ad9fd48a9dc1a69148f9e433365eeed55f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 3508, "license_type": "no_license",
initial_R_value;\n double R;\n uint64_t P;\n uint64_t T;\n\n //Data structure for shard_3d\n uint64_t layer1_resolution;\n\n //Data structures needed for SHARDS_fixed_rate\n /*\n dist_tree=\n time_table pairs each read object with the last reference time it was read.\n\n\n */\n Tree *dist_tree;\n GHashTable *time_table;\n GHashTable *dist_histogram;\n //Additional data structures needed for SHARDS_fixed_size\n GHashTable *set_table;\n Tree *set_tree;\n Tree *evic_tree;\n GList *set_list;\n GList *set_list_search;\n\n /*data structure for LFU*/\n GHashTable *freq_table;\n Tree_lfu *dist_tree_lfu;\n\n unsigned int S_max;\n unsigned int set_size;\n\n unsigned int bucket_size;\n\n\n /*data structure for MQ*/\n\n\n int max_f_mq;\n GHashTable *mq_freq_table;\n GHashTable *mq_tick_table;\n Tree_mq *dist_tree_mq;\n GList *mq_out;\n\n //Counter for the amount of evicted objects\n unsigned int evic_obj;\n\n\n unsigned int total_objects;\n int num_obj;\n double fraction;\n\n uint64_t tick;\n uint64_t life_time;\n};\n\n\nSHARDS* SHARDS_fixed_rate_init(double R_init, unsigned int bucket_size, object_Type type);\n\nSHARDS* SHARDS_fixed_size_init(unsigned int max_setsize, unsigned int bucket_size, object_Type type);\n\nSHARDS* SHARDS_fixed_size_init_R(unsigned int max_setsize, double R_init, unsigned int bucket_size, object_Type type);\n\n/*LFU implementation*/\nSHARDS* SHARDS_fixed_size_init_R_LFU(unsigned int max_setsize, double R_init, unsigned int bucket_size, object_Type type);\n//void SHARDS_feed_obj_LFU(SHARDS *shards, void* object, size_t nbytes);\n\nSHARDS* SHARDS_fixed_size_init_R_MQ(unsigned int max_setsize, double R_init, unsigned int bucket_size, object_Type type);\n\n\nint SHARDS_feed_obj(SHARDS *shards, void* object, size_t nbytes, char* algo, int level);\n// returns int bucket\n\nvoid SHARDS_free(SHARDS* shards);\n\nGHashTable *MRC(SHARDS* shards);\n\nGHashTable *MRC_empty(SHARDS* shards);\n\n//This is used for g_direct int comparison\nint intcmp_gdirect(const void *x, const void *y);\n\nint intcmp(const void *x, const void *y);\n\nint uint64cmp(const void *x, const void *y);\n\nguint g_uint64_hash(gconstpointer v);\n\ngboolean g_uint64_equal(gconstpointer v1, gconstpointer v2);\n\nint doublecmp(const void *x, const void *y);\n\n\n//private functions\n/*\nunsigned int calc_reuse_dist(void *object, unsigned int num_obj, GHashTable **time_table, Tree **tree, shards_version version);\n\nvoid update_dist_table(uint64_t reuse_dist ,GHashTable **dist_table);\n\nvoid update_dist_table_fixed_size(uint64_t reuse_dist, GHashTable **dist_table, uint64_t T_new);\n\n\nGHashTable *MRC_fixed_rate(SHARDS* shards);\n\nGHashTable *MRC_fixed_rate_empty(SHARDS* shards);\n\nGHashTable *MRC_fixed_size(SHARDS *shards);\n\nGHashTable *MRC_fixed_size_empty(SHARDS *shards);\n\n\nbool dummy(void* x);\n */\n\n/*\n#ifdef __cplusplus\n}\n#endif\n */\n\n#endif\n\n" } ]
21
Oxerene/PDF-Merger
https://github.com/Oxerene/PDF-Merger
62d836a50eca42c5e2109b266bbe2b1d443aeca2
4a2013c55471a383edb2e093660e611d4df12d13
6d689904d8ee966051a98b61a6137ab4ad0ce827
refs/heads/main
2023-05-27T18:19:21.176238
2021-06-16T22:02:13
2021-06-16T22:02:13
377,612,729
0
0
null
2021-06-16T19:58:54
2021-06-16T19:58:57
2021-06-16T22:02:13
null
[ { "alpha_fraction": 0.6601941585540771, "alphanum_fraction": 0.6747573018074036, "avg_line_length": 16.25, "blob_id": "2f59c1a74e50f81091f9ddb34789f2721bc643a8", "content_id": "d2c9d90583a01cbb10400f8e46aee11d0c9a9cc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 35, "num_lines": 12, "path": "/pdfmerger.py", "repo_name": "Oxerene/PDF-Merger", "src_encoding": "UTF-8", "text": "import sys\nimport PyPDF2\n\nfiles = sys.argv[1:]\n\ndef pdfMerge(pdf_list):\n merger = PyPDF2.PdfFileMerger()\n for pdf in pdf_list:\n merger.append(pdf)\n merger.write('super.pdf')\n\npdfMerge(files)" } ]
1
OlegGlo/2D04
https://github.com/OlegGlo/2D04
ec879a335ec1c10e12047251e26d91368924ff14
f872e2b2d06614f29c7eed7f3616613f19aa1d56
57eace8eeca00afe6881422d944f1c91a5a25f5b
refs/heads/master
2022-08-01T03:49:51.506014
2020-05-23T19:57:57
2020-05-23T19:57:57
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.37931033968925476, "alphanum_fraction": 0.4137931168079376, "avg_line_length": 8.5, "blob_id": "5b969729e98fad25aa4175db03320645f46a485e", "content_id": "dbdedf7828cb27a704a041a272ba64824e2d0f03", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 58, "license_type": "no_license", "max_line_length": 13, "num_lines": 6, "path": "/finalExam/prob53.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "a = 1\ndef b(c):\n d = a + c\n return d\n\nprint(b(2))\n\n" }, { "alpha_fraction": 0.37735849618911743, "alphanum_fraction": 0.6037735939025879, "avg_line_length": 6.285714149475098, "blob_id": "c70b1c8d804230d21feea97b1a2acde10189de79", "content_id": "216582d803014fb57b3bb23abbc603fb6b36ff66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 53, "license_type": "no_license", "max_line_length": 12, "num_lines": 7, "path": "/finalExam/prob57.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\nprint(4%2)\n\nprint(15%7)\n\nprint(25%12)\n\nprint(38%6)\n\n" }, { "alpha_fraction": 0.3499999940395355, "alphanum_fraction": 0.4000000059604645, "avg_line_length": 3, "blob_id": "a374b8837747b113fec895c0adc1b262d6c5128b", "content_id": "e4386f4237a00b96aa7efdf74510aea6a132be9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20, "license_type": "no_license", "max_line_length": 9, "num_lines": 5, "path": "/Week 6/Lecture 6.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n'''\n\nLecture 6\n\n'''" }, { "alpha_fraction": 0.41015625, "alphanum_fraction": 0.43359375, "avg_line_length": 9.239999771118164, "blob_id": "5e00c4911c6a2b6cc468d8e85dcfacb45e986caf", "content_id": "8d5a26724b2d2fc60fba1416ad4d4362aef19597", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 30, "num_lines": 25, "path": "/finalExam/prob21.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\ndef base():\n\n b = \"hello, Jupyter\"\n c = \"\"\n\n for a in b:\n c += chr(ord(a)-10)\n\n print(c)\n\n\n\ndef option4():\n\n b = \"hello, Jupyter\"\n c = \"\"\n\n for i in range(len(b)):\n\n c += chr(ord(b[i])-10)\n\n print(c)\n\nbase()\noption4()" }, { "alpha_fraction": 0.5502008199691772, "alphanum_fraction": 0.5742971897125244, "avg_line_length": 8.576923370361328, "blob_id": "7c4050a644c0dc322feb416e963a084fff4bd9a6", "content_id": "de23813f3a8176c6a1636f18da70242dfd3b0d23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 249, "license_type": "no_license", "max_line_length": 34, "num_lines": 26, "path": "/Week 5/Lecture 5.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n'''\nfucntions (basic shit)\n\nwe can add lists (a + b)\n\nglobal vs local variables\n\n\n\n'''\n\nx,y=10,20\n\ndef example(x):\n global y\n #print(x,y)\n y = y + 10\n\n print(x,y)\n return x\n\nexample(x)\n\nprint (x,y)\n\n#this is global / local example^^^" }, { "alpha_fraction": 0.6207046508789062, "alphanum_fraction": 0.6441931128501892, "avg_line_length": 21.865671157836914, "blob_id": "3eebd8fc3ef4a2358d58365e72200b7e593c5531", "content_id": "0680f89dd8b1ce4505f6bfbbfd97866fcff0657e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4599, "license_type": "no_license", 
"max_line_length": 110, "num_lines": 201, "path": "/Week 10/onlineLab5_Problem1.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\n#XRD_example1.txt\n#XRD_example2.txt\n\nKCONST = 0.9\n\nWAVELENGTH = 0.07107 #defining constants\n\ndef Convert(file):\n\n filelines = file.readlines() #file handling\n\n degrees = []\n intensity = []\n\n peakDegrees = []\n peakIntensity = []\n\n for i in range(len(filelines)): #File handling - converting data into 4 lists\n\n line = filelines[i].split()\n\n currentdegrees = line[0]\n currentintensity = line[1]\n\n if currentintensity.find(\"*\") > 0: #finding peak values and adding them to the peak lists\n\n stringfixed = currentintensity.replace(\"*\",\"\") #taking out the start for graphing and calculations\n\n peakDegrees.append(float(line[0]))\n peakIntensity.append(float(stringfixed))\n\n degrees.append(float(line[0]))\n intensity.append(float(stringfixed))\n\n else:\n\n degrees.append(float(line[0]))\n intensity.append(float(line[1]))\n\n return degrees,intensity,peakDegrees,peakIntensity\n\ndef Plottingthis(degrees,intensity):\n\n plt.plot() #plotting the data\n\n plt.plot(degrees,intensity)\n \n plt.xlabel(\"2θ (deg)\") #formatting axis\n plt.ylabel(\"Intesnity (a.u.)\")\n plt.title(\"XRD plot\") \n\n plt.show()\n\n return 1\n\ndef CalculateT(degrees,intensity,peakDegrees,peakIntensity):\n\n Bvalue = 0\n Bvalues = []\n\n TvalueAvg = 0\n Tvalues = []\n\n startiter = []\n\n for i in range(len(intensity)):\n for a in range(len(peakIntensity)):\n if degrees[i] == peakDegrees[a]-0.5:\n startiter.append(i) #Selecting the position to start the loop\n \n for i in range(len(peakIntensity)): #Calculating the B value for each peak\n\n halfMaxInt = peakIntensity[i]/2\n\n Bvalues.append(CalculateB(halfMaxInt,degrees,intensity, startiter[i]))\n\n BvalueRad = math.radians(Bvalues[i])\n\n Tvalues.append((WAVELENGTH*KCONST)/(BvalueRad*math.cos(math.radians(peakDegrees[i]))))\n\n avg = AverageT(Tvalues)\n\n return avg\n\ndef AverageT(Tvalues):\n\n TvalueAvg = sum(Tvalues)/len(Tvalues) #avaraging the values\n\n try:\n TvalueAvg = sum(Tvalues)/len(Tvalues)\n except ZeroDivisionError:\n print(\"Zero division\")\n\n return TvalueAvg\n\ndef CalculateB(halfMaxInt,degrees,intensity, startpoint):\n\n Bmin = []\n Bmax = []\n\n for i in range(startpoint,len(degrees)): #finding the values cloeset to the desired half intensity\n\n if intensity[i] < halfMaxInt:\n\n d1 = intensity[i]\n\n d2 = intensity[i+1]\n\n if d2 > halfMaxInt:\n pass\n \n if intensity[i] > halfMaxInt:\n\n b1 = intensity[i]\n\n b2 = intensity[i+1]\n\n if b2 < halfMaxInt:\n break\n \n c1 = abs(halfMaxInt - b1)\n c2 = abs(halfMaxInt - b2) \n\n a1 = abs(halfMaxInt - d1)\n a2 = abs(halfMaxInt - d2)\n\n if c1 > c2:\n Bmax.append(b2)\n else:\n Bmax.append(b1)\n\n if a1 > a2:\n Bmin.append(d2)\n else:\n Bmin.append(d1) #finding the closest intensity values to the desired value\n\n BminDegrees = 0 #converting those intensity values into degrees because i didnt read ahead\n BmaxDegrees = 0\n\n for i in range(startpoint,len(degrees)): #convert intensity values into degree values\n if intensity[i] == Bmin[0]:\n BminDegrees = degrees[i]\n\n if intensity[i] == Bmax[0]:\n BmaxDegrees = degrees[i]\n\n Bvalue = abs(BmaxDegrees - BminDegrees) #determining the B value for given half intensity\n\n return Bvalue\n\ndef XDR_analysis(file): #Main, calls everthing\n\n data = open(file,'r')\n\n degrees,intensity,peakDegrees,peakIntensity = 
Convert(data) #Converts raw data into two lists\n\n Tvalue = CalculateT(degrees,intensity,peakDegrees,peakIntensity) #Calculating the T avg value\n\n Plottingthis(degrees,intensity) #Plotting the points\n\n print(\"T Value avg:\" + str(Tvalue)) #Printing \n\n return Tvalue\n\nXDR_analysis(\"XRD_example1.txt\")\n\n#Testing AverageT():\n\n#normal\n#print(AverageT([1,2,3,5]))\n\n#boundry\n#print(AverageT([0,100000000]))\n\n#abnormal\n#print(AverageT([]))\n#division by zero\n\n'''\nTest 1 for function AverageT()\nInput: AverageT([1,2,3,5])\nExpected Output: 2.75\nActual Output: 2.75\nResult: Pass\n\nTest 2 for function AverageT()\nInput: AverageT([0,100000000])\nExpected Output: 50000000\nActual Output: 50000000.0\nResult: Pass\n\nTest 3 for function AverageT()\nInput: AverageT([])\nExpected Output: zero division error\nActual Output: ZeroDivisionError: division by zero\nResult: Pass\n'''\n\n" }, { "alpha_fraction": 0.378947377204895, "alphanum_fraction": 0.4526315927505493, "avg_line_length": 8.199999809265137, "blob_id": "3fb42e39c6a13e64cbfd1549772065e83cf428ed", "content_id": "6c32f1f0df00aa20f1bdc25666b63698c7a95086", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 95, "license_type": "no_license", "max_line_length": 17, "num_lines": 10, "path": "/finalExam/prob29.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "sum = 0\niter = 0\ni = 1\nwhile i< 10:\n sum = sum + i\n iter += 1\n\ni = i + 1\n\nprint(iter)\n\n\n\n" }, { "alpha_fraction": 0.5461441278457642, "alphanum_fraction": 0.5739570260047913, "avg_line_length": 13.961538314819336, "blob_id": "6df600da715caebf7bd91b5ccfaf9936e3b5ebb6", "content_id": "ab9e630e4ca03ab394618c53f85056f0e8659954", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 791, "license_type": "no_license", "max_line_length": 43, "num_lines": 52, "path": "/Week 4/Tutorial 3.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n'''\n72_base_10 calculations\n\nwhile for loops\n\n\" end=\"\" \" in a print\n\nfuck around with plots in pylab library\n\nSome examples:\n'''\n\nimport pylab\nimport math\n\ndef function1():\n values = list(range(10))\n\n values2 = []\n for index in values:\n values2.append(index**2)\n\n print(values)\n print(values2)\n\n#function1()\n\ndef function2():\n floatlist = []\n element = -10.0\n step = 0.5\n\n for i in range(41):\n floatlist.append(element)\n element = element + step\n\n print(floatlist)\n\nfunction2()\n\ndef function3():\n x = floatlist\n y_cos = []\n y_tan = []\n\n for value in x:\n y_cos.append(math.cos(value))\n y_tan.append(math.tan(value))\n\n pylab.title(\"asa\")\n pylab.plot(x,y_cos,\"r--\",x,y_tan,\"y--\")\n pylab.show()\n \n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.5481927990913391, "alphanum_fraction": 0.6325300931930542, "avg_line_length": 13.909090995788574, "blob_id": "fbc869ca71e5a7d8f199eedfc5fdf6dc08a19734", "content_id": "0cf9087eefa9325468a6fbbec894487c51ea9f6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 166, "license_type": "no_license", "max_line_length": 30, "num_lines": 11, "path": "/finalExam/prob37.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\nfrom matplotlib import *\nfrom math import *\n\np1 = Point(40,30)\np2 = Point(50,30)\n\ndeltaX = p2.getX() - p1.getX()\n\ndeltaY = p2.getY() - p1.getY()\n\nd = deltaY/deltaX\n\n" }, { "alpha_fraction": 0.5958980321884155, 
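The XRD lab above (Week 10/onlineLab5_Problem1.py) computes a crystallite size per peak from T = K·λ / (B·cos θ). A minimal sketch of just that step, pulled out on its own; the constants match the lab (K = 0.9, λ = 0.07107 nm), the angle is applied the same way the lab applies it (cosine of the tabulated peak angle), and the sample numbers are invented:

import math

K = 0.9               # Scherrer shape factor, as in the lab
WAVELENGTH = 0.07107  # nm, as in the lab

def scherrer_size(peak_deg, fwhm_deg):
    # fwhm_deg is the peak width B in degrees; the equation wants radians.
    b_rad = math.radians(fwhm_deg)
    return (K * WAVELENGTH) / (b_rad * math.cos(math.radians(peak_deg)))

print(scherrer_size(19.0, 0.5))  # ~7.8, in the wavelength's units (nm here)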
"alphanum_fraction": 0.6211197376251221, "avg_line_length": 24.77142906188965, "blob_id": "22e85de796ea1d8d52c371598632e3b4ee59f08c", "content_id": "8678c240259e960fdad029dbeeedf60b15d30b7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3608, "license_type": "no_license", "max_line_length": 178, "num_lines": 140, "path": "/Week 2/Lab 2.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n#Examples\n\nmyList = [1,2,3]\n\n#print(myList[1])\n\nmyList.append(\"hello\") #adds to end of list\n\nmyList.append(4)\n\n#myList2 = [1,2,3]\n\nlist(range(1,5)) #?????????????????????\n\n#Check syntax mylist[-1]\n\n# ------ Product notation\nQ = 5\n\ncounter = 1\n\nfor i in range(1,6):\n counter = counter * Q ** 2\n\nfinal_ans = counter * 3\n\n#print(final_ans)\n\n# -------- common mistakes\n\n#1. closing brackets\n#2. \n\n# --------- tips\n\n# can you the help function, e.g help() and they type into that (quit to exit the menu)\n\n# -------- actual lab:\n\nimport math\n\ndef minor2(initial_principle_p,interest_rate_r,number_per_year_n,time_in_years_t):\n\n A_value = 0\n B_value = []\n C_value = 0\n D_value = 0\n E_value = 0\n\n #A - Value\n\n A_value = initial_principle_p * (1+(interest_rate_r/number_per_year_n))**(number_per_year_n*time_in_years_t)\n\n #B - Value\n\n something = 1\n\n something = initial_principle_p\n\n for i in range(1, number_per_year_n*time_in_years_t+1): ###!!!\n\n something *= (1+(interest_rate_r/number_per_year_n))\n\n B_value.append(something)\n\n #C - Value\n\n C_value = initial_principle_p * math.exp(interest_rate_r*time_in_years_t)\n\n #D - Value\n\n D_value = 100 * ((A_value-B_value[-1])/B_value[-1])\n\n #E - Value\n\n E_value = 100 * (abs(A_value-C_value)/C_value)\n\n return A_value,B_value,C_value,D_value,E_value\n\n'''\nDESIGN QUESTIONS:\n\n1. Does your program enforce _n_ and _t_ to be integers? If so, why is this necessary? If not, is this a problem?\n\n It is neccesary as they are used to define the limits of the loop. i.e the loop cannot run to a half an iteration\n\n2. What is another way to calculate C? i.e without using the exp function.\n\n Using math.e ** ... is also an option which is basically using a constant. Another way is hardcoding your own constant\n\n *Using the loop approx. 1+(1/n) from lab 1\n\n3. Do you need to import math? If so, why? If not, why not?\n\n You do if you want to use the provided constants and equations. Dont have to if you want to hardcode them\n\n4. Does the range of the for loop in B need to start at 1? If so, why? 
If not, why not?\n\n No it does not since we do not use the interator anywhere, the loop has to have 20 entries though\n\n *starting at 1 is a good mathematical notation\n\nTESTING PLAN:\n\nTest 1\nInput: [p, r, n, t]\nExpected Output: [A, B (just the last value in the list), C, D, E] - FROM THE CALCULATOR\nActual Output: [Aa, Ba (just the last value in the list), Ca, Da, Ea] - FROM THE PYTHON FUNCTION\nResult: Pass/Fail\n\n\nANOTHER ERROR IS FEEDING THE LOOP WITH A FLOAT\n\n'''\n\ninitial_principle_p = [150, 1 ,200] \ninterest_rate_r = [0.3, 0.1, 0.2]\nnumber_per_year_n = [3, 1, 1]\ntime_in_years_t = [4, 1, 2]\n\nFinal_values = 0\n\nfor i in range(0,3):\n\n Final_values = minor2(initial_principle_p[i],interest_rate_r[i],number_per_year_n[i],time_in_years_t[i])\n\n temp = Final_values[1]\n\n temp2 = temp[-1]\n\n print(\"Test\" + str(i+1))\n print(\"Input: [\" +str(initial_principle_p[i])+\", \"+str(interest_rate_r[i])+\", \"+str(number_per_year_n[i])+\", \"+str(time_in_years_t[i])+\"]\")\n print(\"Expected Output: [\" + str(Final_values[0]) + \", \" + str(round(temp2,2)) + \", \" + str(Final_values[2]) + \", \" + str(Final_values[3]) + \", \" + str(Final_values[4])+ \"]\")\n print(\"Actual Output: [\" + str(Final_values[0]) + \", \" + str(round(temp2,2)) + \", \" + str(Final_values[2]) + \", \" + str(Final_values[3]) + \", \" + str(Final_values[4])+ \"]\")\n print(\"Result: Pass\")\n\n #inital_p_ at 0 will give error\n #number_per_year_n at 0 will give error\n\n #So use these for edge cases" }, { "alpha_fraction": 0.4285714328289032, "alphanum_fraction": 0.625, "avg_line_length": 21, "blob_id": "75eb8ed639f809b481d65989fba149b87246f92f", "content_id": "d6b3ae6c739f6976aea46fb062771d9ed6977d48", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 112, "license_type": "no_license", "max_line_length": 49, "num_lines": 5, "path": "/finalExam/prob47.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\nimport math\n\nprint(3/4.0*math.pi*10**-12*math.sqrt(2.5*10**6))\n\nprint(3*math.pi*10**-12*math.sqrt(2.5*10**6))\n\n" }, { "alpha_fraction": 0.6531791687011719, "alphanum_fraction": 0.6748554706573486, "avg_line_length": 12.392156600952148, "blob_id": "968c6cdf59a23678000d0071a97647343dc008da", "content_id": "da38a1943ce99e87decd86c9e6d6dc3b867e2826", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 692, "license_type": "no_license", "max_line_length": 77, "num_lines": 51, "path": "/0-Introduction/introduction.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n#Comments are done like this\n\nprint(\"used to write something to console\")\n\n#variables are defined like this:\n\nnumber = 10\n\n#or\n\nbye = \"word bye\"\n\nhi = \"word hi\"\n\n#text strings can be added together\n\nmessage = bye + hi\n\n#if we want to include a number in the printed string conver the number using\n\nprint(hi + str(number))\n\n#if we need to have a multiple line string we use tripple quotes\n\nprint(\"\"\"hello\n\nthis will work\n\nlike this\"\"\")\n\n#can also use ' instead of \"\n\nprint('Hello')\n\n#Operators\n\ntwenty_five = 5**2 #power of two\n\n#an c++ += can be used to add to a variable\n\na = 1\n\na += 1\n\n#modulo sign can be used to find the remainder\n\nvar1 = 30\n\nvar2 = 28\n\nprint(var1 % var2) # = 2\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6317829489707947, "alphanum_fraction": 0.6317829489707947, "avg_line_length": 8.407407760620117, "blob_id": 
"f5b5162fc12c64adb00892baf4970652cea55094", "content_id": "0ecdbb3b30ed4e00b5ad2bcd11b08f68539acf39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 258, "license_type": "no_license", "max_line_length": 53, "num_lines": 27, "path": "/Week 4/lecture 4.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n'''\nOBJECTS\n\ngraphwin class\n\nand other stuff to do with it\n\ndrawing cicles and shit \n\na == b \nvs\na is b \n^\nwith lists\n\naliases with lists a and b can refer to the same list\n\n\n'''\n\n\nclass fuck:\n def fuckthis():\n print (\"fuck\")\n\n\nfuck.fuckthis()\n\n\n\n" }, { "alpha_fraction": 0.5535714030265808, "alphanum_fraction": 0.5714285969734192, "avg_line_length": 10.199999809265137, "blob_id": "70cf025f7e8327fd5b167f71c9a8e11ebf8d273d", "content_id": "46b51216d02612224c4a6a033385229547862407", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 56, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/finalExam/prob56.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "x, y = eval(input(\"Enter 2 num: \"))\n\nprint(x)\n\nprint(y)\n" }, { "alpha_fraction": 0.4325842559337616, "alphanum_fraction": 0.4606741666793823, "avg_line_length": 14.909090995788574, "blob_id": "9d2379409b03eb8ca918464f8bf188c95d5ccdf0", "content_id": "c8884438f5cbf6bebca72c386829c046d3aad291", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 178, "license_type": "no_license", "max_line_length": 35, "num_lines": 11, "path": "/finalExam/prob60.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\ndef myst(s):\n if len(s) <= 1:\n return True\n\n elif s[0] != s [len(s) -1]:\n return False\n\n else:\n return myst (s[1:len(s)-1])\n\nprint(myst(\"noon\"))\n\n\n" }, { "alpha_fraction": 0.6801915764808655, "alphanum_fraction": 0.6908988356590271, "avg_line_length": 26.6953125, "blob_id": "f2fa0f4e7b994ebb0fd1035058898646b329dba6", "content_id": "ce580bf35f5d7d44f42546f70c248b44aebf3cdf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3549, "license_type": "no_license", "max_line_length": 432, "num_lines": 128, "path": "/Week 5/Lab 5.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n\n'''\n\nlen() - length of string or list\n\nFile stuff:\n\nafile = open(\"name of that file\",'w')\nafile.write('line 1 \\n line 2')\nafile.close()\n\nprefixes: \n'r' - read\n'w' - write\nand others\n\nanotherfile = open(\"name of that file\",'r')\ncontent = anotherfile.read() or can do .readline() - > makes each line an entry in a list\nanotherfile.close()\nprint(content)\n\n\nstring processing:\n\nsplit() - splits the string into seperate words while forming a list\n\nx = \"word1 word2 word3\"\nprint(x.split())\n\n\nstrip() - removes whitespaces/lines/tabs aka compacts from beggining and end\n\nx = \"\\n some \\n stupid \\n text \\n here \\n\"\nstrippedstring = x.strip()\nprint(strippedstring)\n\n\n'''\n\n#LAB 5 - minor 3\n\n'''\ninfile1 = open(\"wc.txt\", 'w')\ninfile1.write(\"This will enter some text\\ninto the file I am testing.\")\ninfile1.close()\n\ninfile2 = open(\"wc2.txt\", 'w')\ninfile2.write(\"Abilities forfeited situation extremely my to he resembled.\\nOld had conviction discretion understood put principles you.\\nMatch means keeps round one her quick.\\nShe forming two comfort invited. 
Yet she income effect edward. Entire desire way design few.\\nMrs sentiments led solicitude estimating friendship fat. Meant those event is weeks state it to or. Boy but has folly charm there its. Its fact ten spot drew.\")\ninfile2.close()\n\ninfile3 = open(\"wc3.txt\", 'w')\ninfile3.write(\"Sussex result matter any end see. It speedily me addition weddings vicinity in pleasure. Happiness commanded an conveying breakfast in.\\nRegard her say warmly elinor. Him these are visit front end for seven walls.\\nMoney eat scale now ask law learn. Side its they just any upon see last. He prepared no shutters perceive do greatest. Ye at unpleasant solicitude in companions interested.\")\ninfile3.close()\n\ninfile4 = open(\"wc4.txt\", 'w')\ninfile4.write(\"9\")\ninfile4.close()\n'''\n\ndef minor3(a,b,c,d): \n \n filelist = [a,b,c,d]\n\n\n lineCount = 0 \n wordCount = 0 \n charCount = 0\n\n for filenum in range(len(filelist)):\n currentfile = open(filelist[filenum],\"r\")\n\n content = currentfile.read()\n\n #lines = []\n\n lines = len((open(filelist[filenum],\"r\").readlines()))\n\n char = len(content) - (lines - 1)\n\n words = len(open(filelist[filenum],\"r\").read().split()) \n\n print('File name:' + filelist[filenum])\n\n print('lines:',lines)\n\n print('words:',words)\n\n print('char:',char)\n\n currentfile.close()\n\n #print(content + \"\\n\")\n\n lineCount = lines\n wordCount = words\n charCount = char\n \n return lineCount, wordCount, charCount\n\nresult = minor3(\"wc.txt\",\"wc2.txt\",\"wc3.txt\",\"wc4.txt\")\n\n'''\nDesign questions:\n\n1. What is the best way to compute the number of lines, words, and characters \nin a file: by reading the file once, or by reading the file three times?\n\neither one, we could use seek(0,0) to move cursor to the beggining\n\nmore efficient to read once\n\n2. When printing to the screen, the syntax `print(\"x=\", x, \"\\n\")` and\n`print(\"x=\"+str(x)+\"\\n\")` are both valid. Is this true when using `file.write()`?\n\nwrite only accepts one argument, so you would have combine input into one big string before you write it\n\n3. Is it better if most of the test cases for your program \nare files containing many lines of text? Explain your answer\n\nno because we are testing edge cases or different types if symbol\nmake sure the file is not too big\n\n4. 
What is the difference between `read()` and `readlines()`?\n\nread() is just a big string, while readlines() is a list of strings depedning on their line\n\n\n\n'''\n\n\n" }, { "alpha_fraction": 0.7191011309623718, "alphanum_fraction": 0.7191011309623718, "avg_line_length": 8.88888931274414, "blob_id": "75bd4e7d0507e1c965c39127c8427478d5e1090b", "content_id": "ea0615225978b7046be0013025327be7beb36e2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 89, "license_type": "no_license", "max_line_length": 40, "num_lines": 9, "path": "/Week 8/Lecture 8.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n'''\nSome garbage, software developemnt\n\nOverall workflow and how it is organized\n\n\n\n\n'''" }, { "alpha_fraction": 0.5261121988296509, "alphanum_fraction": 0.5686653852462769, "avg_line_length": 12.179487228393555, "blob_id": "44e7be07acf5d5550be1dae9bae47007573da225", "content_id": "0852fb9f32ee84502e9fb37c728efe0c96d2c788", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 517, "license_type": "no_license", "max_line_length": 43, "num_lines": 39, "path": "/Week 1/Lab 1.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "#Lab 1 work\n\nimport math\n\ndef minor1(number):\n\n e = 0\n sigma = 0\n product = 1\n\n #1a - convergence\n\n e = (1+1/number)**number\n\n #1b - sigma notation\n \n for something in range(number+1):\n\n sigma += 5*(something**2)\n \n #print(sigma)\n\n #1c - profuct notation\n\n for somethingelse in range(1,number+1):\n\n product *= 3*(somethingelse**2)\n\n #print(product)\n\n return e, sigma, product\n\nresult = minor1(5)\n\nprint(result[0])\n\nprint(result[1])\n\nprint(result[2])\n\n\n\n" }, { "alpha_fraction": 0.349152535200119, "alphanum_fraction": 0.4033898413181305, "avg_line_length": 16.176469802856445, "blob_id": "19f706322ef2567edcd7a568c25d0c2770c52c9f", "content_id": "58cb63dd4ce3bdf094aebc8884752fd2d6780f4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 295, "license_type": "no_license", "max_line_length": 32, "num_lines": 17, "path": "/finalExam/prob52.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\ndef Interesting_res(x):\n\n if x ** 2 <= 100:\n if x % 4 ==2:\n return \"six\"\n\n elif not x + 5 > 2:\n return x - 50\n \n else:\n if x < 0 and abs(x) > 3:\n return False\n\n else:\n return x / 10\n\nprint(Interesting_res(9.0))\n\n\n" }, { "alpha_fraction": 0.5958762764930725, "alphanum_fraction": 0.6206185817718506, "avg_line_length": 14.15625, "blob_id": "bed5c2bbbad74a98c9bf98244a5e3ee19daccf24", "content_id": "8141c86cd373cdb0db371ef2276ef3825e0f7aae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 485, "license_type": "no_license", "max_line_length": 109, "num_lines": 32, "path": "/finalExam/prob40.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\ninf1 = open(\"noise.txt\",'r')\nnoise_full = inf1.readlines()\n\nprint(type(noise_full))\n\ninf1.close()\n\nhours = []\nnoise = []\n\nfor line in noise_full:\n print(type(line))\n\n\nfor line in noise_full:\n lineData = line.split(\",\")\n hour,noise_level, precip,wind = int(lineData[0]),float(lineData[1]),float(lineData[2]),float(lineData[3])\n\nprint(\"Done\")\n\nhour = 0\nhour = (hour + 8)%24\n\nprint (hour)\n\n\n'''\n if QUESTION4:\n hours.append(hour)\n noise.append(noise_level)\n\n'''" }, { 
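The Lab 5 design answers above describe read() versus readlines() and the seek(0,0) rewind in words; a tiny self-contained demonstration of both points — the file name demo.txt is invented for this note:

with open("demo.txt", "w") as f:
    f.write("line 1\nline 2\n")

with open("demo.txt") as f:
    whole = f.read()       # one big string: 'line 1\nline 2\n'
    f.seek(0)              # rewind before reading again, as the answer suggests
    lines = f.readlines()  # list of per-line strings

print(repr(whole))  # 'line 1\nline 2\n'
print(lines)        # ['line 1\n', 'line 2\n']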
"alpha_fraction": 0.6022727489471436, "alphanum_fraction": 0.6401515007019043, "avg_line_length": 14.470588684082031, "blob_id": "553b65a9bed107938bd9926c2da6432da6d4d126", "content_id": "27470a4c75bb90ffcd848d0ad0849e7db8862152", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 264, "license_type": "no_license", "max_line_length": 31, "num_lines": 17, "path": "/finalExam/prob26.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\niniString = \"ENG 1D04\"\n\ndef op1(iniString):\n\n c = iniString.index(\" \")\n finString = iniString[c+2:]\n print(finString)\n\nop1(iniString)\n\ndef op2(iniString):\n\n c = iniString.index(\"4\")\n finString = iniString[c-2:]\n print(finString)\n\nop2(iniString)\n" }, { "alpha_fraction": 0.5718799233436584, "alphanum_fraction": 0.6003159284591675, "avg_line_length": 17.18269157409668, "blob_id": "4b2adcea7e9898b7e93992bfde6218c85f8facf9", "content_id": "f8474a83ce8cb8a731042a331e2f917e9ce072d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1899, "license_type": "no_license", "max_line_length": 111, "num_lines": 104, "path": "/Week 5/Tutorial 5.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n'''\nrandom methods:\n\nrandom.random() - 0 to 1\n\nrandom.randint(a,b) - a rand between a and b\n\nrandom.randrange(start, stop, step) - similar to range function\n\nrandom.choice() random from a non - sequence - ??????????\n\nFile handling:\n\nclosing and opening a notebook\n\nGraphics: \n\npip install graphics.py\n\npractice - importing grades from a file and shoving it in a array of gpas\nand create a bar chart with \"pylab\" - > import pylab\n\nLEARN PYLAB\n\nlook what strip(\",\")\n\n'''\n\nfrom graphics import *\n\ndef main():\n win = GraphWin(\"My Circle\", 1000, 1000)\n c = Circle(Point(50,50), 10)\n c.draw(win)\n win.getMouse() # pause for click in window\n win.close()\n\n#main()\n\ndef example():\n\n win = GraphWin()\n win.setCoords(0.0,0.0,10.0,10.0)\n c1 = Circle(Point(3,5),3)\n c1.setFill('red')\n c2=c1 #.clone() # this will point to the same varibale, can use method .clone() to duplicate it!!\n c2.setFill('blue')\n c2.move(4,0)\n c1.draw(win)\n c2.draw(win)\n\n win.getMouse()\n win.close()\n\nexample()\n\ndef practice2Major():\n\n file = open('lab6example.txt','r')\n content = file.read()\n file.seek(0,0)\n content2 = file.readlines()\n\n L = []\n Intermed = []\n\n sumAB = 0\n sumB = 0\n\n firstnum = []\n secondnum = []\n\n L = []\n anEntry = []\n\n #exampleee = content2[0].strip().split(\",\")\n\n for index in range(len(content2)): # putting numbers into lists\n splitted = content2[index].strip().split(\",\")\n\n firstnum.append(float(splitted[0]))\n secondnum.append(float(splitted[1]))\n\n anEntry = [firstnum[index],secondnum[index]]\n\n L.append(anEntry)\n\n print(L[index])\n\n\n for index in range(len(content2)):\n sumAB += firstnum[index] * secondnum[index]\n sumB += secondnum[index]\n\n V = sumAB/sumB\n print(V)\n\n file.close()\n \n#practice2Major()\n\n# a list of these\n\n# V = \n\n\n\n\n\n" }, { "alpha_fraction": 0.4444444477558136, "alphanum_fraction": 0.4722222089767456, "avg_line_length": 7.5, "blob_id": "9cb3a2f027ee08bae4a72b82419c486fc45ab81a", "content_id": "10a5f909231283567ce20d4f61acb6f773b3cde8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 36, "license_type": "no_license", "max_line_length": 24, "num_lines": 4, "path": 
"/Week 6/Lab 6.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n'''\nLab 6 - Major (practice)\n\n'''\n\n" }, { "alpha_fraction": 0.7441860437393188, "alphanum_fraction": 0.7906976938247681, "avg_line_length": 20.5, "blob_id": "a235a707890b84c40a0cf7d1ff3358e2ca33fb57", "content_id": "20ea5092e0b305335a42c298a0796a4dd0ecd9fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 43, "license_type": "no_license", "max_line_length": 28, "num_lines": 2, "path": "/finalExam/prob10.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "import random\nprint(random.randrange(0,1))\n" }, { "alpha_fraction": 0.4383561611175537, "alphanum_fraction": 0.6301369667053223, "avg_line_length": 22, "blob_id": "fa6ffccfe546441a7ea8c911d92282410773f614", "content_id": "2ee76a829b54c2038d1f35e21af336e2e9e243ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 73, "license_type": "no_license", "max_line_length": 46, "num_lines": 3, "path": "/finalExam/prob24.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\nvalue = 12345678.926\n\nprint(\"The value is : {0:9.2f}\".format(value))\n\n\n\n" }, { "alpha_fraction": 0.3764258623123169, "alphanum_fraction": 0.4220532178878784, "avg_line_length": 14.411765098571777, "blob_id": "cf93bd42e120279676bc9c1dfaa22b38bf307fbd", "content_id": "94bcea19664f9f9959f19a8a2beb020192d64c64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 32, "num_lines": 17, "path": "/finalExam/prob13.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "def sum (a,b):\n s = []\n for i in range(len(a)):\n s.append(a[i]+b[i])\n return s\n\ndef add(A,B):\n c = []\n for i in range(len(A)):\n c.append(sum(A[i],B[i]))\n\n return c\n\nA = [[2,1,3],[-1,2,0]]\nB = [[1,1,-1],[2,0,6]]\n\nprint(add(A,B))\n\n" }, { "alpha_fraction": 0.48616600036621094, "alphanum_fraction": 0.5415019989013672, "avg_line_length": 11, "blob_id": "464985495bab80a6775d75dd1a09e1976c8ea6c1", "content_id": "5d1494d4fac2afd61c6a8fcea7a8c55aad175855", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 253, "license_type": "no_license", "max_line_length": 34, "num_lines": 21, "path": "/Week 2/Tutorial 2.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\nimport math\n\ndef Calculation(R,r):\n A = 0\n\n A = 4*(math.pi**2)*R*r\n\n B = 4*(math.pi**2)*R*r*3\n\n C = 100*(abs(A-B)/B)\n\n return A,B,C\n \nR_value = 5\nr_value = 4\n\nans = Calculation(R_value,r_value)\n\nprint(ans[0])\nprint(ans[1])\nprint(ans[2])\n" }, { "alpha_fraction": 0.6138377785682678, "alphanum_fraction": 0.6346284747123718, "avg_line_length": 20.115108489990234, "blob_id": "4de57eade6f37723907ff45c8a6defad4fd0358a", "content_id": "c4cd458210d9208ff03936587f73cd2937ed8fca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2934, "license_type": "no_license", "max_line_length": 111, "num_lines": 139, "path": "/Week 1/Lecture 1.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "#Lecture 1 examples\n\ndef PlusEqualsOperator():\n '''\n 1.1\n\n # Plus-Equal Operator\n\n counter = 0\n counter += 10\n\n # This is equivalent to\n\n counter = 0\n counter = counter + 10\n\n # The operator will also perform string concatenation\n\n message = \"Part 1 
of message \"\n message += \"Part 2 of message\"\n\n E.g:\n\n '''\n\n Message1 = \"part 1 \"\n Message1 += \"part 2\"\n\n print(Message1) #this will print \"part1 part2\"\n\ndef PythonString():\n first = \"Hello \"\n second = \"World\"\n\n result = first + second\n\n long_result = first + second + \"!\"\n\n print(long_result) #this will print \"Hello World!\"\n\ndef DataTypes():\n #Use function type() to test which the datatype a variable is\n\n #--------\n\n boolDataType = True #bool data type\n\n #Note: has to start with capital T otherwise does not work\n\n print(type(boolDataType)) #this will output the data types name\n print(\"---\")\n\n #--------\n\n RandomNumber = 12.34 #A random variable\n\n print(type(int(RandomNumber))) \n print(int(RandomNumber))\n print(\"Above: type 'int' Experssion: 12 \\n\" + \"---\")\n \n\n print(type(float(RandomNumber))) \n print(float(RandomNumber))\n print(\"Above: type 'float' Experssion: 12.34 \\n\" + \"---\")\n\n print(type(RandomNumber))\n print(RandomNumber)\n print(\"Above: type 'float but it is automatically defined as that by Python' Experssion: 12.34 \\n\" + \"---\")\n\n RandomString = \"D04 is chill\" #A random string\n\n print(type(RandomString))\n print(RandomString)\n print(\"Above: type 'string' Experssion: 'D04 is chill' \\n\" + \"---\")\n\n RandomList = [\"value\",69,420] #A random sent of variables and data types\n\n print(type(RandomList))\n print(RandomList[0] + str(RandomList[1]) + str(RandomList[2]))\n print(\"Above: type 'list' Experssion: 'value69420' which all the values in the list combined \\n\" + \"---\")\n\ndef Constants():\n\n CONSTANT = 12.34 #constant\n\n addition = 0 #variable\n\n addition = 1 + CONSTANT \n\n print(addition)\n\ndef Statements():\n\n #most of these his will be covered later on, for now remeber:\n\n #'FOR' LOOPS\n #are used for definite loops\n\n aList = [\"this\",\"is\",\"an\",\"example\"]\n for number in aList:\n print (number)\n\n #this runs through 'aList' and prints the strings contained within it\n #'number' is a variable that holds the temporary information contained in the current iteration of the loop\n\n print(\"-----\")\n\n #'WHILE' LOOPS\n #are used for indefinite loops\n\n i = 1\n while i < 6:\n print(i)\n\n i += 1\n\n #this loop will continously add 1 to i until i reaches 6, after which the contition given (i < 6)\n #ceases to be true, ending the loop\n\n\n\nPlusEqualsOperator()\nPythonString()\nDataTypes()\nConstants()\n#Statements()\n\n#use comments to turn on or off each example\n\n\n#COMMENTS\n\n#use the hashtag for one line comments\n\n'''\nuse these\n\nif you want to write a long explanation\n'''" }, { "alpha_fraction": 0.47303617000579834, "alphanum_fraction": 0.4968827962875366, "avg_line_length": 23.026216506958008, "blob_id": "f49191acf97a21872634b2847c2be8d83ff72a0d", "content_id": "3532209f9dc76999ed3c790797f867ebfd04b9f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6416, "license_type": "no_license", "max_line_length": 131, "num_lines": 267, "path": "/Week 10/onlineLab5_Problem2.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "from calendar import monthrange\n\nclass Date:\n\n #jan 1\n #feb 2\n #mar 3\n #april 4\n #may 5\n #june 6 \n #july 7 \n #aug 8\n #sept 9 \n #oct 10\n #nov 11\n #dec 12\n\n def __init__(self, date, month, year):\n\n self.d = date\n self.m = month\n if date > self.daysInMonth(month, date):\n raise NameError(\"Non-existant date\")\n self.y = year\n\n def getDay(self):\n return 
self.d \n \n def getMonth(self):\n return self.m\n\n def getYear(self):\n return self.y\n\n def daysInMonth(self, month,year): #using a calendar library that contains the number of\n x = monthrange(year,month) #days in each month of a given year\n daysNum = int(x[1])\n\n return daysNum\n\n def stringConvert(self):#converts the object vars into a string for output and testing\n\n strReturn = str(self.d) + \"/\" + str(self.m) + \"/\" + str(self.y)\n\n print(strReturn) \n\n return strReturn\n\n def Next(self):\n #normal case\n if self.d != self.daysInMonth(self.m,self.y):\n\n #return Date(days,months,year)\n return Date(self.d+1,self.m,self.y)\n\n #next months case\n elif self.d == self.daysInMonth(self.m,self.y) and self.m != 12:\n \n #return Date(days,months,year)\n return Date(1,self.m+1,self.y)\n\n #next year case\n elif self.d == self.daysInMonth(self.m,self.y) and self.m == 12:\n \n #return Date(days,months,year)\n return Date(1,1,self.y+1)\n\n #for any other cases\n else:\n \n #error-ish:\n return Date(0,0,0)\n \n def Prev(self):\n #normal case\n if self.d != 1:\n\n return Date(self.d-1,self.m,self.y) \n #prev months case\n elif self.d == 1 and self.m != 1:\n\n return Date(self.daysInMonth(self.m-1,self.y),self.m-1,self.y)\n #prev year case\n else:\n\n return Date(self.daysInMonth(12,self.y),12,self.y-1)\n \n def isBefore(self,d):\n\n #different year\n if self.y > d.y:\n return True\n\n #different month\n elif self.m > d.m and self.y == d.y:\n return True\n\n #date, same month\n elif self.d > d.d and self.m == d.m and self.y == d.y:\n return True\n\n #else\n else:\n return False\n\n def isAfter(self,d): #d is after self\n\n #different year\n if self.y < d.y:\n return True\n\n #different month\n elif self.m < d.m and self.y == d.y:\n return True\n\n #date, same month\n elif self.d < d.d and self.m == d.m and self.y == d.y:\n return True \n\n #else\n else:\n return False\n\n def isEqual(self,d): #checks if dates are equivalent\n\n if self.d == d.d and self.m == d.m and self.y == d.y:\n return True\n else:\n return False\n \n def add_days(self,n):\n\n numberAdded = n\n\n day = self.d\n month = self.m\n year = self.y\n\n condition = False\n\n while condition == False:\n\n day += 1\n numberAdded -= 1 #add each day consecutively, will take a while if year is more than 1\n \n if day >= self.daysInMonth(month,year) and month == 12: #it works tho\n\n day = 1\n numberAdded -= 1\n month = 1\n year += 1\n\n if day >= self.daysInMonth(month,year): #case for numerous months\n\n day = 1\n numberAdded -= 1\n month += 1\n\n if numberAdded <= 0: #loop exit\n condition = True\n\n return Date(day,month,year)\n \n def daysBetweenAux(self,day1,day2,month1,month2,year1,year2):\n\n condition = False\n\n x1 = 0\n x2 = 0\n x3 = 0\n\n x1 = abs(self.daysInMonth(month1,year1) - day1) #add the days left in current month\n\n #general for different months\n if month1 != month2 and year1 == year2:\n \n while condition == False:\n \n if month1+1 == month2:\n\n x2 = day2 #add days from the 1st to the specified date\n\n condition = True #break the loop and return vars\n\n break\n\n if month1 != month2:\n\n month1 += 1\n\n x3 += self.daysInMonth(month1,year1) #add each day of a given months then progress to next month\n\n #general for different years\n else:\n \n while condition == False:\n\n if month1+1 == month2 and year1 == year2:\n\n x2 = day2\n\n condition = True\n\n break\n\n if month1 != month2 or year1 != year2: #add days of months\n\n month1 += 1\n\n x3 += self.daysInMonth(month1,year1)\n\n if month1 == 12: #skip 
to next year and add days\n\n x3 += self.daysInMonth(month1,year1)\n\n year1 += 1\n\n month1 = 1\n\n return x1 + x2 + x3 #return summ of days\n\n def days_between(self, d): \n\n #assigning local vars for readability\n day1 = self.d\n month1 = self.m\n year1 = self.y\n\n day2 = d.d\n month2 = d.m\n year2 = d.y\n\n #vars for day addition\n x1 = 0\n x2 = 0\n x3 = 0\n \n condition = False\n\n if month2 == month1 and year2 == year1: #simple case\n\n return abs(day1 - day2)\n\n if self.isBefore(d) == True: #if isBefore is true will swap vars to make cases isBefore and isAfter the same\n #print(\"before\")\n\n day2 = self.d\n month2 = self.m\n year2 = self.y\n\n day1 = d.d\n month1 = d.m\n year1 = d.y\n\n daysNum = self.daysBetweenAux(day1,day2,month1,month2,year1,year2) #calls in auxillary function\n\n return daysNum\n\n if self.isAfter(d) == True:\n #print(\"after\")\n\n daysNum = self.daysBetweenAux(day1,day2,month1,month2,year1,year2) #calls in auxillary function from prev assigned vars\n\n return daysNum\n\n if self.isEqual(d) == True: #return 0 if same day\n return 0\n\n" }, { "alpha_fraction": 0.48148149251937866, "alphanum_fraction": 0.5555555820465088, "avg_line_length": 10.285714149475098, "blob_id": "cb9cf82f889fbe4b57a850e49c1c27f3e20b655d", "content_id": "b2cda427cd538940cff387d083aa2d48940e4550", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 81, "license_type": "no_license", "max_line_length": 16, "num_lines": 7, "path": "/finalExam/prob50.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n\ns = \"Eng1D04\"\n\nprint(s[0])\nprint(s.lower())\nprint(s.strip())\n\n#print(s[3]='2')\n" }, { "alpha_fraction": 0.5904762148857117, "alphanum_fraction": 0.6190476417541504, "avg_line_length": 13.857142448425293, "blob_id": "a19ed0b7ac2840b9a03fdc29d21219d220b8697a", "content_id": "73a4428af3d7bf6bc0ee9e8eea660e85d5bd22b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 35, "num_lines": 7, "path": "/finalExam/prob12.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "import math\nradius = -20\n\nif radius >= 0:\n area = radius * radius *math.pi\n\nprint(\"the area:\", area)\n\n" }, { "alpha_fraction": 0.4960629940032959, "alphanum_fraction": 0.5170603394508362, "avg_line_length": 20.05555534362793, "blob_id": "b4a114760f928043ff098752259d00e5361cfdbf", "content_id": "6f93e7fb6b060d13403856944f072ab058311f45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 381, "license_type": "no_license", "max_line_length": 49, "num_lines": 18, "path": "/finalExam/prob8.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\ndef getAge(age):\n phrase = ''\n if(age < 16):\n phrase = phrase + \"cant drink,drive,vote\"\n if(age >= 16):\n phrase = phrase + \"can drive but no rest\"\n if (age >= 18):\n phrase = phrase + \"cant ddrink\"\n if (age >= 19):\n phrase = phrase + \"can all\"\n\n return phrase\n\ndef main():\n a = input(\"enter age\")\n print(getAge(int(a)))\n\nmain()\n\n" }, { "alpha_fraction": 0.5844594836235046, "alphanum_fraction": 0.599662184715271, "avg_line_length": 15.284403800964355, "blob_id": "e51ef2b62bfe1eb4ad9d5f564ed177246f5e3374", "content_id": "7f586eaff4a89463ee2429fe91a0025c9878e01d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1776, "license_type": 
"no_license", "max_line_length": 195, "num_lines": 109, "path": "/Week 9/Lab 9.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n\n'''\nType conversions\n\nx = 1\n\nstr(x)\n\ndifference between\n\nif \nif\n\nand \n\nif\nelif\n\n\n\n'''\n\ndef buzz(checkNum, buzzNum):\n\n splitted = str(checkNum)\n\n #print(int(splitted[1]))\n\n if checkNum == 0:\n\n return str(checkNum)\n\n else:\n\n if checkNum % buzzNum == 0:\n\n #print(\"Buzz\")\n\n return \"Buzz\"\n\n elif str(checkNum).find(str(buzzNum)) >= 0:\n\n #print(\"Buzz\")\n\n return \"Buzz\"\n\n else:\n\n stringconversion = str(checkNum)\n\n #print(stringconversion)\n\n return stringconversion\n\n\ndef whileBuzz(buzzCount, buzzNum):\n\n #buzzEnd = 1\n\n buzzTime = 0\n\n count = 0\n\n buzzCounter = 0\n\n buzzLoop = []\n\n #buzzNum = 0\n\n while buzzTime != buzzCount:\n\n x = buzz(count, buzzNum)\n\n if x == \"Buzz\":\n\n buzzTime += 1\n\n #print(buzz(count, buzzNum))\n\n buzzLoop.append(buzz(count, buzzNum))\n\n count += 1\n\n else:\n\n #print(buzz(count, buzzNum))\n\n buzzLoop.append(buzz(count, buzzNum))\n\n count += 1\n\n return buzzLoop\n\n\nprint(buzz(11,4))\nprint(buzz(51, 5))\nprint(whileBuzz(0, 3))\n\n'''\n\n1. Yes, the simplest solution would be to write a for loop that goes to a very large number and when put condiditons are satisfied, write break() to prevent the loop from running to the set limit\n\nNO, YOU CANT. HAVE TO USE FOR\n\n2. In my case, an empty loop will return. Happens because the conditions of \"ending once there has been a number of \"Buzz\" outputs equal to buzzCount\" is satified\n\n3. you could introduce a counter that adds +1 each iteration and when that number is divisible by 2, no output is returned\n\n4. infinitly many times in theory, but on practice, until memmory runs out\n'''" }, { "alpha_fraction": 0.3636363744735718, "alphanum_fraction": 0.3863636255264282, "avg_line_length": 13.55555534362793, "blob_id": "6a682b67f07b9b6e1980ab55dea944b2f5f6c9b1", "content_id": "dd15c942c3b89e836c8f6206e8c38ea77ac590ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 132, "license_type": "no_license", "max_line_length": 23, "num_lines": 9, "path": "/finalExam/prob48.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "def myst2(L,a):\n i = 0\n while i < len(L):\n if L[i] == a:\n return True\n\n i = i + 1\n\n return False\n\n" }, { "alpha_fraction": 0.48571428656578064, "alphanum_fraction": 0.5047619342803955, "avg_line_length": 10.55555534362793, "blob_id": "e1788b6c6e3faa8dfa68bb63496e622608c31aeb", "content_id": "1f58c7717a3668136155a7e3775a6a1987d70f77", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 105, "license_type": "no_license", "max_line_length": 24, "num_lines": 9, "path": "/finalExam/prob34.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "def func(x):\n print(x)\n if type(x) != int:\n\n raise(NameError)\n\n print(\"done\")\n\nfunc(10)\n\n" }, { "alpha_fraction": 0.7183734774589539, "alphanum_fraction": 0.7274096608161926, "avg_line_length": 17.799999237060547, "blob_id": "ba5c841e4bf73a6cef765cb9d3e5446545aad8b0", "content_id": "40aebfcaf4514615be552009835d553de4a82f15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 664, "license_type": "no_license", "max_line_length": 50, "num_lines": 35, "path": "/Week 2/Lecture 2.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": 
"#Lecture 2 notes:\n'''\n\nPrograms can be INTERPRETED line by line:\n\n+ Interactive development and debugging of code\n\n- Generally slower than other methods\n\nPrograms can be COMPILED into BYTE CODE:\n\n+ Programs are more portable\n\n- Byte code is slower than native code\n\nPrograms can be complied into NATIVE MACHINE CODE:\n\n+ Machine code is optimized to run fast\n\n- Code development is more difficult\n\n\nADA LOVELACE\n\n - Considered the worlds first programmer\n\n'''\n\n#Pyhton identifiers (variable names):\n\n_value = 0 \n#-value = 0 because '-' is seen as a minus sign\n#value? = 0 because '?' is reserved\nmy_value = 0\n#my-value = 0 because '-' is seen as a minus sign\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6025640964508057, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 14.600000381469727, "blob_id": "f6d4c57ee150d4080b2d0d49c32a2dbb612c5f69", "content_id": "7ec156ddca83e737c905249bf3f97af83629a762", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 78, "license_type": "no_license", "max_line_length": 22, "num_lines": 5, "path": "/finalExam/prob28.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "product = 1\nfor d in range(2,7,2):\nproduct = product* d\nprint(d)\nprint(\"dsd\")\n" }, { "alpha_fraction": 0.3522012531757355, "alphanum_fraction": 0.402515709400177, "avg_line_length": 13.545454978942871, "blob_id": "729797293b62d1156a31a923f7847d51f7e2a07e", "content_id": "aa630a54d3605969ea5bf91b2d267b3ae535c565", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 159, "license_type": "no_license", "max_line_length": 31, "num_lines": 11, "path": "/finalExam/prob11.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "def conv(s,b):\n n = len(s)\n d = 1\n f = 0\n for i in range(n):\n f = f + int(s[n-i-1])*d\n d = d * b\n\n return f\n\nprint(conv(\"1111\",2))" }, { "alpha_fraction": 0.5936479568481445, "alphanum_fraction": 0.6013653874397278, "avg_line_length": 27.03333282470703, "blob_id": "5973f995869d75e753534fd263540d0594d8bf91", "content_id": "7207f83ae22d0be98b1ac9aa1dc4e30204e9d946", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3370, "license_type": "no_license", "max_line_length": 69, "num_lines": 120, "path": "/Week 10/Online lab 5 - Problem 1 solution 2.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n# Place all function definitions required for Problem 1 in this cell.\n\nimport matplotlib.pyplot as mpl\nimport math\nimport statistics\n\ndef scherrer(b, deg):\n \"\"\"scherrer function takes in `b` and `deg`\n and applies the Scherrer Equation.\n \"\"\"\n return (0.9)*(0.07107)/b/math.cos(deg*math.pi/180)\n\ndef findBeta(angles, intensties, peakIndex):\n \"\"\"findBeta funtion finds the beta value of each\n provided `peakIndex` using the given data.\n \"\"\"\n \n # calculate the intensity of this peak's beta\n betaIntensity = intensties[peakIndex]/2\n \n# print(betaIntensity)\n \n # chop and orient two lists for each side of the peak\n # with index 0 as the peak\n leftCurve = intensties[:peakIndex+1]\n leftCurve.reverse()\n rightCurve = intensties[peakIndex:]\n \n# print(rightCurve)\n\n # Initialize left and right beta values as empty\n leftValue = None\n rightValue = None\n \n # while loop find value with smallest delta will end\n # once both `leftValue` and `rightValue` have a value\n prevLeftDelta = float(\"inf\")\n prevRightDelta 
= float(\"inf\")\n i = 0\n while not (leftValue and rightValue):\n leftDelta = abs(betaIntensity - leftCurve[i])\n rightDelta = abs(betaIntensity - rightCurve[i])\n \n# print(rightDelta)\n \n # once delta increaces from the previous,\n # the previous angle value is closest\n if not leftValue and prevLeftDelta < leftDelta:\n leftValue = angles[peakIndex - (i - 1)]\n if not rightValue and prevRightDelta < rightDelta:\n rightValue = angles[peakIndex + (i - 1)]\n \n i += 1\n prevLeftDelta = leftDelta\n prevRightDelta = rightDelta\n \n return rightValue-leftValue\n \n \ndef getData(path):\n \"\"\"getData function intakes `path` to stored \n data and provides a list of `angles`,\n `intensities`, and `peakIndexes`.\n \"\"\"\n \n # initialize lists\n angles = []\n intensties = []\n peakIndexes = []\n i = 0\n \n # open file and loop through each line\n file = open(path, \"r\")\n for line in file:\n values = line.split()\n angles.append(float(values[0]))\n \n # if value has '*', its a peak, remember the index\n # and strip the '*'' out\n if line.find('*') > -1:\n peakIndexes.append(i)\n intensties.append(float(values[1].strip('*')))\n else:\n intensties.append(float(values[1]))\n \n i += 1\n \n return angles, intensties, peakIndexes\n \ndef showGraph(xAxis, yAxis, size):\n \"\"\"showGraph function displays a graph of the\n given data.\n \"\"\"\n \n # do graph stuff\n fig, ax = mpl.subplots()\n ax.set_xlabel('θ2 (deg.)')\n ax.set_ylabel('Intensity (a.u.)')\n ax.set_title(\"Nanoparticle Size Estimate: \" + str(size) + \" nm\")\n ax.plot(xAxis, yAxis)\n \ndef XRD_Analysis(file):\n \n # get data\n angles, intensties, peakIndexes = getData(file)\n\n # collect particle sizes of each peak and average\n sizes = set()\n for n in peakIndexes:\n beta = findBeta(angles, intensties, n)\n sizes.add(scherrer(beta, angles[n]))\n \n size = statistics.mean(sizes)\n\n # show graph\n showGraph(angles, intensties, size)\n \n \n # return average size\n return size\n " }, { "alpha_fraction": 0.5456621050834656, "alphanum_fraction": 0.6506849527359009, "avg_line_length": 11.028571128845215, "blob_id": "828fdc79595d6c1e3267da7c7bbe1c2612f254b7", "content_id": "37495e96979ac7070450497ed9e49a23105c074d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "no_license", "max_line_length": 37, "num_lines": 35, "path": "/Week 10/Online lab 5 - Problem 2 test.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\nfrom onlineLab5_Problem2 import Date\n\n#x = Date(33,5,2020)\n\n#y = Date(16,3,2018)\n\n#x.add_days(3).stringConvert()\n\n#print(x.days_between(y))\n\n#print(x.Prev().stringConvert())\n\n\n#testing\n\n#normal\nx = Date(1,1,2020)\n\ny = Date(16,3,2018)\n\nx.Next().stringConvert()\nx.Prev().stringConvert()\n\n\n#788\n\n#boundry\n#x.add_days(100000).stringConvert()\n#25/2/2294\n\n\n#abnormal\n#x.Prev().stringConvert()\n\n#NameError: Non - existant date\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.47111111879348755, "alphanum_fraction": 0.4933333396911621, "avg_line_length": 12.8125, "blob_id": "2039aed2f1758e7c8754724ef0a75ed8f5617555", "content_id": "c2217b3e9ea62172eeb0bc38b1159ed248e4d563", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 29, "num_lines": 16, "path": "/finalExam/prob51.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\ndef exam1(x):\n try:\n a=x\n b=x[:]\n\n return a/b\n\n 
except ZeroDivisionError:\n return 1\n except IndexError:\n return 2\n except TypeError:\n return 3\n\nL = []\nprint(exam1(L))\n\n\n\n" }, { "alpha_fraction": 0.523809552192688, "alphanum_fraction": 0.5374149680137634, "avg_line_length": 13.800000190734863, "blob_id": "9746ea6f937723265efb8aa82e946e9d032a0d6e", "content_id": "7865b0009a3729c2dc01ee9a5a2717c7ddbabec2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 147, "license_type": "no_license", "max_line_length": 27, "num_lines": 10, "path": "/finalExam/prob20.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "ay_oh = []\n\nfor i in range (5):\n ayyy_ohhh = []\n for j in range(i+1):\n ayyy_ohhh.append(j)\n\n ay_oh.append(ayyy_ohhh)\n\nprint (ay_oh)" }, { "alpha_fraction": 0.4000000059604645, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 13.142857551574707, "blob_id": "488766758b3cf3799c49596b58fdb0b7e939037b", "content_id": "3c9ac3553d435e347fa92a766c383456f913b2ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 100, "license_type": "no_license", "max_line_length": 28, "num_lines": 7, "path": "/finalExam/prob55.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "x = 4\n\nfor a in range(1,x+1):\n for b in range(1,a+1):\n print(a*b,end = ' ')\n\n print()\n\n" }, { "alpha_fraction": 0.34545454382896423, "alphanum_fraction": 0.41818180680274963, "avg_line_length": 8.166666984558105, "blob_id": "19a88f3323ffc91a2bdb7b53ea3594401cd596ca", "content_id": "ed60a545c8656a13b182042424369320655fb867", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 55, "license_type": "no_license", "max_line_length": 12, "num_lines": 6, "path": "/finalExam/prob17.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "a = 1\nb = [1,2,3]\nc = \"asd\"\n\nfor a in b :\n print(c)\n" }, { "alpha_fraction": 0.5626911520957947, "alphanum_fraction": 0.5856269001960754, "avg_line_length": 20.633333206176758, "blob_id": "50ce1d0e4b80ed799fad2065c7d2d5e182f6c572", "content_id": "18dd61a0a028df12d694f2c4902c269b297801b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "no_license", "max_line_length": 60, "num_lines": 30, "path": "/finalExam/prob1.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\nclass dog:\n def __init__(self,breed,name,weight):\n self.breed = breed\n self.name = name\n self.weight = weight\n self.age = 0\n self.tricks = []\n\n def speak(self):\n print(\"speak method\")\n\n def learnTrick(self,newTrick):\n self.tricks.append(newTrick)\n \n def getVetRec(self):\n return self.name, self.breed, self.age , self.weight\n\n def haveBDay(self):\n self.age += 1\n print(\"happy \", self.name,\" !\")\n\ndog1 = dog(\"asad\",\"sasd\",500)\ndog2 = dog1\ndog2.learnTrick(\"trick1\")\ndog3 = dog2\ndog3.learnTrick(\"tricl2\")\nprint(\"done\")\n\nhello = dog1.getVetRec()\nprint(type(hello))\n\n\n\n\n" }, { "alpha_fraction": 0.4576271176338196, "alphanum_fraction": 0.5254237055778503, "avg_line_length": 10.800000190734863, "blob_id": "b5f91eef24b32946c9d2ee754435d977f9dde7ef", "content_id": "53f31e1139d05fee4abb947b1fa47f7693e07645", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 19, "num_lines": 10, "path": "/finalExam/prob23.py", 
"repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\ndef f1(x,y):\n return x+y\n\ndef f2(x,y,f):\n return f1(x,y)\n\ndef f3(x,y,f):\n return f(x,y,f)\n\nprint(f3(1,1,f2))" }, { "alpha_fraction": 0.2918919026851654, "alphanum_fraction": 0.3837837874889374, "avg_line_length": 12, "blob_id": "91fe023bcab32a487383fe78730ecf760b5f15be", "content_id": "a0f11d0bb13e76b0c946205d3a75709fe4c6842d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 185, "license_type": "no_license", "max_line_length": 36, "num_lines": 14, "path": "/finalExam/prob59.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n\nx = 6\ny = 0\n\nif x >= 2 and y != 0 and (x/y) >2:\n print(\"1\")\n\n#if x >= 2 and (x/y) > 2 and y != 0:\n #print(\"2\")\n\nx = 1\ny = 0\n\nif x >= 2 and (x/2) > 2 and y != 0:\n print(\"3\")\n\n" }, { "alpha_fraction": 0.45894429087638855, "alphanum_fraction": 0.4809384047985077, "avg_line_length": 10.689655303955078, "blob_id": "bcbc4aecf8a0e278f1b3c848fbc159af67c0ba0d", "content_id": "153b8a5a199c08b159e8e6a29b76d363739fddd0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 682, "license_type": "no_license", "max_line_length": 44, "num_lines": 58, "path": "/Week 7/Lecture 7.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n\n'''\nwhile loops\n\ndifferent things you could do with them\n\nexceptions to the rules, copy down for tests\n\n\n\n\n'''\n\ndef main():\n total = 0.0\n moredata = \"Yes\"\n\n while moredata[0] == \"Yes\":\n x = float(input(\"enter num: \"))\n total = total + x\n moredata = input(\"enter more?: \")\n\n print(\"the total is\", total)\n\nmain()\n\na = 1\nb=2\nc=3\n\n\ndef main2():\n while a <= b <= c:\n print(b)\n\n break\n\nmain2()\n\ndef main3():\n\n while a <= b:\n while b <= c:\n print(c)\n\n break\n\ndef main3():\n a = 2\n b = 5\n c = 4\n\n while(a or b and not c):\n print(\"fuck\")\n a = a -1 \n\n#order: not, and, or\n\nmain3()\n\n\n" }, { "alpha_fraction": 0.7310924530029297, "alphanum_fraction": 0.7731092572212219, "avg_line_length": 58.5, "blob_id": "f81a7f3d165b073df6bfdf80b76eab9e8536aab2", "content_id": "0ec72b1d14516ad81813a2f302c4876f1e321d54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 119, "license_type": "no_license", "max_line_length": 103, "num_lines": 2, "path": "/README.md", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "# survivingD04\nA study-guide to 1D04, updated weekly, hopefully. Feel free to copy and use for ur own purposes, enjoy!\n" }, { "alpha_fraction": 0.482917457818985, "alphanum_fraction": 0.500575840473175, "avg_line_length": 27.282608032226562, "blob_id": "5facda941ac58aa9d53872552341e70a600a7b86", "content_id": "8895c14f2c8ed2736828353a032ea0ed321eef2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2605, "license_type": "no_license", "max_line_length": 99, "num_lines": 92, "path": "/Week 6/test 1 review.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n\nfrom PIL import Image\n\n\n\n\ndef main():\n\n nameof= \"ttt.png\"\n\n cols = int(80)\n\n scale = float(0.32)\n\n image = convert(nameof, cols, scale, moreLevels)\n\n\n\n\n\n\n\ndef convert(fileName, cols, scale, moreLevels):\n\n gscale1 = \"$@B%8&WM#*oahkbdpqwmZO0QLCJUYXzcvunxrjft/\\|()1{}[]?-_+~<>i!lI;:,\\\"^`'. 
\"\n # 10 levels of grey - THIS CODE DOES NOT NEED TO BE EDITED\n gscale2 = '@%#*+=-:. '\n\n # open image in given file path and convert to greyscale - THIS CODE DOES NOT NEED TO BE EDITED\n image = Image.open(fileName).convert('L')\n\n '''\n\n # store dimensions of image using size method (returns a list)\n W, H = image.size()\n print(\"input image dims: %d x %d\" % (W, H))\n # compute width of tile/column\n w = _______\n # compute tile/row height based on aspect ratio and scale\n h = _______\n # compute number of rows - must be an integer value\n rows = ______\n\n # These print statements tell the user the dimensions of the image and of the tiles\n print(\"cols: %d, rows: %d\" % (cols, rows))\n print(\"tile dims: %d x %d\" % (w, h))\n \n # check if image size is too small for given cols or rows\n if _______:\n print(\"Image too small for specified cols!\")\n exit(0)\n \n # END OF PART ONE -----------------------------------------------------------------------\n\n # START OF PART THREE (20%)-------------------------------------------------------------------\n # ascii image is a list of character strings\n aimg = []\n\n # generate list of dimensions using nested for loop\n # y1 pattern: 0, h, 2h, 3h, ...; y2 pattern: h, 2h, 3h, 4h, ...\n for j in range(rows):\n y1 = ____\n y2 = ____\n # correct last tile\n if j == rows-1:\n y2 = H\n # append an empty string\n aimg.append(\"\")\n for i in range(cols):\n # crop image to tile\n x1 = ____\n x2 = ____\n # correct last tile\n if i == cols-1:\n x2 = W\n # crop image to extract tile\n img = image.crop((x1, y1, x2, y2))\n\n # get average luminance of cropped tile (it should be an integer)\n avg = ________\n # look up ascii char by generating a string index based on avg\n if moreLevels:\n gsval = gscale1[int((avg*_)/255)]\n else:\n gsval = gscale2[int((avg*_)/255)]\n # append ascii char to string\n aimg[j] += gsval\n \n # return txt image as a list of strings (1 string = 1 row of text file)\n return aimg\n '''\n \n return(1)\n\n" }, { "alpha_fraction": 0.2786885201931, "alphanum_fraction": 0.35519126057624817, "avg_line_length": 14.083333015441895, "blob_id": "b91dd9fb9295611526053c2a44b81679bb409b39", "content_id": "5f7f1160d0f805f464fa3b6336154705f8e6271b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "no_license", "max_line_length": 38, "num_lines": 12, "path": "/finalExam/prob22.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\ndef myst(n1,n2):\n g = 1\n k = 2\n while k <= n1 and k <= n2:\n if n1 % k == 0 and n2 % k== 0:\n g = k\n \n k+= 1\n\n return g\n\nprint(myst(4,12))\n\n" }, { "alpha_fraction": 0.574508547782898, "alphanum_fraction": 0.5897273421287537, "avg_line_length": 13.467889785766602, "blob_id": "4dacfbf8c2fe02e26fd3be5685ff469158a55bba", "content_id": "54f2b5fe07a2871fe59f90c818c71038f9ce4a2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1577, "license_type": "no_license", "max_line_length": 113, "num_lines": 109, "path": "/Week 7/Lab 7.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n'''\nLab 7 - minor\n\nReview:\n\n.split() - converts from string to list\n\n.join() - converts from list to string\n\n'''\n\n#join demo\ndef demo():\n strlist = ['H', 'e', 'l', 'l', 'o']\n print(\"*\".join(strlist))\n\n#demo()\n\n\n\n\n#function demo\ndef demo2():\n double = x * 2\n quad = x * 4\n return double, quad\n\n#answ = demo2(2)\n#print(answ[1]) # using the 
touple method\n\n\n\n#exception handling\n\ndef demo3(x):\n\n try:\n foo = 1/(x**2)\n\n return foo\n\n except:\n\n return \"error\"\n\n#print(demo3(0))\n\n\n\n#ACTUAL LAB\n\ndef breakSentence(string):\n\n foo = string.split()\n\n return foo\n\ndef combineSentence(strList):\n\n foo = \" \".join(strList)\n\n return foo\n\ndef breakParagraph(paragraph):\n\n foo2 = []\n\n foo1 = paragraph.split(\"\\n\")\n\n for i in range(len(foo1)):\n foo2.append(foo1[i].split())\n\n return foo2\n\ndef combineParagraph(parList):\n\n try:\n\n foo1 = []\n\n for i in range(len(parList)):\n foo1.append(\" \".join(parList[i]))\n\n foo2 = \"\\n\".join(foo1)\n\n return foo2\n\n except:\n\n return \"broken\"\n\nprint(breakSentence(\"I will be there for you\"))\nprint(combineSentence(['I', 'will', 'be', 'there', 'for', 'you']))\nprint(breakParagraph(\"I will be there for you\\nwhen the rain starts to pour\"))\n\n#print(combineParagraph([[\"I\",\"will\",\"be\",\"there\",'for','you'],['when', 'the', 'rain', 'starts', 'to', 'pour']]))\nprint(combineParagraph(1))\n\n'''\nLAST TWO SECTIONS\n\nerror:\ninstead of a list put in a number and run it, there will be an error so we can a try/except statement\n\nlong ans:\n\ntest cases:\n\n'''" }, { "alpha_fraction": 0.6561071276664734, "alphanum_fraction": 0.6806009411811829, "avg_line_length": 19.151315689086914, "blob_id": "e8ea38f8b23ca43f4686dbabee20b7d83a7825d1", "content_id": "97642fbddf218c54a973e14a96619ea92754fd3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3062, "license_type": "no_license", "max_line_length": 152, "num_lines": 152, "path": "/Week 3/Lecture 3.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "'''\n\nList operations:\n.append\n.pop\n.remove\n.insert\n.index\netc\n\nfind:\nprint(<value>, end = ' ') the end part specifially\n\nACII characters in a string\n\nprint(string[0:3])\n\n\nstring opperations:\n+\n*\nlen()\nindex through string\n\netc\n\nord()\n\nchr()\n\nSEQUENCES - is an enumerated collection of objects inwhich repetitions are allowed.\n\nthe built in sequences include: \n\n str (immutable ASCII strings)\n\n list (mutable list of values)\n'''\n\nsamplelist = [1,2,3,4,5]\nsamplelist2 = [6,7]\nsamplestring = \"example\"\nsamplestring2 = \" yeet\"\n\ndef listexamples(list1,string1,list2,string2):\n #Lists start counting from 0 (1st number in a list is a position 0)\n print(\n list1, #shows the contents in the variables\n string1 \n )\n\n print(\n len(list1), #shows the length of the list\n len(string1)\n )\n\n print(\n len(list1), #shows the length of the list\n len(string1)\n )\n\n print(\n list1+list2, #combines string with string and list with list\n string1+string2\n )\n\n print(\n list1[0], #prints the 2nd value in the sequence, remeber that sequences start at 0.\n string1[0]\n )\n\n print(\n list1[1]*2, #here, the second valus is multipled by 2 and printed\n string1[1]*2 #here, the second letter is printed twice\n )\n\n print(\n list1[2:4], #here, only the 3 through 5 value is printed\n string1[2:4]\n )\n\n print(\n list1[2:], #here, the variables are printed from the 3 number onwards\n string1[2:]\n )\n\n#listexamples(samplelist,samplestring,samplelist2,samplestring2) # showcases operators shared by both sequence types\n\n'''\nLISTS\n- finite array of values\n- in python, arrays can contain different data types\n- lists are mutable - Values can be changed\n\nSTRINGS\n- finite sequence of characters\n- are defined with single or double quote marks\n- lists are 
immutable - Values cannot be changed\n\n- non printable characters (e.g newline) are represented with a backslash (\\n)\n'''\n\n\n'''\nPython OBJECTS + METHODS\n\nall data types in python are objects with predefined methods, for lists and strings we have:\n\n'''\n\ndef anotherlistexample(samplelist,samplestring):\n\n #LISTS\n\n samplelist.append(6) #adds the value '6' to the end of the list \n\n samplelist.pop() #removes and returns the last value in the list, in this case '6'\n\n samplelist.remove(2) #removes a value in the list, in this case looks for '2' and removes it\n\n samplelist.insert(2,8) #insert a value before the index, in this case goes to position '4' and adds '1'. All other values are moved down 1 position.\n\n value = samplelist.index(3) #returns an items location in the list\n\n #STRINGS\n\n value = samplestring.startswith(\"m\") #returns true or false if conditions are matched\n\n value = samplestring.endswith(\"e\") #returns true or false if conditions are not matched\n\n value = samplestring.rfind(\"p\") #finds the rightmost p and returns its position value\n\n \n\n print('end')\n\nanotherlistexample(samplelist,samplestring)\n\n\n\n\n\n\n\n\n\n'''\n\n00100100\n\n\n'''" }, { "alpha_fraction": 0.5414710640907288, "alphanum_fraction": 0.5805946588516235, "avg_line_length": 10.814814567565918, "blob_id": "c470f01370c388ed7b26828c897913be96257eb5", "content_id": "c840f589bfb4cc0fc3c5da723693d1a161beed5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 639, "license_type": "no_license", "max_line_length": 43, "num_lines": 54, "path": "/0-Introduction/Lab 1.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "#Lab 1 work\n\n'''\nExponents\n\nFor loops \n\nrange() function\n\nFor loop as sigma notation\n\nfunctions\n'''\n\nimport math\n\ndef minor1(number):\n\n e = 0\n sigma = 0\n product = 0\n\n #1a - convergence\n\n e = (1+1/number)**number\n #print(e)\n\n #1b - sigma notation\n \n for something in range(number+1):\n\n sigma += 5*(something**2)\n \n #print(sigma)\n\n #1c - profuct notation\n\n product = 3*1**2\n\n for somethingelse in range(2,number+1):\n\n product *= 3*(somethingelse**2)\n\n #print(product)\n\n return e, sigma, product\n\nresult = minor1(5)\n\nprint(result[0])\n\nprint(result[1])\n\nprint(result[2])\n\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5, "avg_line_length": 4.599999904632568, "blob_id": "ff85d91329fd168ddc39f2bb604ec34dbe15363d", "content_id": "6a7c14973eeba9e9b4814ae0bc48165a98f67708", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 28, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/Week 6/Tutorial 6.py", "repo_name": "OlegGlo/2D04", "src_encoding": "UTF-8", "text": "\n'''\n\nI missed this one\n\n'''" } ]
55
KenjiOhtsuka/pynium
https://github.com/KenjiOhtsuka/pynium
c7bd7c97abfbd35f16b8e7bfe9ea9ac7004bc022
06c01c0c64223230d7a810cfea19570d045dd910
b54f1c3ab68da890b3daa34d014f81563dc6dbde
refs/heads/master
2021-01-18T15:24:36.273290
2018-07-11T13:07:34
2018-07-11T13:07:34
34,391,284
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5485074520111084, "alphanum_fraction": 0.5517413020133972, "avg_line_length": 23.222890853881836, "blob_id": "a8455386e2e32d14b576ab1cb5f86671d687b707", "content_id": "f4c85a29adb1efb5e1100348ca3efdb2517625aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4020, "license_type": "no_license", "max_line_length": 81, "num_lines": 166, "path": "/core/web/web_driver.py", "repo_name": "KenjiOhtsuka/pynium", "src_encoding": "UTF-8", "text": "from enum import IntEnum\nfrom datetime import date\nfrom selenium import webdriver\nfrom .dom_element import DomElement\n\nclass BrowserType(IntEnum):\n Firefox = 0\n Chrome = 1\n\nclass WebDriver():\n def __init__(self, browser_type):\n if (browser_type == BrowserType.Chrome):\n self.driver = webdriver.Chrome()\n else:\n self.driver = webdriver.Firefox()\n\n def get(self, path):\n self.driver.get(path)\n\n def has_element(self, css_selector):\n \"\"\"\n check element existence\n :param css_selector: str\n :return:\n \"\"\"\n if len(self.driver.find_elements_by_css_selector(css_selector)) > 0:\n return True\n return False\n\n def find_element(self, css_selector):\n \"\"\"\n find element by css selector\n :param css_selector: str\n :return:\n \"\"\"\n return DomElement(self.driver.find_element_by_css_selector(css_selector))\n\n def find_elements(self, css_selector):\n \"\"\"\n\n :param css_selector: str\n :return:\n \"\"\"\n elements = self.driver.find_elements_by_css_selector(css_selector)\n return map(lambda e: DomElement(e), elements)\n\n def exec_javascript(self, script):\n \"\"\"\n execute javascript\n :param script:\n :return:\n \"\"\"\n self.driver.execute_script(script)\n return self\n\n def set_cookie(self, cookie_dict):\n \"\"\"\n\n :param cookie_dict:\n :return:\n \"\"\"\n if 'name' in cookie_dict and 'value' in cookie_dict:\n self.driver.add_cookie(cookie_dict)\n return self\n raise Exception('parameter should contain \"name\" and \"value\" entry.')\n\n def delete_cookie(self, cookie_name):\n \"\"\"\n delete cookie\n exception doesn't occur when you delete non-existing cookie\n :param cookie_name:\n :return:\n \"\"\"\n self.driver.delete_cookie(cookie_name)\n return self\n\n def delete_all_cookies(self):\n \"\"\"\n delete all cookies\n :return:\n \"\"\"\n self.driver.delete_all_cookies()\n return self\n\n def get_cookie(self, cookie_name):\n \"\"\"\n get cookie value\n :param cookie_name:\n :return:\n \"\"\"\n return self.driver.get_cookie(cookie_name)\n\n def get_whole_cookie(self) -> dict:\n \"\"\"\n get whole cookie\n :return:\n \"\"\"\n return self.driver.get_cookies()\n\n def quit(self):\n \"\"\"\n close browser\n :return:\n \"\"\"\n self.driver.quit()\n return self\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.quit()\n\nclass LogLevel(IntEnum):\n Off = 0\n Severe = 1\n Warning = 2\n Info = 3\n Debug = 4\n All = 5\n\nclass LogType(IntEnum):\n pass\n\nclass Log():\n def __init__(self, log_dict):\n self.__message = log_dict['message']\n if 'SEVERE' == log_dict['level']:\n self.__level = LogLevel.Severe\n elif 'WARNING' == log_dict['level']:\n self.__level = LogLevel.Warning\n elif 'INFO' == log_dict['level']:\n self.__level = LogLevel.Info\n elif 'DEBUG' == log_dict['level']:\n self.__level = LogLevel.Debug\n else:\n self.__level = None\n self.__type = log_dict['type']\n self.__timestamp = log_dict['timestamp']\n\n def get_message(self):\n return self.__message\n\n def get_type(self):\n return self.__type\n\n 
def get_level(self):\n return self.__level\n\n def get_timestamp(self):\n return date.fromtimestamp(self.__timestamp / 1000)\n\n def get_timestamp_int(self):\n return self.__timestamp\n\nclass LogFactory():\n @staticmethod\n def create_log(log_dict):\n return Log(log_dict)\n\n @staticmethod\n def create_log_list(log_dict_list):\n return map(\n lambda l: LogFactory.create_log(l),\n log_dict_list\n )" }, { "alpha_fraction": 0.7972972989082336, "alphanum_fraction": 0.7972972989082336, "avg_line_length": 23.66666603088379, "blob_id": "e0d6c5d7fe300dff7491f34ba0faa523cf80701e", "content_id": "ccfe1559bd9efe8b074eba154ac4a4d2dd17ae1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 74, "license_type": "no_license", "max_line_length": 63, "num_lines": 3, "path": "/README.md", "repo_name": "KenjiOhtsuka/pynium", "src_encoding": "UTF-8", "text": "# pynium\n\nThis project is started for creating nice wrapper for selenium.\n" }, { "alpha_fraction": 0.725806474685669, "alphanum_fraction": 0.725806474685669, "avg_line_length": 29.5, "blob_id": "4f4fb2659e88a078e405d7a06ca79fe5b6082697", "content_id": "2c3fee316138264253bd7ec549cd4147496f7b9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 30, "num_lines": 2, "path": "/core/__init__.py", "repo_name": "KenjiOhtsuka/pynium", "src_encoding": "UTF-8", "text": "from .web.web_driver import *\nfrom .web.dom_element import *\n\n" }, { "alpha_fraction": 0.5636098384857178, "alphanum_fraction": 0.5639746785163879, "avg_line_length": 25.611650466918945, "blob_id": "541b1ea43aebd82baff115b6a67c9c9cc669943d", "content_id": "ce6d13dba05aecb64946c62c081b9054c5680ef5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8222, "license_type": "no_license", "max_line_length": 81, "num_lines": 309, "path": "/core/web/dom_element.py", "repo_name": "KenjiOhtsuka/pynium", "src_encoding": "UTF-8", "text": "from datetime import date\nfrom selenium.webdriver.remote import webelement\nfrom selenium.webdriver.common.by import By\n\nclass DomElement():\n def __init__(self, web_element):\n \"\"\"\n\n :param web_element: webelement\n :return:\n \"\"\"\n self.web_element = web_element\n\n def has_element(self, css_selector) -> str:\n \"\"\"\n\n :param css_selector:\n :return:\n \"\"\"\n if 0 < len(self.web_element.find_elements_by_css_selector(css_selector)):\n return True\n return False\n\n def find_element(self, css_selector):\n return self.web_element.find_element_by_css_selector(css_selector)\n\n def find_elements(self, css_selector) -> list:\n elements = self.web_element.find_elements_by_css_selector(css_selector)\n return DomElementFactory.create_dom_element_list(elements)\n\n def click(self):\n \"\"\"\n click the element\n :return:\n \"\"\"\n self.web_element.click()\n return self\n\n def double_click(self):\n \"\"\"\n double click the element\n :return:\n \"\"\"\n driver = self.web_element.parent\n driver.double_click(self)\n return self\n\n def get_classes(self) -> list:\n \"\"\"\n get classes of element and return sorted list\n :return: list\n \"\"\"\n class_attribute = self.get_attribute('class')\n if class_attribute == None:\n return []\n classes = list(set(class_attribute.split()))\n classes.sort()\n return classes\n\n def get_attribute(self, attribute_name) -> str:\n return self.web_element.get_attribute(attribute_name)\n\n def 
has_attribute(self, attribute_name) -> bool:\n if self.web_element.get_attribute(attribute_name) == None:\n return False\n return True\n\n def get_tag_name(self) -> str:\n return self.web_element.tag_name\n\n def get_parent(self):\n return self.web_element.find_element(By.xpath('..'))\n\n def get_style(self, property_name):\n return self.web_element.value_of_css_property(property_name)\n\n def get_text(self) -> str:\n return self.web_element.text\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n return\n\nclass Input(DomElement):\n def get_value(self) -> str:\n return self.get_attribute('value')\n\nclass TextInput(Input):\n def set_input_text(self, value):\n self.web_element.clear()\n self.web_element.sendKeys(value)\n return self\n\nclass NumberInput(Input):\n def set_input_text(self, value):\n self.web_element.clear()\n self.web_element.sendKeys(value)\n return self\n\nclass CheckBox(Input):\n def is_checked(self) -> bool:\n return self.web_element.is_selected()\n\n def check(self):\n if not self.is_checked():\n self.web_element.click()\n return self\n\nclass RadioButton(Input):\n def is_selected(self):\n return self.web_element.is_selected()\n\n def select(self):\n if not self.is_selected():\n self.web_element.click()\n return self\n\nclass Button(DomElement):\n def get_type(self):\n if 'button' == self.get_attribute('type'):\n return 'button'\n else:\n return 'submit'\n\nclass TextArea(DomElement):\n def get_rows(self) -> int:\n \"\"\"\n get rows value as int\n :return:\n \"\"\"\n rows = self.get_attribute('rows')\n if (None == rows or 0 == len(rows)):\n return None\n return int(rows)\n\n def get_cols(self) -> int:\n \"\"\"\n get cols value as int\n :return:\n \"\"\"\n cols = self.get_attribute('cols')\n if (None == cols or 0 == len(cols)):\n return None\n return int(cols)\n\nclass Anchor(DomElement):\n def get_link_url(self) -> str:\n return self.get_attribute('href')\n\n def get_title(self) -> str:\n return self.get_attribute('title')\n\n def get_target(self) -> str:\n return self.get_attribute('target')\n\nclass Img(DomElement):\n def get_img_src(self) -> str:\n return self.get_attribute('src')\n\n def get_alt(self) -> str:\n return self.get_attribute('alt')\n\nclass Ul(DomElement):\n def get_items(self) -> list:\n li_list = self.web_element.find_elements(By.xpath('/li'))\n return DomElementFactory.create_dom_element_list(li_list)\n\nclass Ol(DomElement):\n def get_items(self) -> list:\n li_list = self.web_element.find_elements(By.xpath('/li'))\n return DomElementFactory.create_dom_element_list(li_list)\n\nclass DomElementFactory():\n @staticmethod\n def create_dom_element(ingredient) -> DomElement:\n \"\"\"\n\n :param ingredient: webelement\n :return:\n \"\"\"\n tag_name = ingredient.tag_name\n if 'input' == tag_name:\n type = ingredient.get_attribute('type')\n if 'text' == type:\n return TextInput(ingredient)\n elif 'number' == type:\n return NumberInput(ingredient)\n elif 'radio' == type:\n return RadioButton(ingredient)\n elif 'checkbox' == type:\n return CheckBox(ingredient)\n return Input(ingredient)\n elif 'textarea' == tag_name:\n return TextArea(ingredient)\n elif 'a' == tag_name:\n return Anchor(ingredient)\n elif 'img' == tag_name:\n return Img(ingredient)\n elif 'ul' == tag_name:\n return Ul(ingredient)\n elif 'ol' == tag_name:\n return Ol(ingredient)\n return DomElement(ingredient)\n\n @staticmethod\n def create_dom_element_list(ingredient_list) -> DomElement:\n \"\"\"\n\n :param ingredient_list: list\n :return:\n \"\"\"\n 
return map(\n lambda i: DomElementFactory.create_dom_element(i),\n ingredient_list)\n\nclass Cookie():\n def __init__(self, name):\n self.set_name(name)\n self.set_value('')\n self.set_path(None)\n self.set_domain(None)\n self.set_secure(None)\n self.set_expiry(None)\n\n def set_name(self, name):\n self.__name = name\n\n def get_name(self):\n return self.__name\n\n def set_value(self, value):\n self.__value = value\n return self\n\n def get_value(self):\n return self.__value\n\n def set_secure(self, value):\n \"\"\"\n\n :param value: bool\n :return:\n \"\"\"\n self.__secure = value\n return self\n\n def get_secure(self) -> bool:\n return self.__secure\n\n def set_path(self, value):\n \"\"\"\n\n :param value: str\n :return:\n \"\"\"\n self.__path = value\n return self\n\n def get_path(self) -> str:\n return self.__path\n\n def set_domain(self, value):\n self.__domain = value\n return self\n\n def get_domain(self):\n return self.__domain\n\n def set_expiry(self, value):\n self.__expiry = value\n return self\n\n def get_expiry(self):\n return date.fromtimestamp(self.__expiry)\n\n def get_expiry_int(self):\n return self.__expiry\n\n def to_dict(self):\n cookie_dict = {\n 'name': self.get_name(),\n 'value': self.get_value()}\n if None != self.get_path():\n cookie_dict['path'] = self.get_path()\n if None != self.get_domain():\n cookie_dict['domain'] = self.get_domain()\n if None != self.get_secure():\n cookie_dict['secure'] = self.get_secure()\n if None != self.get_expiry():\n cookie_dict['expiry'] = self.get_expiry_int()\n return cookie_dict\n\nclass CookieFactory():\n @staticmethod\n def create_cookie(cookie_dict):\n return Cookie(cookie_dict['name']).\\\n set_domain(cookie_dict['domain']).\\\n set_value(cookie_dict['value']).\\\n set_secure(cookie_dict['secure']).\\\n set_value(cookie_dict['value']).\\\n set_expiry(cookie_dict['expiry'])\n\n @staticmethod\n def create_cookie_list(cookie_dict_list):\n return map(\n lambda c: CookieFactory.create_cookie(c),\n cookie_dict_list)" } ]
4
galyshev/grid
https://github.com/galyshev/grid
e933659caecb3a7dcdeabf5f6bfd5658af956641
a0d8f2fab2d7f011f40b4875cfaf86730631190a
41f7f48a3bfe8c7ff696de8f680538ab7f023e6a
refs/heads/master
2021-01-16T09:23:28.020628
2020-04-02T08:43:55
2020-04-02T08:43:55
243,059,896
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5182567834854126, "alphanum_fraction": 0.5594817399978638, "avg_line_length": 20.769229888916016, "blob_id": "f0e9ac20d2ce6bc7a91ae801277bebb1a1ff5078", "content_id": "a4a84ac56d787f60e3e8cb860b23c41863befa7d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 849, "license_type": "no_license", "max_line_length": 78, "num_lines": 39, "path": "/learn.py", "repo_name": "galyshev/grid", "src_encoding": "UTF-8", "text": "from tkinter import *\n\n\n\nclass Main_window(Frame):\n def __init__(self, root):\n super().__init__(root)\n self.btn_calc()\n\n def btn_calc(self):\n btn_toolb = Frame(bg='red', width=600, height=50)\n btn_toolb.place(x=0, y=0)\n btn_clc = Button(btn_toolb, text='test', width=10, command=self.click)\n btn_clc.place(x=10, y=10)\n\n\n def click(self):\n Second_window()\n\n\nclass Second_window(Toplevel):\n def __init__(self):\n super().__init__(root)\n self.open_window()\n\n def open_window(self):\n self.title('second')\n self.geometry('400x400+400+20')\n self.grab_set()\n\n\nif __name__ == '__main__':\n root = Tk()\n app = Main_window(root)\n app.pack()\n root.title('test')\n root.geometry('600x600+400+20')\n root.resizable(FALSE, FALSE)\n root.mainloop()\n" }, { "alpha_fraction": 0.5101377367973328, "alphanum_fraction": 0.5979341864585876, "avg_line_length": 34.33108139038086, "blob_id": "f918635106fd77d57ddaff97bc1b5e901196857f", "content_id": "8adf28cec41bbe9b19867f3a4d8bd89be27d0df8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5403, "license_type": "no_license", "max_line_length": 78, "num_lines": 148, "path": "/calc.py", "repo_name": "galyshev/grid", "src_encoding": "UTF-8", "text": "import math\nfrom tkinter import *\nfrom tkinter import PhotoImage\n\n\n\ndef grid6(flut, length, width, botl_heigth, grid_heigth):\n app2 = Tk()\n app2.title('6')\n app2.geometry('1020x550+450+250')\n app2.resizable(FALSE, FALSE)\n img6 = PhotoImage(master=app2, file='./pics/gr6.png')\n lbl6 = Label(app2, image=img6)\n lbl6.place(x=0, y=0)\n if flut == 'E':\n s_flut = 2\n else:\n s_flut = 3\n\n width = int(width)\n length = int(length)\n botl_heigth = int(botl_heigth)\n grid_heigth = int(grid_heigth)\n\n # расчет внутренних размеров ящика\n length_box = width * 3 + s_flut * 2 + 3\n width_box = length * 2 + s_flut + 3\n height_box = botl_heigth + s_flut + 4\n lbl = Label(app2, text=length_box, font=(15), fg='red')\n lbl.place(x=100, y=52)\n lbl = Label(app2, text=width_box, font=(15), fg='red')\n lbl.place(x=225, y=52)\n lbl = Label(app2, text=height_box, font=(15), fg='red')\n lbl.place(x=340, y=52)\n\n # расчет габаритов решеток\n l1 = width_box - 5\n l2 = length_box - 5\n height_pros = grid_heigth /2\n lbl = Label(app2, text=l1, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=165, y=108)\n lbl = Label(app2, text=l2, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=710, y=105)\n lbl = Label(app2, text=grid_heigth, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=340, y=270)\n lbl = Label(app2, text=grid_heigth, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=962, y=270)\n lbl = Label(app2, text=height_pros, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=220, y=325)\n\n\n # расчет однопросечной решетки\n pros1 = (l1 - 5) / 2\n prosv1 = l1/2\n lbl = Label(app2, text=pros1, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=100, y=410)\n lbl = Label(app2, text=pros1, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=230, y=410)\n lbl = 
Label(app2, text=prosv1, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=220, y=205)\n lbl = Label(app2, text=prosv1, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=90, y=205)\n\n # расчет двупросечной решетки\n pros2_centr = width\n pros2_end = (l2 - pros2_centr - 10) / 2\n lbl = Label(app2, text=pros2_centr, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=720, y=410)\n lbl = Label(app2, text=pros2_end, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=570, y=410)\n lbl = Label(app2, text=pros2_end, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=845, y=410)\n pros2_centr_v2 = width+5\n pros2_end_v2 = (l2 - pros2_centr_v2) / 2\n lbl = Label(app2, text=pros2_centr_v2, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=720, y=220)\n lbl = Label(app2, text=pros2_end_v2, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=570, y=219)\n lbl = Label(app2, text=pros2_end_v2, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=845, y=220)\n\n mainloop()\n\ndef grid6_box(length, width, botl_heigth, grid_heigth):\n app2 = Tk()\n app2.title('6')\n app2.geometry('1020x550+450+250')\n app2.resizable(FALSE, FALSE)\n img6 = PhotoImage(master=app2, file='./pics/gr6.png')\n lbl6 = Label(app2, image=img6)\n lbl6.place(x=0, y=0)\n\n width = int(width)\n length = int(length)\n botl_heigth = int(botl_heigth)\n grid_heigth = int(grid_heigth)\n\n # расчет габаритов решеток\n l1 = width - 5\n l2 = length - 5\n height_pros = grid_heigth / 2\n lbl = Label(app2, text=l1, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=165, y=108)\n lbl = Label(app2, text=l2, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=710, y=105)\n lbl = Label(app2, text=grid_heigth, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=340, y=270)\n lbl = Label(app2, text=grid_heigth, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=962, y=270)\n lbl = Label(app2, text=height_pros, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=220, y=325)\n\n # расчет однопросечной решетки\n pros1 = (l1 - 5) / 2\n prosv1 = l1 / 2\n lbl = Label(app2, text=pros1, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=100, y=410)\n lbl = Label(app2, text=pros1, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=230, y=410)\n lbl = Label(app2, text=prosv1, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=220, y=205)\n lbl = Label(app2, text=prosv1, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=90, y=205)\n\n # расчет двупросечной решетки\n\n pros2_centr_v2 = math.ceil(l2 / 3)\n pros2_centr = pros2_centr_v2 - 3\n pros2_end = (l2 - pros2_centr - 10) / 2\n lbl = Label(app2, text=pros2_centr, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=720, y=410)\n lbl = Label(app2, text=pros2_end, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=570, y=410)\n lbl = Label(app2, text=pros2_end, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=845, y=410)\n\n pros2_end_v2 = (l2 - pros2_centr_v2) / 2\n lbl = Label(app2, text=pros2_centr_v2, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=720, y=220)\n lbl = Label(app2, text=pros2_end_v2, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=570, y=219)\n lbl = Label(app2, text=pros2_end_v2, font=('Arial Bold', 18), fg='blue')\n lbl.place(x=845, y=220)\n\n\n mainloop()\n\n# grid6_box( 306, 206, 250, 200)" }, { "alpha_fraction": 0.5260796546936035, "alphanum_fraction": 0.6205832958221436, "avg_line_length": 39.7599983215332, "blob_id": "71487d6ebf0352bcd3f52e803f1330d46257434b", "content_id": "9cd24f01cba29c6e21e4d36c8938e1ec48bbc9e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 7309, "license_type": "no_license", "max_line_length": 93, "num_lines": 175, "path": "/calc12.py", "repo_name": "galyshev/grid", "src_encoding": "UTF-8", "text": "import math\nfrom tkinter import *\nfrom tkinter import PhotoImage\n\n\n\ndef grid12(flut, length, width, botl_heigth, grid_heigth):\n app2 = Tk()\n app2.title('12')\n app2.geometry('1020x550+450+250')\n app2.resizable(FALSE, FALSE)\n img = PhotoImage(master=app2, file='./pics/grid12.png')\n lbl6 = Label(app2, image=img)\n lbl6.place(x=0, y=0)\n if flut == 'E':\n s_flut = 2\n else:\n s_flut = 3\n\n width = int(width)\n length = int(length)\n botl_heigth = int(botl_heigth)\n grid_heigth = int(grid_heigth)\n\n # расчет внутренних размеров ящика\n length_box = width * 4 + s_flut * 3 + 3\n width_box = length * 3 + s_flut * 2 + 3\n height_box = botl_heigth + s_flut + 4\n\n lbl = Label(app2, text=length_box, font=(15), fg='red')\n lbl.place(x=100, y=52)\n lbl = Label(app2, text=width_box, font=(15), fg='red')\n lbl.place(x=225, y=52)\n lbl = Label(app2, text=height_box, font=(15), fg='red')\n lbl.place(x=340, y=52)\n\n # расчет габаритов решеток\n l1 = width_box - 5\n l2 = length_box - 5\n height_pros = grid_heigth /2\n\n lbl_l1 = Label(app2, text=l1, font=('Arial Bold', 18), fg='blue')\n lbl_l1.place(x=745, y=110)\n lbl_l2 = Label(app2, text=l2, font=('Arial Bold', 18), fg='blue')\n lbl_l2.place(x=220, y=110)\n lbl_w1 = Label(app2, text=grid_heigth, font=('Arial Bold', 18), fg='blue')\n lbl_w1.place(x=480, y=310)\n lbl_w2 = Label(app2, text=grid_heigth, font=('Arial Bold', 18), fg='blue')\n lbl_w2.place(x=940, y=310)\n lbl_half_w = Label(app2, text=height_pros, font=('Arial Bold', 18), fg='blue')\n lbl_half_w.place(x=375, y=385)\n\n\n # расчет двухпросечной решетки\n pros1_centr = length\n pros1_end = (l1 - pros1_centr - 10) / 2\n pros1_centr_h1 = pros1_centr + 5\n pros1_end_h1 = (l1 - pros1_centr_h1) / 2\n\n lbl_pros1_centr = Label(app2, text=pros1_centr, font=('Arial Bold', 18), fg='blue')\n lbl_pros1_centr.place(x=745, y=485)\n lbl_pros1_end = Label(app2, text=pros1_end, font=('Arial Bold', 18), fg='blue')\n lbl_pros1_end.place(x=640, y=485)\n lbl_pros1_end = Label(app2, text=pros1_end, font=('Arial Bold', 18), fg='blue')\n lbl_pros1_end.place(x=845, y=485)\n lbl_pros1_end_h1 = Label(app2, text=pros1_end_h1, font=('Arial Bold', 18), fg='blue')\n lbl_pros1_end_h1.place(x=640, y=250)\n lbl_pros1_end_h1 = Label(app2, text=pros1_end_h1, font=('Arial Bold', 18), fg='blue')\n lbl_pros1_end_h1.place(x=845, y=250)\n lbl_pros1_centr_h1 = Label(app2, text=pros1_centr_h1, font=('Arial Bold', 18), fg='blue')\n lbl_pros1_centr_h1.place(x=740, y=250)\n\n # расчет трехпросечной решетки\n pros2_centr = width\n pros2_end = (l2 - pros2_centr*2 - 15) / 2\n pros2_centr_v2 = width + 5\n pros2_end_v2 = (l2 - pros2_centr_v2 * 2) / 2\n\n lbl_pros2_centr = Label(app2, text=pros2_centr, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_centr.place(x=175, y=485)\n lbl_pros2_centr = Label(app2, text=pros2_centr, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_centr.place(x=275, y=485)\n lbl_pros2_end = Label(app2, text=pros2_end, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_end.place(x=70, y=485)\n lbl_pros2_end = Label(app2, text=pros2_end, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_end.place(x=375, y=485)\n lbl_pros2_centr_v2 = Label(app2, text=pros2_centr_v2, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_centr_v2.place(x=175, y=250)\n lbl_pros2_centr_v2 = Label(app2, text=pros2_centr_v2, font=('Arial Bold', 18), fg='blue')\n 
lbl_pros2_centr_v2.place(x=275, y=250)\n lbl_pros2_end_v2 = Label(app2, text=pros2_end_v2, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_end_v2.place(x=75, y=250)\n lbl_pros2_end_v2 = Label(app2, text=pros2_end_v2, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_end_v2.place(x=375, y=250)\n\n mainloop()\n\ndef grid12_box(length, width, botl_heigth, grid_heigth):\n app2 = Tk()\n app2.title('12')\n app2.geometry('1020x550+450+250')\n app2.resizable(FALSE, FALSE)\n img = PhotoImage(master=app2, file='./pics/grid12.png')\n lbl6 = Label(app2, image=img)\n lbl6.place(x=0, y=0)\n\n width = int(width)\n length = int(length)\n grid_heigth = int(grid_heigth)\n\n # расчет габаритов решеток\n l1 = width - 5\n l2 = length - 5\n height_pros = grid_heigth / 2\n\n lbl_l1 = Label(app2, text=l1, font=('Arial Bold', 18), fg='blue')\n lbl_l1.place(x=745, y=115)\n lbl_l2 = Label(app2, text=l2, font=('Arial Bold', 18), fg='blue')\n lbl_l2.place(x=220, y=115)\n lbl_grid_heigth = Label(app2, text=grid_heigth, font=('Arial Bold', 18), fg='blue')\n lbl_grid_heigth.place(x=480, y=310)\n lbl_grid_heigth = Label(app2, text=grid_heigth, font=('Arial Bold', 18), fg='blue')\n lbl_grid_heigth.place(x=940, y=310)\n lbl_half_w = Label(app2, text=height_pros, font=('Arial Bold', 18), fg='blue')\n lbl_half_w.place(x=375, y=385)\n\n\n # расчет двухпросечной решетки\n pros1_centr_h1 = math.ceil(l1 / 3)+3\n pros1_centr = pros1_centr_h1 - 5\n pros1_end = (l1 - pros1_centr - 10) / 2\n pros1_end_h1 = (l1 - pros1_centr_h1) / 2\n\n lbl_pros1_centr = Label(app2, text=pros1_centr, font=('Arial Bold', 18), fg='blue')\n lbl_pros1_centr.place(x=750, y=485)\n lbl_pros1 = Label(app2, text=pros1_end, font=('Arial Bold', 18), fg='blue')\n lbl_pros1.place(x=640, y=485)\n lbl_pros1 = Label(app2, text=pros1_end, font=('Arial Bold', 18), fg='blue')\n lbl_pros1.place(x=845, y=485)\n lbl_pros1_end_h1 = Label(app2, text=pros1_end_h1, font=('Arial Bold', 18), fg='blue')\n lbl_pros1_end_h1.place(x=640, y=250)\n lbl_pros1_end_h1 = Label(app2, text=pros1_end_h1, font=('Arial Bold', 18), fg='blue')\n lbl_pros1_end_h1.place(x=845, y=250)\n lbl_pros1_centr_h1 = Label(app2, text=pros1_centr_h1, font=('Arial Bold', 18), fg='blue')\n lbl_pros1_centr_h1.place(x=750, y=250)\n\n # расчет трехпросечной решетки\n pros2_centr_v2 = math.ceil(l2 / 4)+3\n pros2_centr = pros2_centr_v2 - 5\n pros2_end = (l2 - pros2_centr * 2 - 15) / 2\n pros2_end_v2 = (l2 - pros2_centr_v2 * 2) / 2\n\n lbl_pros2_centr = Label(app2, text=pros2_centr, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_centr.place(x=175, y=485)\n lbl_pros2_centr = Label(app2, text=pros2_centr, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_centr.place(x=275, y=485)\n lbl_pros2_end = Label(app2, text=pros2_end, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_end.place(x=70, y=485)\n lbl_pros2_end = Label(app2, text=pros2_end, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_end.place(x=375, y=485)\n lbl_pros2_centr_v2 = Label(app2, text=pros2_centr_v2, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_centr_v2.place(x=175, y=250)\n lbl_pros2_centr_v2 = Label(app2, text=pros2_centr_v2, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_centr_v2.place(x=275, y=250)\n lbl_pros2_end_v2 = Label(app2, text=pros2_end_v2, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_end_v2.place(x=75, y=250)\n lbl_pros2_end_v2 = Label(app2, text=pros2_end_v2, font=('Arial Bold', 18), fg='blue')\n lbl_pros2_end_v2.place(x=375, y=250)\n\n\n\n\n mainloop()\n\n# grid12_box( 300, 200, 250, 200)" }, { "alpha_fraction": 0.5257219076156616, 
"alphanum_fraction": 0.5638194680213928, "avg_line_length": 36.4636344909668, "blob_id": "092b07a720302087027894066be6f4f0a5ddf5ee", "content_id": "8d971ad225a7dfb22abbc7d75e7305d18723b3bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9367, "license_type": "no_license", "max_line_length": 120, "num_lines": 220, "path": "/main.py", "repo_name": "galyshev/grid", "src_encoding": "UTF-8", "text": "from tkinter import *\nfrom tkinter import messagebox, PhotoImage\nfrom tkinter.ttk import Combobox\nimport calc, calc12\n\n\nclass Main_window(Frame):\n def __init__(self, root):\n super().__init__(root)\n self.input_view()\n\n def input_view(self):\n tollb = Frame(width=500, height=200)\n tollb.place(x=0, y=0)\n lbl_header = Label(tollb, text='Расчет решеток', font=('Arial Bold', 25))\n lbl_header.place(x=120, y=0)\n\n btn_bottl = Button(tollb, text='по размерам бутылки', bg='#C9C539', font=(15), width=20, command=self.cl_botle)\n btn_bottl.place(x=40, y=80)\n\n btn_bottl = Button(tollb, text='по размерам ящика', bg='#C9C539', font=(15), width=20, command=self.cl_box)\n btn_bottl.place(x=270, y=80)\n\n def cl_botle(self):\n Bottle_view()\n\n def cl_box(self):\n Box_view()\n\n\nclass Bottle_view(Toplevel):\n def __init__(self):\n super().__init__(root)\n self.open_window()\n\n def open_window(self):\n self.title('Расчет по размерам бутылки')\n self.geometry('565x400+700+400')\n self.grab_set()\n self.resizable(FALSE, FALSE)\n\n self.img = PhotoImage(master=self, file='./pics/box-6.png')\n self.lbl = Label(self, image=self.img)\n self.lbl.place(x=350, y=25)\n\n # Заголовок всплывающего окна\n lbl = Label(self, text='Расчет по размерам бутылки', font=('Arial Bold', 15))\n lbl.place(x=150, y=10)\n\n # ввод значений\n lbl = Label(self, text='''Построение ведется из расчета - длина бутылки по стороне из меньшего кол-ва вложений,\nширина - из большего. 
Если надо наоборот - ввести ширину бутылки в поле ввода длины, \nа длину бутылки - в поле ввода ширины.\nЕсли бутылка круглая, длину и ширину вводить одинаковую''', font=('Arial Bold', 9))\n lbl.place(x=20, y=300)\n\n lbl = Label(self, text='Длина бутылки (L)', font=('Arial Bold', 10))\n lbl.place(x=20, y=50)\n\n lbl = Label(self, text='Ширина бутылки (W)', font=('Arial Bold', 10))\n lbl.place(x=20, y=100)\n\n lbl = Label(self, text='Высота бутылки в сборе', font=('Arial Bold', 10))\n lbl.place(x=20, y=150)\n\n lbl = Label(self, text='Высота решетки', font=('Arial Bold', 10))\n lbl.place(x=20, y=200)\n\n entry_length = Entry(self, width=15)\n entry_length.place(x=220, y=52)\n entry_length.focus()\n\n entry_width = Entry(self, width=15)\n entry_width.place(x=220, y=102)\n entry_width.focus()\n\n entry_botl_heigth = Entry(self, width=15)\n entry_botl_heigth.place(x=220, y=152)\n entry_botl_heigth.focus()\n\n entry_grid_heigth = Entry(self, width=15)\n entry_grid_heigth.place(x=220, y=202)\n entry_grid_heigth.focus()\n\n combo_bottl = Combobox(self, width=20)\n combo_bottl['values'] = (6, 12)\n combo_bottl.place(x=20, y=270)\n lbl = Label(self, text='Выбрать количество бутылок')\n lbl.place(x=15, y=240)\n\n combo_flut = Combobox(self, width=20)\n combo_flut['values'] = ('E', 'B')\n combo_flut.place(x=220, y=270)\n lbl = Label(self, text='Выбрать профиль решетки')\n lbl.place(x=220, y=240)\n\n def clic():\n if combo_bottl.get() == '':\n messagebox.showinfo('', 'Не выбрано количество бутылок')\n elif combo_flut.get() == '':\n messagebox.showinfo('', 'Не выбран профиль решетки')\n elif entry_length.get() == '':\n messagebox.showinfo('', 'Нужно ввести длину бутыки')\n elif entry_width.get() == '':\n messagebox.showinfo('', 'Нужно ввести ширину бутыки')\n elif entry_grid_heigth.get() == '':\n messagebox.showinfo('', 'Нужно ввести высоту решетки')\n elif entry_botl_heigth.get() == '':\n messagebox.showinfo('', 'Нужно ввести высоту бутыки в сборе')\n else:\n botll_choice = combo_bottl.get()\n flut = combo_flut.get()\n length = int(entry_length.get())\n width = int(entry_width.get())\n botl_heigth = int(entry_botl_heigth.get())\n grid_heigth = int(entry_grid_heigth.get())\n root.destroy()\n if botll_choice == '6':\n calc.grid6(flut, length, width, botl_heigth, grid_heigth)\n if botll_choice == '12':\n calc12.grid12(flut, length, width, botl_heigth, grid_heigth)\n # else:\n # messagebox.showinfo('', 'Расчет для такого количества бутылок не существует')\n\n btn_calc = Button(self, text='Расчет', font=(20), width=65, bg='#C9C539', command=clic)\n btn_calc.place(x=0, y=370)\n\n\nclass Box_view(Toplevel):\n def __init__(self):\n super().__init__(root)\n self.open_window()\n\n def open_window(self):\n self.title('Расчет по размерам ящика')\n self.geometry('565x400+700+400')\n self.grab_set()\n self.resizable(FALSE, FALSE)\n\n # Заголовок всплывающего окна\n lbl = Label(self, text='Расчет по размерам ящика', font=('Arial Bold', 15))\n lbl.place(x=180, y=10)\n\n # ввод значений\n lbl = Label(self, text='''Построение ведется из расчета - длина бутылки по стороне из меньшего кол-ва вложений,\n ширина - из большего. 
Если надо наоборот - ввести ширину ящика в поле ввода длины, \n а длину ящика - в поле ввода ширины.''', font=('Arial Bold', 9))\n lbl.place(x=20, y=300)\n\n lbl = Label(self, text='Длина ящика', font=('Arial Bold', 10))\n lbl.place(x=100, y=50)\n\n lbl = Label(self, text='Ширина ящика', font=('Arial Bold', 10))\n lbl.place(x=100, y=100)\n\n lbl = Label(self, text='Высота ящика', font=('Arial Bold', 10))\n lbl.place(x=100, y=150)\n\n lbl = Label(self, text='Высота решетки', font=('Arial Bold', 10))\n lbl.place(x=100, y=200)\n\n entry_length = Entry(self, width=15)\n entry_length.place(x=350, y=52)\n entry_length.focus()\n\n entry_width = Entry(self, width=15)\n entry_width.place(x=350, y=102)\n entry_width.focus()\n\n entry_heigth = Entry(self, width=15)\n entry_heigth.place(x=350, y=152)\n entry_heigth.focus()\n\n entry_grid_heigth = Entry(self, width=15)\n entry_grid_heigth.place(x=350, y=202)\n entry_grid_heigth.focus()\n\n combo_bottl = Combobox(self, width=20)\n combo_bottl['values'] = (6, 12)\n combo_bottl.place(x=350, y=260)\n lbl = Label(self, text='Выбрать количество бутылок')\n lbl.place(x=85, y=260)\n\n def clic_box():\n if combo_bottl.get() == '':\n messagebox.showinfo('', 'Не выбрано количество бутылок')\n elif entry_length.get() == '':\n messagebox.showinfo('', 'Нужно ввести длину ящика')\n elif entry_width.get() == '':\n messagebox.showinfo('', 'Нужно ввести ширину ящика')\n elif entry_grid_heigth.get() == '':\n messagebox.showinfo('', 'Нужно ввести высоту решетки')\n elif entry_heigth.get() == '':\n messagebox.showinfo('', 'Нужно ввести высоту ящика')\n else:\n botll_choice = combo_bottl.get()\n length = int(entry_length.get())\n width = int(entry_width.get())\n heigth = int(entry_heigth.get())\n grid_heigth = int(entry_grid_heigth.get())\n root.destroy()\n if botll_choice == '6':\n calc.grid6_box(length, width, heigth, grid_heigth)\n if botll_choice == '12':\n calc12.grid12_box(length, width, heigth, grid_heigth)\n # else:\n # messagebox.showinfo('title', 'Расчет для такого количества бутылок не существует')\n\n btn_calc = Button(self, text='Расчет', font=(20), width=65, bg='#C9C539', command=clic_box)\n btn_calc.place(x=0, y=370)\n\n\nif __name__ == '__main__':\n root = Tk()\n app = Main_window(root)\n app.pack()\n root.title('Расчет решеток')\n root.geometry('500x150+700+400')\n root.resizable(FALSE, FALSE)\n root.mainloop()\n" } ]
4
nitstorm/udacity-cs253
https://github.com/nitstorm/udacity-cs253
f8d653f3f32e6cb668a031e607ff720edfdbb12d
7aec556d7ca434463c212853e36c5576485f3d88
902538bb653b87ad816f34d73ba13954c9879499
refs/heads/master
2020-01-21T18:54:42.306362
2014-01-05T17:09:24
2014-01-05T17:09:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5889531373977661, "alphanum_fraction": 0.5915221571922302, "avg_line_length": 29.52941131591797, "blob_id": "3f06a98496cb2f79a868b7395eae06b5fa828ab0", "content_id": "4b9794b2da1638371fe7d40360a9475ae8935059", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1557, "license_type": "no_license", "max_line_length": 78, "num_lines": 51, "path": "/u2_hw/practice.py", "repo_name": "nitstorm/udacity-cs253", "src_encoding": "UTF-8", "text": "import webapp2\nfrom inquiz import valid_day\nfrom inquiz import valid_month\nfrom inquiz import valid_year\nfrom inquiz import escape_html\n\nform=\"\"\"\n<form method=\"post\" action=\"/\">\n\tWhat is your bday?\n\t<br>\n\t<label> Month\t<input type=\"text\" name=\"month\" value=\"%(month)s\"> </label>\n\t<label> Day\t<input type=\"text\" name=\"day\" value=\"%(day)s\"> </label>\n\t<label> Year\t<input type=\"text\" name=\"year\" value=\"%(year)s\"> </label>\n <div>%(error)s</div>\n\t<input type=\"submit\">\t\n</form>\n\n\"\"\"\n\nclass MainPage(webapp2.RequestHandler):\n def write_form(self,error=\"\", month=\"\",day=\"\",year=\"\"):\n self.response.out.write(form % {\"error\":escape_html(error),\n \"month\":escape_html(month),\n \"day\":escape_html(day),\n \"year\":escape_html(year)})\n\n def get(self):\n self.write_form()\n\n def post(self):\n \tuser_month = self.request.get('month')\n \tuser_day = self.request.get('day')\n \tuser_year = self.request.get('year')\n \t\n month = valid_month(user_month)\n day = valid_day(user_day)\n year = valid_year(user_year)\n\n \tif not (year and day and month):\n \t\tself.write_form(\"That doesn't look valid\",user_month,user_day,user_year)\n \telse:\n\t \tself.redirect(\"/thanks\")\n\t\nclass ThanksHandler(webapp2.RequestHandler):\n def get(self):\n self.response.out.write(\"Thanks, totally valid date!\") \n\napplication = webapp2.WSGIApplication([\n ('/', MainPage),\n ('/thanks',ThanksHandler)\n], debug=True)\n" }, { "alpha_fraction": 0.5849893689155579, "alphanum_fraction": 0.5988289713859558, "avg_line_length": 23.93362808227539, "blob_id": "c5ae5c6e27c51ff0926f154d0383de038a9707e5", "content_id": "171a4159b75ba791879cecd8e1b4d54670333427", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5636, "license_type": "no_license", "max_line_length": 98, "num_lines": 226, "path": "/u2_hw/inquiz.py", "repo_name": "nitstorm/udacity-cs253", "src_encoding": "UTF-8", "text": "# -----------\n# User Instructions\n# \n# Modify the valid_month() function to verify \n# whether the data a user enters is a valid \n# month. If the passed in parameter 'month' \n# is not a valid month, return None. 
\n# If 'month' is a valid month, then return \n# the name of the month with the first letter \n# capitalized.\n#\n\nmonths = ['January',\n 'February',\n 'March',\n 'April',\n 'May',\n 'June',\n 'July',\n 'August',\n 'September',\n 'October',\n 'November',\n 'December']\n \ndef valid_month(month):\n month = month.capitalize()\n if month in months:\n return month\n else:\n month_abbvs = dict((m[:3].lower(),m) for m in months)\n month = month[:3].lower()\n return month_abbvs.get(month)\n\n\n\n\nprint(valid_month(\"january\")) #=> \"January\" \nprint(valid_month(\"January\")) #=> \"January\"\nprint(valid_month(\"foo\")) #=> None\nprint(valid_month(\"\")) #=> None\n\n####\n# Using dictionary to look up 3 letter months (Done by Steve)\n####\n\"\"\" Added to valid_month in else clause\nmonth_abbvs = dict((m[:3].lower(),m) for m in months)\n\ndef valid_month2(month):\n month = month[:3].lower()\n return month_abbvs.get(month)\n\nprint(valid_month2(\"january\")) #=> \"January\" \nprint(valid_month2(\"jan\")) #=> \"January\" \nprint(valid_month2(\"January\")) #=> \"January\"\nprint(valid_month2(\"foo\")) #=> None\nprint(valid_month2(\"\")) #=> None\n\"\"\"\n#####################################\n\n# -----------\n# User Instructions\n# \n# Modify the valid_day() function to verify \n# whether the string a user enters is a valid \n# day. The valid_day() function takes as \n# input a String, and returns either a valid \n# Int or None. If the passed in String is \n# not a valid day, return None. \n# If it is a valid day, then return \n# the day as an Int, not a String. Don't \n# worry about months of different length. \n# Assume a day is valid if it is a number \n# between 1 and 31.\n# Be careful, the input can be any string \n# at all, you don't have any guarantees \n# that the user will input a sensible \n# day.\n#\n\ndef valid_day(day):\n if day.isdigit():\n day = int(day)\n if day >0 and day<32:\n return day\n else:\n return None\n else:\n return None\n\nprint(valid_day('0'))# => None \n# valid_day('1') => 1\nprint(valid_day('15'))# => 15\n# valid_day('500') => None\n\n###################################\n\n# -----------\n# User Instructions\n# \n# Modify the valid_year() function to verify \n# whether the string a user enters is a valid \n# year. If the passed in parameter 'year' \n# is not a valid year, return None. \n# If 'year' is a valid year, then return \n# the year as a number. 
Assume a year \n# is valid if it is a number between 1900 and \n# 2020.\n#\n\ndef valid_year(year):\n if year.isdigit():\n year = int(year)\n if year>=1900 and year<=2020:\n return year\n else:\n return None\n else:\n return None\n\n\nprint(valid_year('0'))# => None \nprint(valid_year('-11'))# => None\n# valid_year('1950') => 1950\nprint(valid_year('2000'))# => 2000\n\n#################\n\n# User Instructions\n# \n# Write a function 'sub1' that, given a \n# string, embeds that string in \n# the string: \n# \"I think X is a perfectly normal thing to do in public.\"\n# where X is replaced by the given \n# string.\n#\n\ngiven_string = \"I think %s is a perfectly normal thing to do in public.\"\ndef sub1(s):\n return given_string %s\n\n\nprint(sub1(\"running\"))\n# => \"I think running is a perfectly normal thing to do in public.\" \nprint(sub1(\"sleeping\")) \n# => \"I think sleeping is a perfectly normal thing to do in public.\"\n\n#########################\n\n# User Instructions\n# \n# Write a function 'sub2' that, given two \n# strings, embeds those strings in the string: \n# \"I think X and Y are perfectly normal things to do in public.\"\n# where X and Y are replaced by the given \n# strings.\n#\n\ngiven_string2 = \"I think %s and %s are perfectly normal things to do in public.\"\ndef sub2(s1, s2):\n return given_string2 %(s1,s2)\n\nprint(sub2(\"running\", \"sleeping\"))\n# => \"I think running and sleeping are perfectly normal things to do in public.\"\nprint(sub2(\"sleeping\", \"running\"))\n# => \"I think sleeping and running are perfectly normal things to do in public.\"\n\n######################\n\n# User Instructions\n# \n# Write a function 'sub_m' that takes a \n# name and a nickname, and returns a \n# string of the following format: \n# \"I'm NICKNAME. My real name is NAME, but my friends call me NICKNAME.\"\n# \n\ngiven_string2 = \"I'm %(nickname)s. My real name is %(name)s, but my friends call me %(nickname)s.\"\ndef sub_m(name, nickname):\n return given_string2 % {\"nickname\":nickname,\n \"name\":name}\n \n\nprint(sub_m(\"Mike\", \"Goose\"))\n# => \"I'm Goose. My real name is Mike, but my friends call me Goose.\"\n\n#######################\n\n# User Instructions\n# \n# Implement the function escape_html(s), which replaces:\n# > with &gt;\n# < with &lt;\n# \" with &quot;\n# & with &amp;\n# and returns the escaped string\n# Note that your browser will probably automatically \n# render your escaped text as the corresponding symbols, \n# but the grading script will still correctly evaluate it.\n# \n\"\"\"\nesc = {\"&\":\"&amp;\",\n \">\":\"&gt;\",\n \"<\":\"&lt;\",\n '\"':\"&quote;\"}\n\ndef escape_html(s):\n for i in esc:\n s = s.replace(i,esc[i])\n #print s\n return s\n\ndef escape_html(s):\n for (i,o) in esc:\n s = s.replace(i,o)\n return s\n\"\"\"\n\nimport cgi\ndef escape_html(s):\n return cgi.escape(s,quote=True)\n\nprint(escape_html(\"<b>Bold done. 
Now & and \\\"\"))\n\n######################\n\n" }, { "alpha_fraction": 0.5039950609207153, "alphanum_fraction": 0.515673041343689, "avg_line_length": 24.825397491455078, "blob_id": "72fd691cdf7712c285c7ebba2d878e63ce0dbe1d", "content_id": "82b88f44fd31963f1c08507ff4d3fd2d114934f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1627, "license_type": "no_license", "max_line_length": 51, "num_lines": 63, "path": "/u2_hw/probset2.py", "repo_name": "nitstorm/udacity-cs253", "src_encoding": "UTF-8", "text": "import webapp2\nimport cgi\n\nform=\"\"\"\n<form method=\"post\" action=\"/\">\n\t<h1>Rot 13</h1>\n <p>Write your text to get the Rot 13</p>\n <textarea name=\"text\">%s</textarea>\n <input type=\"submit\">\t\n</form>\n\n\"\"\"\n\nclass MainPage(webapp2.RequestHandler):\n def charfind(self,c):\n c = c.lower()\n char_list=\"abcdefghijklmnopqrstuvwxyz\"\n pos = char_list.index(c)+1\n if pos > 13:\n res = (pos+13) % 26\n else:\n res = pos+13\n return char_list[res-1:res]\n\n def rotconvert(self,q):\n result = []\n for char in q:\n if char.isalpha():\n res = self.charfind(char)\n if char.islower():\n result.append(res)\n elif char.isupper():\n res = res.upper()\n result.append(res)\n elif char.isspace() or char.isdigit():\n result.append(char)\n else:\n char = cgi.escape(char,quote=True)\n result.append(char)\n\n #self.response.out.write(result)\n #self.response.out.write(\"<br>\")\n final = \"\"\n for i in result:\n final = final+i\n #self.response.out.write(final)\n return final\n\n def write_form(self,output_text=\"\"):\n self.response.out.write(form % output_text)\n\n def get(self):\n self.write_form()\n\n def post(self):\n q = self.request.get('text')\n result = self.rotconvert(q)\n self.write_form(result)\n #self.write_form()\n\napplication = webapp2.WSGIApplication([\n ('/', MainPage)\n], debug=True)\n" }, { "alpha_fraction": 0.5322874784469604, "alphanum_fraction": 0.5395841002464294, "avg_line_length": 31.247058868408203, "blob_id": "ee541af87e78398b5c9a06327a224bcd48b74e6d", "content_id": "8eb6916e6342d27e76c0363d2ae52047d99bfd58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2741, "license_type": "no_license", "max_line_length": 117, "num_lines": 85, "path": "/u2_hw/probset2-login.py", "repo_name": "nitstorm/udacity-cs253", "src_encoding": "UTF-8", "text": "import webapp2\nimport re\n\nform=\"\"\"\n<h1>Login</h1>\n<form method=\"post\" action=\"/\">\n\t<label>Username: <input type=\"text\" name=\"username\"></label><span class=\"error\">%(uname_error)s</span><br>\n <label>Password: <input type=\"password\" name=\"password\"></label><span class=\"error\">%(pwd_error)s</span><br>\n <label>Verify password: <input type=\"password\" name=\"verify\"></label><span class=\"error\">%(ver_error)s</span><br>\n <label>Email(optional): <input type=\"email\" name=\"email\"></label><span class=\"error\">%(email_error)s</span><br>\n <input type=\"submit\">\t\n</form>\n\n\"\"\"\n\nclass MainPage(webapp2.RequestHandler):\n \n def write_form(self,uname_error=\"\",pwd_error=\"\",ver_error=\"\",email_error=\"\"):\n self.response.headers['Content-Type'] = 'text/html'\n self.response.out.write(form % {\"uname_error\":uname_error,\n \"pwd_error\":pwd_error,\n \"ver_error\":ver_error,\n \"email_error\":email_error\n })\n\n def checker(self,uname,pwd,ver,email=\"\"):\n uname_re = re.compile(r\"^[a-zA-Z0-9_-]{3,20}$\")\n pwd_re = re.compile(r\"^.{3,20}$\")\n email_re = 
re.compile(r\"^[\\S]+@[\\S]+\\.[\\S]+$\")\n\n error_code = 0\n uname_error = \"\"\n email_error = \"\"\n pwd_error = \"\"\n ver_error = \"\"\n\n if email != \"\":\n if not email_re.match(email):\n email_error = \"Invalid e-mail\"\n error_code = 1\n\n if not uname_re.match(uname):\n uname_error = \"Invalid username\"\n error_code = 1\n\n if not pwd_re.match(pwd):\n pwd_error = \"Invalid password\"\n error_code = 1\n\n if not pwd == ver:\n ver_error = \"Passwords don't match\"\n error_code = 1\n\n if error_code == 1:\n self.write_form(uname_error,pwd_error,ver_error,email_error)\n else:\n url = \"/welcome?uname=%s\" %uname\n self.redirect(url)\n\n\n\n def get(self):\n self.write_form()\n\n def post(self):\n uname = self.request.get('username')\n pwd = self.request.get('password')\n ver = self.request.get('verify')\n email = self.request.get('email')\n\n self.checker(uname,pwd,ver,email)\n\n \n# Welcome Page completed and works -> /welcome?uname=Nitin\nclass WelcomePage(webapp2.RequestHandler):\n def get(self):\n uname = self.request.get('uname')\n msg = \"Welcome %s\"\n self.response.headers['Content-Type'] = 'text/plain'\n self.response.out.write(msg % uname)\n\napplication = webapp2.WSGIApplication([\n ('/', MainPage),\n ('/welcome', WelcomePage)\n], debug=True)\n" } ]
4
mrvollger/GolgaPrimates
https://github.com/mrvollger/GolgaPrimates
bf8e322b10074f329e2084b8c0b5d9f9f33dbc07
d882d4b55d020800979fb7aaee26f39bc48d2651
1438b26b7abb618563105df997b5795dc04e1011
refs/heads/master
2020-04-10T01:15:15.084601
2018-12-06T17:44:43
2018-12-06T17:44:43
160,710,379
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7254902124404907, "alphanum_fraction": 0.7352941036224365, "avg_line_length": 50, "blob_id": "350fd976f937980ce739590d8cde55e3faa93213", "content_id": "c7099818bfa97299aaab4acdce3edb84ee75f57c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 102, "license_type": "no_license", "max_line_length": 89, "num_lines": 2, "path": "/beds.sh", "repo_name": "mrvollger/GolgaPrimates", "src_encoding": "UTF-8", "text": "#!/bin/bash\nfind /net/eichler/vol2/home/mvollger/assemblies/*/*/Segdups/asm.bed -type f > asmbeds.txt\n" }, { "alpha_fraction": 0.7029703259468079, "alphanum_fraction": 0.7029703259468079, "avg_line_length": 49, "blob_id": "ce7a651116e4fe8cf49c831123a37cdaeb1ae04e", "content_id": "3327b0da4376ad66b0f2b9ff780652ca6f765d4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 101, "license_type": "no_license", "max_line_length": 87, "num_lines": 2, "path": "/cmd.sh", "repo_name": "mrvollger/GolgaPrimates", "src_encoding": "UTF-8", "text": "#!/bin/bash\nbedtools intersect -wao -a golga.merged.bed -b $(cat asmbeds.txt) > overlap.by.asm.bed\n\n" }, { "alpha_fraction": 0.6457194685935974, "alphanum_fraction": 0.6530054807662964, "avg_line_length": 29.901409149169922, "blob_id": "e5b9e7e9bc636c27a17e84d876ea558292c34a2a", "content_id": "e4a9cd1905b7b93f93fad756bf144139f318b69c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2196, "license_type": "no_license", "max_line_length": 178, "num_lines": 71, "path": "/makeTbl.py", "repo_name": "mrvollger/GolgaPrimates", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport argparse\nimport sys\n\nparser = argparse.ArgumentParser(description=\"\")\nparser.add_argument(\"infile\", nargs=\"?\", help=\"input bam file\", type=argparse.FileType('r'), default=sys.stdin)\nparser.add_argument(\"outfile\",nargs=\"?\", help=\"output bam file\", type=argparse.FileType('w'), default=sys.stdout)\nparser.add_argument('-d', action=\"store_true\", default=False)\nargs = parser.parse_args()\n\nimport glob\nimport os\nimport sys\nimport re\nimport itertools \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\nargs.infile = \"overlap.by.asm.bed\"\n\ndef pairNum():\n\tf = open(\"asmbeds.txt\")\n\tconv = {}\n\tfor idx, line in enumerate(f):\n\t\tif(line[0] == \"#\"):\n\t\t\tcontinue\n\t\tline = line.strip().split(\"/\")\n\t\t#print(line[7])\n\t\tconv[str(idx+1)] = line[7].lower()\n\treturn(conv)\n\n\ndf = pd.read_table(args.infile)\ndf.columns = [\"chr\", \"start\", \"end\", \"golga_id\", \"strand\", \"genome\", \"aln_chr\", \"aln_start\", \"aln_end\", \"contig\", \"contig_start\", \"contig_end\", \"contig_len\", \"aln_ID\", \"overlap\"]\nconv = pairNum()\n\ndf[\"golga_id\"]=df[\"golga_id\"].astype(str)\ndf[\"strand\"]=df[\"strand\"].astype(str)\n\ndf[\"genome\"] = df[\"genome\"].replace(conv)\n\ndf[\"%_covered\"] = df[\"overlap\"]/(df[\"end\"]-df[\"start\"])*100\n\n\n# require the majority of the region to be covered\ndf = df[df[\"%_covered\"] > 90.0]\n\n# require the majority of the cotig to be aligned \ndf[\"contig_end\"] = df[\"contig_end\"].astype(int)\ndf[\"contig_len\"] = df[\"contig_len\"].astype(int)\ndf[\"contig_start\"] = df[\"contig_start\"].astype(int)\ndf[\"frac_of_contig_in_aln\"] = (df[\"contig_end\"]-df[\"contig_start\"])/df[\"contig_len\"]\n\n\n#df.sort_values(by=[\"frac_of_contig_in_aln\"], inplace=True)\ndf['frac_max'] = 
df.groupby(['contig'])['frac_of_contig_in_aln'].transform(max)\ndf = df[ df[\"frac_of_contig_in_aln\"] >= df[\"frac_max\"] ]\ndf.drop(columns = [\"frac_max\"], inplace=True)\n\ndf.sort_values(by=[\"chr\", \"start\", \"end\", \"genome\"], inplace=True)\ndf.to_csv(\"tbl.tab\", index=False, sep=\"\\t\")\n\n\nprint(df)\nif(True):\n\tdf2 = df.loc[:, \"chr\":\"genome\"]\n\tdf2 = pd.DataFrame(df2.groupby([\"chr\", \"start\", \"end\", \"golga_id\", \"strand\"]).aggregate(set))\n\tdf2[\"genome\"] = df2[\"genome\"].apply(sorted).str.join(\", \")\n\tdf2.to_csv(\"simple.tab\", sep=\"\\t\")\n\n\n" } ]
3
Wuerike/SLP
https://github.com/Wuerike/SLP
4d8aa4d8ff5bf337d16e8931f0e0ada4fd1e0982
41e150888b730e361babaebac4cf2432006e6715
9bfb17ad98f73e3a132b15edba0a380b653a8a17
refs/heads/master
2023-03-09T21:07:21.950583
2021-02-25T11:45:31
2021-02-25T11:45:31
342,227,703
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.665593147277832, "alphanum_fraction": 0.7047772407531738, "avg_line_length": 31.120689392089844, "blob_id": "e14356138a0e27138cbe0445e11781f4d8d3e1fd", "content_id": "82c85e6941d48ad9e809537bc8a01165f79193de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1863, "license_type": "no_license", "max_line_length": 137, "num_lines": 58, "path": "/iris.py", "repo_name": "Wuerike/SLP", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\n\nfrom perceptron import Perceptron\n\n# read the data set\ndata = pd.read_csv(\"iris.csv\")\n\n# Get data only from setosa and versicolor classes\n# Get the atributtes sepal length and petal length\nvalues = data.iloc[0:100, [0, 2]].values\nlabels = data.iloc[0:100, 4].values\n\n# Redefine setosa class as 0 and versicolor as 1\nlabels = np.where(labels == 'Iris-setosa', 0, 1)\n\n# Split the data set in train data set an test data set\ntrain_values, test_values, train_labels, test_labels = train_test_split(values, labels, test_size=0.2, random_state=101, stratify=labels)\n\n# Train the perceptron \np = Perceptron(max_training_epoch=100, learning_rate=0.001)\np.train(train_values, train_labels)\n\n# Prdict the test values data set\npredictions = p.predict(test_values)\n\nprint(\"Test data-set accuracy\", p.accuracy(test_labels, predictions))\n\n# Shows a plot with the test data set\np.plot_data(test_values, test_labels, \"PERCEPTRON: TEST DATA-SET\")\n\n\n# Plot the all the setosa and versicolor values\nfig = plt.figure()\nax = fig.add_subplot(1,1,1)\nax.set_title(\"COMPLETE IRIS DATA-SET\")\n\nplt.scatter(values[:50, 0], values[:50, 1], color='red', marker='o', label='setosa')\nplt.scatter(values[50:100, 0], values[50:100, 1], color='blue', marker='x', label='versicolor')\nplt.xlabel('petal length')\nplt.ylabel('sepal length')\nplt.legend(loc='upper left')\n\n# Get X min and max values\nx1 = np.amin(values[:,0])\nx2 = np.amax(values[:,0])\n\n# From the perceptron predict function w1*x1 + w2*x2 + b = 0\n# Being x1 = x and x2 = y ---> y = (-w1*x - b)/w2\n# Get Y values to max and min X values\ny1 = (-p.weights[0] * x1 - p.bias) / p.weights[1]\ny2 = (-p.weights[0] * x2 - p.bias) / p.weights[1]\n\n# Plot the decision boundery\n#plt.plot([x1, x2], [y1, y2], 'k')\nplt.show()\n" }, { "alpha_fraction": 0.6059046983718872, "alphanum_fraction": 0.6220085024833679, "avg_line_length": 41.18867874145508, "blob_id": "346037ea00e12938e1e1fb1cc7c46d80a639d249", "content_id": "119b5634c19ccbc07cf0df4ef01bb17cc60b0bf7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4471, "license_type": "no_license", "max_line_length": 124, "num_lines": 106, "path": "/perceptron.py", "repo_name": "Wuerike/SLP", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import datasets\nimport matplotlib.pyplot as plt\n\n\nclass Perceptron(object):\n def __init__(self, max_training_epoch = 100, learning_rate = 0.01):\n self.max_training_epoch = max_training_epoch\n self.learning_rate = learning_rate\n self.convergence_counter = 0\n self.convergence_factor = 3\n self.weights = None\n self.bias = 0\n self.previous_bias = 0\n\n # Method that plots a data set and the decision boundary line\n def plot_data(self, values, labels, plot_name):\n # Plot the received values / labels\n fig = 
plt.figure()\n        ax = fig.add_subplot(1,1,1)\n        ax.set_title(plot_name)\n        plt.scatter(values[:,0], values[:,1] ,marker='x', c=labels)\n\n        # Get X min and max values\n        x1 = np.amin(values[:,0])\n        x2 = np.amax(values[:,0])\n\n        # From the perceptron predict function w1*x1 + w2*x2 + b = 0\n        # Being x1 = x and x2 = y ---> y = (-w1*x - b)/w2\n        # Get Y values for the max and min X values\n        y1 = (-self.weights[0] * x1 - self.bias) / self.weights[1]\n        y2 = (-self.weights[0] * x2 - self.bias) / self.weights[1]\n\n        # Plot the decision boundary\n        ax.plot([x1, x2], [y1, y2], 'k')\n\n        # show the plot in a new window\n        plt.show()\n\n    # Method that checks if the training has converged\n    def check_convergence(self):\n        if self.previous_bias == self.bias:\n            # If the bias repeats, increment the counter\n            self.convergence_counter += 1\n        else:\n            # Otherwise, save the bias and set the counter to 0\n            self.previous_bias = self.bias\n            self.convergence_counter = 0\n\n        # Returns true when the counter is equal to convergence_factor\n        if self.convergence_counter == self.convergence_factor:\n            return True\n        else:\n            return False\n\n    # Method that returns the accuracy for given predicted labels and real labels\n    def accuracy(self, real_labels, predited_labels):\n        accuracy = np.sum(real_labels == predited_labels) / len(real_labels)\n        return accuracy\n\n    # Method to predict labels based on their inputs\n    def predict(self, inputs):\n        # bias value + scalar product between inputs and weights\n        dot_product = np.dot(inputs, self.weights) + self.bias\n\n        # apply the activation function, returning 0 or 1\n        return np.where(dot_product>=0, 1, 0)\n\n    # Method to train a one-layer perceptron artificial neural network\n    def train(self, training_inputs, training_labels):\n        # Gets the training data dimensions (2 attributes make the plot visible)\n        n_samples, n_atributtes = training_inputs.shape\n        self.weights = np.zeros(n_atributtes)\n\n        for epoch in range(self.max_training_epoch):\n            # Apply the training rule to adjust weights and bias\n            for inputs, label in zip(training_inputs, training_labels):\n                prediction = self.predict(inputs)\n                self.weights += self.learning_rate * (label - prediction) * inputs\n                self.bias += self.learning_rate * (label - prediction)\n\n            # Stop training early when the same bias repeats convergence_factor times\n            if self.check_convergence():\n                self.plot_data(training_inputs, training_labels, \"Training complete with \" + str(epoch + 1) + \" epochs\")\n                break\n\n            # plot each training epoch; the plot window should be closed to see the next iteration\n            self.plot_data(training_inputs, training_labels, \"Training epoch: \" + str(epoch + 1))\n\ndef test():\n    # Create a data set as described in the parameters\n    values, labels = datasets.make_blobs(n_samples=300, n_features=2, centers=2, cluster_std=1.25, random_state=6)\n    # Split the data set into a train data set and a test data set\n    train_values, test_values, train_labels, test_labels = train_test_split(values, labels, test_size=0.2, random_state=101)\n\n    p = Perceptron(max_training_epoch=100, learning_rate=0.001)\n    p.train(train_values, train_labels)\n    predictions = p.predict(test_values)\n\n    print(\"Test data-set accuracy\", p.accuracy(test_labels, predictions))\n\n    p.plot_data(test_values, test_labels, \"PERCEPTRON: TEST DATA-SET\")\n\nif __name__ == '__main__':\n    test()" }, { "alpha_fraction": 0.7856560945510864, "alphanum_fraction": 0.7954360246658325, "avg_line_length": 54.79545593261719, "blob_id": "904cfd4bca4229f4f5b68f64f1d376fb44c8d1d5", "content_id": 
"c9cbe345a391a8d8e84facdad919c60a628ee38e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2509, "license_type": "no_license", "max_line_length": 229, "num_lines": 44, "path": "/readme.md", "repo_name": "Wuerike/SLP", "src_encoding": "UTF-8", "text": "O arquivo perceptron.py é efetivamente a implementação de uma rede neural perceptron de 1 layer e 1 neurônio.\nSendo capaz de separar duas classes entre si através da análise de dois atributos.\n\nA implementação foi realizada com o uso de orientação a objetos, então inicialmente deve-se criar um objeto da classe Perceptron.\nO construtor espera por dois paramatros, o numero máximo de épocas de treinamento e a taxa de aprendizagem.\n\np = Perceptron(max_training_epoch=100, learning_rate=0.001)\n\nFeito isto, pode-se então treinar a rede através do método train.\n\np.train(train_values, train_labels)\n\nDurante o treinamento, a cada época será plotado os pontos utilizados para treinamento bem como a reta que representa a fronteira de decisão. \nPara que uma proxima época seja executada, deve-se clicar para fechar o plot, e então um próximo abrirá, representando uma nova iteração.\nNo método train, utiliza-se o método check_convergence para verificar se houve convergência antes de atingir o numero máximo de epocas configurado.\n\nQuando a convergência for obtida, será plotado o data set de teste e a fronteira de decisão final, e o titulo do plot demonstrará quantas épocas foram necessárias.\nCom o treino realizado, pode-se então chamar o método predict que retornará uma lista com as previsões feitas para os valores passados.\n\npredictions = p.predict(test_values)\n\nPor fim, pode-se chamar o métudo accuracy para verificar a acurácia das previsões em relação às labels reais.\nEste método espera as labels verdadeiras e as labels previstas e então retorna a acurácia.\n\np.accuracy(test_labels, predictions)\n\nO formato esperado para values e labels, tanto no treino quanto na previsão é:\n\n[[atributo1 atributo2]\n [atributo1 atributo2]\n [atributo1 atributo2]\n .\n .\n .\n]\n\n[label label label ... ]\n\n\nAo executar o arquivo perceptron.py (python /path/to/perceptron.py) será executado um treino e teste com dados criados pela função make_blobs da biblioteca scikit-learn.\n\nAo executar o arquivo iris.py (python /path/to/iris.py) utiliza-se então o iris data set para treino e teste.\nComo esse data set tem 4 atributos e 3 classes, utilizou-se apenas 2 atributos (sepal lenth e petal length) e apenas 2 classes (setosa e versicolor)\nDesta forma, apenas as 100 primeiras linhas do data set foram utilizadas, onde então foram divididas nos grupos de treino e teste, sendo que o grupo de teste representa 20% do data set enquanto o restante fica no grupo de treino." } ]
3
becgorton/parcels
https://github.com/becgorton/parcels
954fa5ba03d3c34197b2a2725a2c54e64ccceddd
37cf0993ef2b4a9f76cb9b545253464974c2f923
a38a4fec6031b0533a0d9546fdac7b349fc7a04e
refs/heads/master
2020-06-12T18:59:13.868752
2019-07-04T06:23:32
2019-07-04T06:23:32
194,394,589
0
0
MIT
2019-06-29T10:44:43
2019-06-25T12:22:29
2019-06-28T15:55:30
null
[ { "alpha_fraction": 0.7041284441947937, "alphanum_fraction": 0.7224770784378052, "avg_line_length": 42.599998474121094, "blob_id": "38f8dd42e330af35f71214e203593d19a49662bf", "content_id": "2980fdd319876b2f43f4ca61011c306ae694691b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "permissive", "max_line_length": 92, "num_lines": 10, "path": "/tests/test_particle_file.py", "repo_name": "becgorton/parcels", "src_encoding": "UTF-8", "text": "from parcels.particlefile import _set_calendar\nfrom parcels.tools.converters import _get_cftime_calendars, _get_cftime_datetimes\nimport cftime\n\n\ndef test_set_calendar():\n for calendar_name, cf_datetime in zip(_get_cftime_calendars(), _get_cftime_datetimes()):\n date = getattr(cftime, cf_datetime)(1990, 1, 1)\n assert _set_calendar(date.calendar) == date.calendar\n assert _set_calendar('np_datetime64') == 'standard'\n" }, { "alpha_fraction": 0.4035874307155609, "alphanum_fraction": 0.4708520174026489, "avg_line_length": 31.962963104248047, "blob_id": "98756a691ba2ff8401da234de31e642f33e60fc9", "content_id": "60f72b46c5dd5dccb3429df08fafba2b69355645", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1784, "license_type": "permissive", "max_line_length": 93, "num_lines": 54, "path": "/parcels/rtree_util.py", "repo_name": "becgorton/parcels", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 4 13:56:27 2019\n\n@author: bec\n\"\"\"\n\nimport numpy as np\nimport xarray as xr\nimport pyindex.core as core\nimport numba\n\[email protected](cache=True)\ndef get_relative_coordinates(lon, lat, x, y, xi, yi):\n '''returns relative coordinates xsi, eta\n that are the coordinates of the (x, y) point remapped into a square cell [0,1] x [0,1]\n '''\n invA = np.array([\n 1.0, 0.0, 0.0, 0.0, -1.0, 1.0, 0.0, 0.0, -1.0, 0.0, 0.0, 1.0, 1.0,\n -1.0, 1.0, -1.0\n ]).reshape(4, 4)\n px = np.array(\n [lon[yi, xi], lon[yi, xi + 1], lon[yi + 1, xi + 1], lon[yi + 1, xi]])\n px = np.where(px[:] - x > 180, px - 360, px)\n px = np.where(px[:] - x < -180, px + 360, px)\n py = np.array(\n [lat[yi, xi], lat[yi, xi + 1], lat[yi + 1, xi + 1], lat[yi + 1, xi]])\n #print(type(invA))\n #print(type(px))\n\n a = np.dot(invA, px)\n b = np.dot(invA, py)\n\n aa = a[3] * b[2] - a[2] * b[3]\n bb = a[3] * b[0] - a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + x * b[\n 3] - y * a[3]\n cc = a[1] * b[0] - a[0] * b[1] + x * b[1] - y * a[1]\n if abs(aa) < 1e-12: # Rectilinear cell, or quasi\n eta = -cc / bb\n else:\n det2 = bb * bb - 4 * aa * cc\n if det2 > 0: # so, if det is nan we keep the xsi, eta from previous iter\n det = np.sqrt(det2)\n eta = (-bb + det) / (2 * aa)\n else: # should not happen, apart from singularities\n eta = 1e6\n if abs(a[1] + a[3] *\n eta) < 1e-12: # this happens when recti cell rotated of 90deg\n xsi = ((y - py[0]) / (py[1] - py[0]) + (y - py[3]) /\n (py[2] - py[3])) * .5\n else:\n xsi = (x - a[0] - a[2] * eta) / (a[1] + a[3] * eta)\n return (xsi, eta)\n " } ]
2
stelzch/picamview
https://github.com/stelzch/picamview
b122ce3360ac61d96de08ca06b3c25ba91c7e722
168994853eced2be30d46cc0fdac014df5a6448a
776c6744a63f1f7446f732a49196e2854db78ea1
refs/heads/master
2021-01-22T10:59:44.942923
2017-02-15T12:45:19
2017-02-15T12:45:19
82,059,976
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6838323473930359, "alphanum_fraction": 0.6946107745170593, "avg_line_length": 27.827587127685547, "blob_id": "1c1b8604f31fae56b5372f3b59075d0b83f88bc8", "content_id": "afbbb6329f5b3be655b1db2ef717668218696bd4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 835, "license_type": "no_license", "max_line_length": 65, "num_lines": 29, "path": "/picamview/__main__.py", "repo_name": "stelzch/picamview", "src_encoding": "UTF-8", "text": "import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk\nfrom picamera import PiCamera\nfrom os import path\n\nclass Controller:\n\tdef __init__(self):\n\t\there = path.abspath(path.dirname(__file__))\n\t\tself.builder = Gtk.Builder()\n\t\tself.builder.add_from_file(path.join(here, 'mainwindow.glade'))\n\t\tself.window = self.builder.get_object('mainWindow')\n\t\tself.img = self.builder.get_object('capImg')\n\t\tself.btn = self.builder.get_object('capBtn')\n\t\tself.cam = PiCamera(resolution=(1280,720))\n\t\t\n\t\tself.window.connect('delete-event', Gtk.main_quit)\n\t\tself.btn.connect('clicked', self.capture)\n\t\tself.window.show_all()\n\n\n\tdef capture(self, arg=''):\n\t\tself.cam.capture('/tmp/picamview-capture.png', 'png')\n\t\tself.img.set_from_file('/tmp/picamview-capture.png')\n\nif __name__ == '__main__':\n\tc = Controller()\n\tc.capture()\n\tGtk.main()" } ]
1
radmimir/Klimov_Fastems_parser
https://github.com/radmimir/Klimov_Fastems_parser
9adfdeb37707d6711de1325ff4d1b11b0b665674
759448a6a1416e1fc5126978c2877a41068f72f6
71b3adf93599d585c0afe386b81cb46a43053e32
refs/heads/master
2020-09-20T09:55:37.875798
2019-12-11T14:09:12
2019-12-11T14:09:12
224,443,213
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5752895474433899, "alphanum_fraction": 0.5965250730514526, "avg_line_length": 26.263158798217773, "blob_id": "69b2269ec34d3d02c00f77c0e097854391945471", "content_id": "888bd11c3fa352c44658b831b3188940f7afd94a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 64, "num_lines": 19, "path": "/graphs.py", "repo_name": "radmimir/Klimov_Fastems_parser", "src_encoding": "UTF-8", "text": "from matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom constants import constants\n\n\ndef graph3d(x, y, z): # построение модели\n fig = plt.figure()\n ax = Axes3D(fig)\n n = len(x)\n const = constants()\n print(\"Graph 3D plotting\", n, \"graphs...\")\n for i in range(len(x)):\n ax.scatter(x[i], y[i], z[i], s=1., c=const['colors'][i])\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n plt.legend(['Up', 'Down'])\n ax.view_init(90, 180)\n plt.show()\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 18.58333396911621, "blob_id": "e5f6b5e47ea1c0d5ac3c16fc3260e8de5b186497", "content_id": "0881f885306533eefa5da7913281dc9febf0959a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 235, "license_type": "no_license", "max_line_length": 40, "num_lines": 12, "path": "/install.py", "repo_name": "radmimir/Klimov_Fastems_parser", "src_encoding": "UTF-8", "text": "import PyInstaller.__main__\nfrom os.path import join\nimport constants\n\nconsts = constants.constants()\n\nPyInstaller.__main__.run([\n '--onefile',\n '--noconsole',\n '--icon=%s' % join('.', 'icon.ico'),\n join('', 'forms.py')\n])\n" }, { "alpha_fraction": 0.4645550549030304, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 32.150001525878906, "blob_id": "3314f2d5248b2ade372f123666fd5579d95a6ae6", "content_id": "0a4fb679e5b6ec686897408b14161f03ca53584a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 737, "license_type": "no_license", "max_line_length": 95, "num_lines": 20, "path": "/constants.py", "repo_name": "radmimir/Klimov_Fastems_parser", "src_encoding": "UTF-8", "text": "import math\n\n\ndef constants() -> dict:\n res = {}\n res['PARAMS_OFFSET'] = 10 ** 5 # Смещение параметров\n res['FILE_OFFSET'] = 6 # Смещение начала данных\n res['DOTS_OFFSET'] = 10 ** 5 # смещение точек\n res['Y_ZERO'] = -2147483648 # Смещение y\n res['th'] = [\"X_UP\", \"Y_UP\", \"Z_UP\", \" \", \"X_DOWN\", \"Y_DOWN\", \"Z_DOWN\"] # Подписи столбцов\n res['offset_x'] = 3.564\n res['offset_y'] = -8.488\n res['offset_z'] = 0.326\n res['z_offset'] = 16645.0\n res['x_rotate'] = math.radians(3.5)\n res['y_rotate'] = 0\n res['z_rotate'] = math.radians(180) # 23.757\n res['colors'] = 'black', 'red'\n res['SMALL'] = 10 ** -3\n return res\n" }, { "alpha_fraction": 0.5848120450973511, "alphanum_fraction": 0.6103759407997131, "avg_line_length": 50.153846740722656, "blob_id": "57d2a7ec3a530f4ec42325eee7d7afe5ec306abe", "content_id": "d0fab0f9dc5b8852b89356325ed9e9d9eff6e5c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6897, "license_type": "no_license", "max_line_length": 118, "num_lines": 130, "path": "/forms.py", "repo_name": "radmimir/Klimov_Fastems_parser", "src_encoding": "UTF-8", "text": "from tkinter import Frame, W, E, N, S, \\\n StringVar, Entry, Button, Label, Checkbutton, 
\\\n BooleanVar, DoubleVar\nfrom tkinter.filedialog import askopenfilename\nfrom tkinter.messagebox import showerror, showinfo\nfrom convert import convert_form\nfrom os.path import abspath\nfrom graphs import graph3d\nfrom numpy import loadtxt, float as flt\nfrom tools import filter3d, MAXDOUBLE, MINDOUBLE\n\n\nclass MyFrame(Frame):\n convert_res = ''\n\n def __init__(self):\n Frame.__init__(self)\n self.master.title(\"Profile parser\")\n self.master.rowconfigure(5, weight=1)\n self.master.columnconfigure(5, weight=1)\n self.grid(sticky=W + E + N + S)\n\n # Variables\n self.file_var = StringVar(self, name='file_var')\n self.up_mirror = BooleanVar(self, False)\n self.down_mirror = BooleanVar(self, False)\n self.offset_up_x = DoubleVar(self)\n self.offset_up_y = DoubleVar(self)\n self.offset_down_x = DoubleVar(self)\n self.offset_down_y = DoubleVar(self)\n self.offset_z = DoubleVar(self)\n self.offset = [self.offset_z, self.offset_up_x, self.offset_up_y, self.offset_down_x, self.offset_down_y]\n self.convert_res = StringVar(self)\n # Filters\n self.filter_up_x = DoubleVar(self, MAXDOUBLE)\n self.filter_down_x = DoubleVar(self, MINDOUBLE)\n self.filter_up_y = DoubleVar(self, MAXDOUBLE)\n self.filter_down_y = DoubleVar(self, MINDOUBLE)\n self.filter_up_z = DoubleVar(self, MAXDOUBLE)\n self.filter_down_z = DoubleVar(self, MINDOUBLE)\n self.filter = [self.filter_down_x, self.filter_up_x, self.filter_down_y, self.filter_up_y, self.filter_down_z,\n self.filter_up_z]\n\n Entry(self, width=70,\n textvariable=self.file_var).grid(row=0, column=0, sticky=W, padx=30, pady=30, columnspan=5)\n\n Button(self, text=\"Выбрать файл\", command=lambda: self.load_file(self.file_var),\n width=15).grid(row=0, column=6, sticky=W, padx=20, pady=20)\n Label(self, text='Смещение Z', font='Arial 10').grid(row=1, column=0, sticky=W, padx=30, pady=5)\n Entry(self, textvariable=self.offset_z).grid(row=1, column=1, sticky=W, padx=5, pady=5)\n\n Label(self, text='Смещение X', font='Arial 10').grid(row=1, column=2, sticky=W, padx=30, pady=5)\n Entry(self, textvariable=self.offset_up_x).grid(row=2, column=2, sticky=W, padx=5, pady=5)\n Entry(self, textvariable=self.offset_down_x).grid(row=3, column=2, sticky=W, padx=5, pady=5)\n\n Label(self, text='Смещение Y', font='Arial 10').grid(row=1, column=3, sticky=W, padx=30, pady=5)\n Entry(self, textvariable=self.offset_up_y).grid(row=2, column=3, sticky=W, padx=5, pady=5)\n Entry(self, textvariable=self.offset_down_y).grid(row=3, column=3, sticky=W, padx=5, pady=5)\n\n Label(self, text='UP', font='Arial 12 bold').grid(row=2, column=0, sticky=W, padx=30, pady=5)\n Checkbutton(self, text='Зеркало XZ', variable=self.up_mirror).grid(row=2, column=1, sticky=W)\n\n Label(self, text='DOWN', font='Arial 12 bold').grid(row=3, column=0, sticky=W, padx=30, pady=5)\n Checkbutton(self, text='Зеркало YZ', variable=self.down_mirror).grid(row=3, column=1, sticky=W)\n\n Button(self, text=\"Преобразовать\",\n command=lambda: self.button_convert(self.file_var, self.up_mirror, self.down_mirror,\n self.offset),\n width=15).grid(row=2, column=6, sticky=W, padx=20, pady=20)\n\n Button(self, text=\"График\",\n command=lambda: self.button_graph(self.file_var, self.filter),\n width=15).grid(row=7, column=6, sticky=W, padx=20, pady=20)\n\n Label(self, text='Верхняя граница', font='Arial 10').grid(row=4, column=2, sticky=W, padx=5, pady=5)\n Label(self, text='Нижняя граница', font='Arial 10').grid(row=4, column=3, sticky=W, padx=5, pady=5)\n\n Label(self, text='Фильтр X', font='Arial 12 bold').grid(row=5, 
column=0, sticky=W, padx=30, pady=5)\n        Entry(self, textvariable=self.filter_up_x).grid(row=5, column=2, sticky=W, padx=5, pady=5)\n        Entry(self, textvariable=self.filter_down_x).grid(row=5, column=3, sticky=W, padx=5, pady=5)\n\n        Label(self, text='Filter Y', font='Arial 12 bold').grid(row=6, column=0, sticky=W, padx=30, pady=5)\n        Entry(self, textvariable=self.filter_up_y).grid(row=6, column=2, sticky=W, padx=5, pady=5)\n        Entry(self, textvariable=self.filter_down_y).grid(row=6, column=3, sticky=W, padx=5, pady=5)\n\n        Label(self, text='Filter Z', font='Arial 12 bold').grid(row=7, column=0, sticky=W, padx=30, pady=5)\n        Entry(self, textvariable=self.filter_up_z).grid(row=7, column=2, sticky=W, padx=5, pady=5)\n        Entry(self, textvariable=self.filter_down_z).grid(row=7, column=3, sticky=W, padx=5, pady=5)\n\n    def load_file(self, file_var):\n        f = askopenfilename(filetypes=((\"Profile files\", \"*.profile\"),\n                                       (\"All files\", \"*.*\")), initialdir=abspath(__file__))\n        if f:\n            file_var.set(f)\n        return self\n\n    def button_convert(self, filevar, up_mirror, down_mirror, offset):\n        filevar = filevar.get()\n        up_mirror = up_mirror.get()\n        down_mirror = down_mirror.get()\n        offset = [i.get() for i in offset]\n        res = convert_form(filevar, up_mirror, down_mirror, offset)\n        if res:\n            message = 'Files were written successfully.'\n            showinfo('Information', message)\n        else:\n            message = 'Error - invalid file path or format. \\nA .profile file is required.'\n            showerror('Error', message)\n        self.convert_res.set(res)\n\n    def button_graph(self, filevar, filter, dec=10):\n        filevar = filevar.get()\n        filter = [i.get() for i in filter]\n        if not filevar or filevar[-7:] != 'profile':\n            message = 'Error - invalid file path or format. \\nA .profile file is required.'\n            showerror('Error', message)\n            return 0\n        up_file = filevar[:-8] + \"_up.txt\"\n        down_file = filevar[:-8] + \"_down.txt\"\n        up_data = loadtxt(up_file, delimiter=';', dtype=flt)\n        down_data = loadtxt(down_file, delimiter=';', dtype=flt)\n        up_data = filter3d(up_data, filter)\n        down_data = filter3d(down_data, filter)\n        x, y, z = [(up_data[:, 0][::dec], down_data[:, 0][::dec]), (up_data[:, 1][::dec], down_data[:, 1][::dec]),\n                   (up_data[:, 2][::dec], down_data[:, 2][::dec])]\n        graph3d(x, y, z)\n\n\nif __name__ == \"__main__\":\n    window = MyFrame().mainloop()\n" }, { "alpha_fraction": 0.4848935008049011, "alphanum_fraction": 0.5237741470336914, "avg_line_length": 27.842857360839844, "blob_id": "4a17fb46836983b6f5458d8ac6544e13ba6f19cc", "content_id": "92e6d8eb82a356d588c7e114a193beb68d2999ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4138, "license_type": "no_license", "max_line_length": 120, "num_lines": 140, "path": "/tools.py", "repo_name": "radmimir/Klimov_Fastems_parser", "src_encoding": "UTF-8", "text": "import math\nfrom constants import constants\n# import numpy as np\nimport os\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import pyplot as plt\nfrom numpy import array, append, bitwise_and, where\n\nMAXDOUBLE = 2147483648.\nMINDOUBLE = -2147483648.\n\n\ndef rect(r, phi):\n    x = r * math.cos(phi)\n    y = r * math.sin(phi)\n    return x, y\n\n\ndef polar(x, y):\n    r = (x ** 2 + y ** 2) ** .5\n    phi = math.atan2(y, x)\n    return r, phi\n\n\ndef rotate_polar(x, y, offset_phi):\n    r, phi = polar(x, y)\n    phi += offset_phi\n    x, y = rect(r, phi)\n    return x, y\n\n\ndef distance(x1, y1, z1, x2, y2, z2):\n    return math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2 + (z2 - z1) ** 2)\n\n\ndef np_optim(x, y, z, offset):  # xyz 
numpy arrays optimization, z - must be sorted\n    xyz = array((x, y, z), dtype='float')\n    xyz = xyz[xyz[:, 1].argsort()]\n    # print(xyz)\n    x1, y1, z1 = xyz\n    n = len(x1)\n    x2, y2, z2 = [], [], []\n    # a = list(z1).index(offset)\n    x1, y1, z1 = xyz\n    for i in range(0, n, 5):\n        x2.append(x1[i])\n        y2.append(y1[i])\n        z2.append(z1[i])\n    return x2, y2, z2\n\n\ndef make_gif(x, y, z):\n    n = len(x)\n    if not os.path.exists('frames'):  # check that the save path exists\n        os.mkdir('frames')\n    else:\n        for file in os.listdir('frames'):\n            os.unlink(os.path.join('frames/', file))\n    # Make one frame for each viewing angle\n    for angle in range(70, 270, 2):\n        fig = plt.figure()\n        ax = fig.gca(projection='3d')\n        for i in range(n):\n            ax.scatter(x[i], y[i], z[i], s=1.)\n        ax.set_xlabel('X')\n        ax.set_ylabel('Y')\n        ax.set_zlabel('Z')\n\n        ax.view_init(30, angle)\n\n        filename = 'frames/step' + str(angle) + '.png'\n        plt.savefig(filename, dpi=96)\n        plt.gca()\n        plt.close(fig)\n\n\ndef graph3d(x, y, z):  # build the model plot and export it to a gif file\n    fig = plt.figure()\n    ax = Axes3D(fig)\n    dict_z1 = {}\n    n = len(x)\n    const = constants()\n    print(\"Graph 3D plotting\", n, \"graphs...\")\n    for i in range(len(x)):\n        ax.scatter(x[i], y[i], z[i], s=1., c=const['colors'][i])\n    ax.set_xlabel('X')\n    ax.set_ylabel('Y')\n    ax.set_zlabel('Z')\n    plt.legend(['Up', 'Down'])\n    ax.view_init(90, -200)\n    plt.show()\n\n\ndef offset(x, y, z, consts, offset_x=0, offset_y=0, offset_z=0):\n    if offset_x or offset_y or offset_z:\n        x += offset_x\n        y += offset_y\n        z += offset_z\n    else:\n        x += consts['offset_x']\n        y += consts['offset_y']\n        z += consts['offset_z']\n    return x, y, z\n\n\ndef convert3d_arrays_to_list_of_tuples(data, dec):  # list of 3d numpy arrays\n    x, y, z = [], [], []\n    for arr in data:\n        x.extend(arr[:, 0][::dec])\n        y.extend(arr[:, 1][::dec])\n        z.extend(arr[:, 2][::dec])\n    return x, y, z\n\n\ndef filter3d(data, filter):  # takes 3d numpy array [[x0,y0,z0],[x1,y1,z1]...]\n    # minx,maxx,miny,maxy,minz,maxz\n    cond1 = bitwise_and(data[:, 0] > filter[0], data[:, 0] < filter[1])\n    cond2 = bitwise_and(data[:, 1] > filter[2], data[:, 1] < filter[3])\n    cond3 = bitwise_and(data[:, 2] > filter[4], data[:, 2] < filter[5])\n    cond = bitwise_and(cond1, cond2)\n    cond = bitwise_and(cond, cond3)\n    data = data[where(cond)]\n    \"\"\"x, y, z = array(data[:, 0]), array(data[:, 1]), array(data[:, 2])\n    x = x[where(bitwise_and(data[:, 0] > filter[0], data[:, 0] < filter[1]))]\n    y = y[where(bitwise_and(data[:, 1] > filter[2], data[:, 1] < filter[3]))]\n    z = z[where(bitwise_and(data[:, 2] > filter[4], data[:, 2] < filter[5]))]\"\"\"\n    \"\"\"data = array()\n    data = append(data, x)\n    data = append(data, y)\n    data = append(data, z)\n    print(data)\n    x, y, z = x[0:, ], y[0:, ], z[0:, ]\"\"\"\n    \"\"\"for i in range(n):\n        m = len(x[i])\n        for j in range(m):\n            x1 = x[i][j]\n            y1 = y[i][j]\n            z1 = z[i][j]\n            if x1 < filter[0] or x1 > filter[1] or y1 < filter[2] or y1 > filter[3] or z1 < filter[4] or z1 > filter[5]:\n                delete()\"\"\"\n    return data\n" }, { "alpha_fraction": 0.5359786748886108, "alphanum_fraction": 0.5510808229446411, "avg_line_length": 38.26744079589844, "blob_id": "50ba9ebdf31b21bc4fb6dcfb2fca9bf13024eaf8", "content_id": "dbd0439c821e8a8d4279e54b5383d4202a5bae90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3438, "license_type": "no_license", "max_line_length": 105, "num_lines": 86, "path": "/parser.py", "repo_name": "radmimir/Klimov_Fastems_parser", "src_encoding": "UTF-8", "text": "import 
os\nimport cmath, math\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom itertools import groupby\nfrom matplotlib import pyplot as plt\nfrom scipy.interpolate import griddata\nimport pandas as pd\nimport plotly.graph_objects as go\nimport numpy as np\nimport locale\nimport gc\nfrom constants import constants as cnt\nimport convert, tools\nfrom scipy import ndimage\nimport statistics as sts\nimport sys\nfrom collections import Mapping, Container\nfrom sys import getsizeof\n\n\ndef main():\n    (x_up, x_down), (y_up, y_down), (z_up, z_down) = convert.convert(1)\n    os.system('chcp 668>nul')\n    constants = cnt()\n    Wing_offset_up = 0.01  # 153.02\n    Wing_offset_down = 0.01  # 154.22\n    # x_up, y_up, z_up = tools.np_optim(x_up, y_up, z_up, Wing_offset_up)\n    # x_down, y_down, z_down = tools.np_optim(x_down, y_down, z_down, Wing_offset_down)\n    # Attempt to sort via a dictionary - crashes with an out-of-memory error\n    \"\"\"d_up = dict.fromkeys(z_up, [])\n    d_down = dict.fromkeys(z_down, [])\n    for j in range(len(z_up)):\n        d_up[z_up[j]].append([x_up[j], y_up[j]])\n    for j in range(len(z_down)):\n        d_down[z_down[j]].append([x_down[j], y_down[j]])\n    sorted_keys_up = sorted(z_up)\n    sorted_keys_down = sorted(z_down)\n    print(deep_getsizeof(d_up), sys.getsizeof(sorted_keys_up))\n    x_up, y_up, z_up, x_down, y_down, z_down = [], [], [], [], [], []\n    for key in sorted_keys_up:\n        for dot_xy in d_up[key]:\n            x_up.append(dot_xy[0])\n            y_up.append(dot_xy[1])\n            z_up.append(key)\n    for key in sorted_keys_down:\n        for dot_xy in d_down[key]:\n            x_down.append(dot_xy[0])\n            y_down.append(dot_xy[1])\n            z_down.append(key)\"\"\"\n\n    x, y, z = ([x_up[::5], x_down[::5]], [y_up[::5], y_down[::5]],\n               [z_up[::5], z_down[::5]])\n    (x_up, x_down), (y_up, y_down), (z_up, z_down) = x, y, z\n    n = len(z_up)\n    # tools.graph3d(x, y, z)\n    x_up, y_up, z_up = tools.offset(x_up, y_up, z_up, constants)\n    x_down, y_down, z_down = tools.offset(x_down, y_down, z_down, constants)\n    for j in range(n):\n        x_up[j], z_up[j] = tools.rotate_polar(x_up[j], z_up[j], offset_phi=constants['y_rotate'])\n        x_up[j], y_up[j] = tools.rotate_polar(x_up[j], y_up[j], offset_phi=constants['z_rotate'])\n        y_up[j], z_up[j] = tools.rotate_polar(y_up[j], z_up[j], offset_phi=constants['x_rotate'])\n    x_down, y_down, z_down = tools.offset(x_down, y_down, z_down, constants, offset_x=-5, offset_y=-2.75)\n    x1, y1, z1 = (x_up, x_down), (y_up, y_down), (z_up, z_down)\n    # removing \"bad\" points\n    \"\"\"for i in range(len(x)):\n        for j in range(len(z[i])):\n            try:\n                if j >= len(z[i]):\n                    break\n                if z[i][j] < 0 or y[i][j] > 15:\n                    print(\"deleted: \", i, j, x[i][j], y[i][j], z[i][j])\n                    x[i][j] = np.delete(x[i], j, 0)\n                    y[i][j] = np.delete(y[i], j, 0)\n                    z[i][j] = np.delete(z[i], j, 0)\n            except IndexError:\n                print(i, j, len(x[i]), len(y[i]), len(z[i]))\"\"\"\n    # x[i] = list(ndimage.gaussian_filter(x[i], sigma=sts.stdev(x[i])))\n    # y[i] = list(ndimage.gaussian_filter(y[i], sigma=sts.stdev(y[i])))\n    # z[i] = list(ndimage.gaussian_filter(z[i], sigma=sts.stdev(z[i])))\n    tools.graph3d(x1, y1, z1)\n    return 0\n\n\nif __name__ == '__main__':\n    main()\n" }, { "alpha_fraction": 0.4324645698070526, "alphanum_fraction": 0.4447527527809143, "avg_line_length": 43.45374298095703, "blob_id": "988d73322a78de5d8f338ae75de938a3f990adf2", "content_id": "3ca77ea383053f09fcf2b27850f8f6d5a5c5271a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10830, "license_type": "no_license", "max_line_length": 114, "num_lines": 227, "path": "/convert.py", "repo_name": 
"radmimir/Klimov_Fastems_parser", "src_encoding": "UTF-8", "text": "from parser import *\nfrom constants import constants as cnt\n\nfrom numpy import array, append\n\n\ndef convert(amount_of_files=-1): # преобразование файла и возврат в виде 6 numpy - массивов\n consts = cnt()\n dirname = os.path.dirname(__file__)\n in_dir = os.path.join(dirname, 'files\\\\')\n out_dir = os.path.join(dirname, 'result\\\\')\n out_dir = os.path.join(dirname, 'result\\\\')\n x_ar, y_ar, z_ar = array([]), array([]), array([])\n x_ard, y_ard, z_ard = array([]), array([]), array([])\n if os.path.exists(in_dir):\n if not os.path.exists(out_dir):\n os.mkdir(out_dir)\n n = len(os.listdir(in_dir))\n k = 0\n if n != 0:\n for file in os.listdir(in_dir):\n x_ar, y_ar, z_ar = array([]), array([]), array([])\n x_ard, y_ard, z_ard = array([]), array([]), array([])\n infile_name = in_dir + file\n outfile_name_up = out_dir + file[:-8] + \"_up.txt\" # out_dir +\n outfile_name_down = out_dir + file[:-8] + \"_down.txt\" # out_dir +\n print(k + 1, \"Запись. Имя файла=\", infile_name)\n k += 1\n with open(infile_name, 'r') as f:\n inp = f.readlines() # Входные данные\n # Запишем параметры считывания\n params = list(map(int, inp[4].split(';')))\n xpitch = params[2] / consts['PARAMS_OFFSET'] # шаг\n xstart = params[3] / consts['PARAMS_OFFSET'] # Начальное значение x\n data_cnt = params[4] # Количество точек на срезе\n xstop = xstart + xpitch * data_cnt # Конечный x\n n = len(inp) # Количество строк в файле\n # Инициализация переменных\n out = [] # Выходной массив\n # Цикл по строкам\n outfile_up = open(outfile_name_up, 'w')\n outfile_down = open(outfile_name_down, 'w')\n try:\n for i in range(consts['FILE_OFFSET'], n, 1):\n x = xstart - consts['offset_x']\n a = list(map(int, inp[i].split(';')[:-1]))\n y_up = list(map(lambda t: t / consts['DOTS_OFFSET'], a[2:data_cnt + 2]))\n y_down = list(map(lambda t: t / consts['DOTS_OFFSET'], a[data_cnt + 2:2 * data_cnt + 2]))\n z = round(a[1] / 10 ** 3 - consts['z_offset'] - consts['offset_z'], 2) # Позиция Энкодера\n if z < 0:\n continue\n for y in y_up:\n if y > 15:\n continue\n if y != consts['Y_ZERO'] / consts['DOTS_OFFSET']:\n outstr_up = \"{0};{1};{2}\\n\".format(x, y, z) # форматирование строки\n x_ar = append(x_ar, x)\n y_ar = append(y_ar, y)\n z_ar = append(z_ar, z)\n outfile_up.write(outstr_up)\n x = round(x + xpitch, 2)\n else:\n x = round(x + xpitch, 2)\n continue\n x = xstart\n for y in y_down:\n if y > 15:\n continue\n if y != consts['Y_ZERO'] / consts['DOTS_OFFSET']:\n outstr_down = \"{0};{1};{2}\\n\".format(x, y, z) # форматирование строки\n outfile_down.write(outstr_down)\n x_ard = append(x_ard, x) # x + 1.5)\n y_ard = append(y_ard, y) # -y + 19)\n z_ard = append(z_ard, z)\n x = round(x + xpitch, 2)\n else:\n x = round(x + xpitch, 2)\n continue\n amount_of_files -= 1\n if amount_of_files == 0:\n break\n except IndexError:\n print(j)\n outfile_up.close()\n outfile_down.close()\n print(\"Всего\", 2 * k, \"файлов записано.\")\n else:\n print(\"Папка с данными files пуста.\")\n else:\n print(\"Папка с данными files не существует. 
Place the executable file into the files folder.\")\n    return (x_ar, x_ard), (y_ar, y_ard), (z_ar, z_ard)\n\n\ndef read_converted():\n    consts = cnt()\n    dirname = os.path.dirname(__file__)\n    in_dir = os.path.join(dirname, 'result\\\\')\n    print(in_dir)\n    res = {}\n    if not os.path.exists(in_dir):\n        print(\"Error, directory /results not found\")\n    n = len(os.listdir(in_dir))\n    k = 0\n    if n != 0:\n        for file in os.listdir(in_dir):\n            x_ar, y_ar, z_ar = array([], dtype='float'), array([], dtype='float'), array([], dtype='float')\n            x_ard, y_ard, z_ard = array([], dtype='float'), array([], dtype='float'), array([], dtype='float')\n            if file[-6:-4] == 'up':\n                up = 1\n            else:\n                up = 0\n            infile_name = in_dir + file\n            print(k + 1, \"Reading. File name=\", infile_name)\n            k += 1\n            with open(infile_name, 'r') as f:\n                inp = ''.join(f.readlines()).splitlines()  # Input data\n            for i in range(len(inp)):\n                inp[i] = list(map(float, (inp[i].split(sep=';'))))\n\n            try:\n                n = len(inp)\n                for line in inp:\n                    if up:\n                        x_ar = append(x_ar, line[0])\n                        y_ar = append(y_ar, line[1])\n                        z_ar = append(z_ar, line[2])\n                    else:\n                        x_ard = append(x_ard, line[0])\n                        y_ard = append(y_ard, line[1])\n                        z_ard = append(z_ard, line[2])\n                res[file] = (x_ar, x_ard), (y_ar, y_ard), (z_ar, z_ard)\n            except IndexError:\n                print(line)  # the line that caused the error\n        print(\"In total\", k, \"files were read.\")\n    else:\n        print(\"The data folder\", dirname, \"is empty.\")\n    return res\n\n\ndef convert_form(filename, up_mirror, down_mirror,\n                 offset):  # read the file and transform the coordinates depending on the mirror flags\n    # and coordinate offsets; the output goes into 2 files up,down\n    # offset = [offset_z,offset_x_up,offset_y_up,offset_x_down,offset_y_down]\n    if not filename or filename[-7:] != 'profile':\n        return 0\n    consts = cnt()\n    x_ar, y_ar, z_ar = array([]), array([]), array([])\n    x_ard, y_ard, z_ard = array([]), array([]), array([])\n    infile_name = filename\n    outfile_name_up = infile_name[:-8] + \"_up.txt\"  # out_dir +\n    outfile_name_down = infile_name[:-8] + \"_down.txt\"  # out_dir +\n    print(\"Writing. 
Имя файла=\", infile_name)\n with open(infile_name, 'r') as f:\n inp = f.readlines() # Входные данные\n # Запишем параметры считывания\n params = list(map(int, inp[4].split(';')))\n xpitch = params[2] / consts['PARAMS_OFFSET'] # шаг\n xstart = params[3] / consts['PARAMS_OFFSET'] # Начальное значение x\n data_cnt = params[4] # Количество точек на срезе\n xstop = xstart + xpitch * data_cnt # Конечный x\n n = len(inp) # Количество строк в файле\n # Инициализация переменных\n out = [] # Выходной массив\n # Цикл по строкам\n outfile_up = open(outfile_name_up, 'w')\n outfile_down = open(outfile_name_down, 'w')\n if len(inp[6].split(';')) == 1203:\n off_firstline = 0\n elif len(inp[6].split(';')) == 1204:\n off_firstline = 1\n else:\n return 0\n try:\n for i in range(consts['FILE_OFFSET'], n, 1):\n x_up = xstart + offset[1]\n\n if i != 6 and off_firstline:\n off_firstline = 0\n a = list(map(int, inp[i].split(';')[off_firstline:-1]))\n y_up = list(map(lambda t: t / consts['DOTS_OFFSET'], a[2:data_cnt + 2]))\n y_down = list(map(lambda t: t / consts['DOTS_OFFSET'], a[data_cnt + 2:2 * data_cnt + 2]))\n z = round(a[1] / 10 ** 3 + offset[0], 2) # Позиция Энкодера\n for y in y_up:\n if y != consts['Y_ZERO'] / consts['DOTS_OFFSET']:\n if up_mirror:\n y = -y\n y = y + offset[2]\n outstr_up = \"{0};{1};{2}\\n\".format(x_up, y, z) # форматирование строки\n outfile_up.write(outstr_up)\n x_ar = append(x_ar, x_up) #\n y_ar = append(y_ar, y)\n z_ar = append(z_ar, z)\n x_up = round(x_up + xpitch, 2)\n else:\n x_up = round(x_up + xpitch, 2)\n continue\n if down_mirror:\n x_down = -xstart + offset[3]\n xpitch_down = -xpitch\n else:\n x_down = xstart + offset[3]\n xpitch_down = xpitch\n for y in y_down:\n if y != consts['Y_ZERO'] / consts['DOTS_OFFSET']:\n y = y + offset[4]\n outstr_down = \"{0};{1};{2}\\n\".format(x_down, y, z) # форматирование строки\n outfile_down.write(outstr_down)\n x_ard = append(x_ard, x_down) # x + 1.5)\n y_ard = append(y_ard, y) # -y + 19)\n z_ard = append(z_ard, z)\n x_down = round(x_down + xpitch_down, 2)\n else:\n x_down = round(x_down + xpitch_down, 2)\n continue\n except IndexError:\n print(j)\n outfile_up.close()\n outfile_down.close()\n return 1 # (x_ar, x_ard), (y_ar, y_ard), (z_ar, z_ard)\n\n\nif __name__ == '__main__':\n offset = [0, 0, 0, 0, 0]\n res = convert_form(r'D:\\Projects\\Klimov_Fastems_parser\\files\\2.profile', False, False, offset)\n # res = read_converted()\n # print(res.keys())\n # convert(1)\n" } ]
7
thakurv/pivotal-hw
https://github.com/thakurv/pivotal-hw
ad8793a07907ebbaf48c962d1339c56cb5943d20
cdd0145c91418bfedfd60a1fe021989d22ca96fb
8644f333fbab1928a15aa242933166b9b0909624
refs/heads/master
2020-04-06T07:01:37.048044
2014-07-15T18:14:02
2014-07-15T18:14:02
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4442105293273926, "alphanum_fraction": 0.4736842215061188, "avg_line_length": 18.70833396911621, "blob_id": "d240b12ece918fabc77d2628b4ca805970640ba1", "content_id": "f96f5e2609fb632807012322701e95f2094a8fcd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 475, "license_type": "no_license", "max_line_length": 53, "num_lines": 24, "path": "/fizzbuzz.py", "repo_name": "thakurv/pivotal-hw", "src_encoding": "UTF-8", "text": "'''\nmultiple of 3 'Fizz' and 5 'Buzz' and both 'FizzBuzz'\n'''\nimport os,sys\nfrom flask import Flask\napp = Flask(__name__)\[email protected](\"/\")\n#class FizzBuzz:\ndef numb():\n a=[]\n for i in range(1,101):\n if (i%3==0) and (i%5==0):\n a.append('FizzBuzz')\n elif (i%5)==0:\n a.append('Buzz')\n elif (i%3==0):\n a.append('Fizz')\n else:\n a.append(i)\n return str(a)\n\n\nif __name__ == '__main__':\n app.run()\n\n\n" } ]
1
hiranmayee1123/Technocolabs_mini_project
https://github.com/hiranmayee1123/Technocolabs_mini_project
70b99ca29f120d29c627c71378e0bb0fcbcfb8da
4089ed96f5645f5465838b58cd07d5584651dfe8
3128860b46cf75d3cd8bd18ae39a6ca9373e8cdb
refs/heads/main
2023-02-12T07:45:07.564422
2021-01-10T12:04:51
2021-01-10T12:04:51
327,882,762
3
0
null
null
null
null
null
[ { "alpha_fraction": 0.7751196026802063, "alphanum_fraction": 0.7966507077217102, "avg_line_length": 23.58823585510254, "blob_id": "62ffd767f2c6b387f62d641fe2668f6ca0b096ce", "content_id": "c180590ef87f31e78aa9b9bc4f4cde9cbfb6ba84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 418, "license_type": "no_license", "max_line_length": 69, "num_lines": 17, "path": "/README.md", "repo_name": "hiranmayee1123/Technocolabs_mini_project", "src_encoding": "UTF-8", "text": "Mini Project 1 - Meet Dr. Semmelweis and the discovery of handwashing\n\nMini Project 2 - Credit card company\n\nTask 1: Data Exploration and cleaning\n\nTask 2: Exploring remaining finicial features in dataset\n\nTask 3 : Performing Logistic regression\n\nTask 4 : Fitting a logistic regression model\n\nTask 5 : Cross validation and feature engineering\n\nTask 6: Cross validation Grid search\n\nTask 7 : Deriving finicial insights\n" }, { "alpha_fraction": 0.6376139521598816, "alphanum_fraction": 0.6488085985183716, "avg_line_length": 48.95199966430664, "blob_id": "6fca0e63ff02970b0e475f4822f187d4ad1af1e8", "content_id": "78a3050d7058dc9b2c91e03cc7c5b1723d9ddb65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6259, "license_type": "no_license", "max_line_length": 486, "num_lines": 125, "path": "/Deployement.py", "repo_name": "hiranmayee1123/Technocolabs_mini_project", "src_encoding": "UTF-8", "text": "\n\n\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nimport matplotlib\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import plot_confusion_matrix, plot_roc_curve, plot_precision_recall_curve\nfrom sklearn.metrics import precision_score, recall_score\n\ndef main():\n st.title(\"Credit card default app\")\n html_temp=\"\"\"\n <div style=\"background-color:maroon ;padding:10px\">\n <h2 style=\"color:white;text-align:center;\">Hello viewer </h2>\n </div>\n \"\"\"\n st.markdown(html_temp,unsafe_allow_html=True)\n option=st.write(\"Created by: Mirthula\")\n st.header(option)\n st.sidebar.title(\"Credit Card Default Web App\")\n st.markdown(\"Chances of account to become default by next month 💳\")\n st.markdown(\"Our client has a credit card company. They have brought us a dataset that includes some demographics and recent financial data (the past six months) for a sample of 30,000 of their account holders. This data is at the credit account level; in other words, there is one row for each account. 
Rows are labeled by whether in the next month after the six month historical data period, an account owner has defaulted, or in other words, failed to make the minimum payment.\")\n st.markdown(\"Choose your classifier from the given drop down box and you can set your hyperparameters accordingly and obtain the suitable metrics\")\n st.sidebar.markdown(\"Chances of account to get defaulted by next month 💳\")\n\n\n @st.cache(persist=True)\n def load_data():\n url = \"https://raw.githubusercontent.com/mmirthula02/Credit-card/main/cleaned_data.csv\"\n data = pd.read_csv(url,sep=\",\")\n labelencoder=LabelEncoder()\n for col in data.columns:\n data[col] = labelencoder.fit_transform(data[col])\n return data\n\n @st.cache(persist=True)\n def split(df):\n y = df['default payment next month'].values\n x = df.drop(columns=['default payment next month','ID', 'SEX', 'PAY_2', 'PAY_3', 'PAY_4', 'PAY_5', 'PAY_6',\n 'EDUCATION_CAT', 'graduate school', 'high school','others', 'university'])\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=0)\n return x_train, x_test, y_train, y_test\n\n def plot_metrics(metrics_list):\n if 'Confusion Matrix' in metrics_list:\n st.subheader(\"Confusion Matrix\")\n plot_confusion_matrix(model, x_test, y_test, display_labels=class_names)\n st.pyplot()\n\n if 'ROC Curve' in metrics_list:\n st.subheader(\"ROC Curve\")\n plot_roc_curve(model, x_test, y_test)\n st.pyplot()\n st.set_option('deprecation.showPyplotGlobalUse', False)\n #fig, ax = matplotlib.pyplot.subplots()\n #ax.plot([0,0.5,1],[0,0.5,1])\n #st.pyplot(fig)\n\n \n \n if 'Precision-Recall Curve' in metrics_list:\n st.subheader('Precision-Recall Curve')\n plot_precision_recall_curve(model, x_test, y_test)\n st.pyplot()\n st.set_option('deprecation.showPyplotGlobalUse', False)\n #fig, ax = matplotlib.pyplot.subplots()\n #ax.plot([0,0.5,1],[0,0.5,1])\n #st.pyplot(fig)\n\n df = load_data()\n class_names = ['Default', 'Not Default']\n \n x_train, x_test, y_train, y_test = split(df)\n\n st.sidebar.subheader(\"Choose Classifier\")\n classifier = st.sidebar.selectbox(\"Classifier\", (\"Logistic Regression\", \"Random Forest\"))\n\n if st.sidebar.checkbox(\"Show raw data\", False):\n st.subheader(\"Credit card default (Classification)\")\n st.write(df)\n #st.markdown(\"This data set includes descriptions of bank account credit card payment data, we prdict the result analyzing these datas.\")\n\n if classifier == 'Logistic Regression':\n st.sidebar.subheader(\"Model Hyperparameters\")\n C = st.sidebar.number_input(\"C (Regularization parameter)\", 0.01, 10.0, step=0.01, key='C_LR')\n max_iter = st.sidebar.slider(\"Maximum number of iterations\", 50, 500, key='max_iter')\n\n metrics = st.sidebar.multiselect(\"What metrics to plot?\", ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve'))\n\n if st.sidebar.button(\"Classify\", key='classify'):\n st.subheader(\"Logistic Regression Results\")\n model = LogisticRegression(C=C, penalty='l2', max_iter=max_iter)\n model.fit(x_train, y_train)\n accuracy = model.score(x_test, y_test)\n y_pred = model.predict(x_test)\n st.write(\"Accuracy: \", accuracy.round(2))\n st.write(\"Precision: \", precision_score(y_test, y_pred, labels=class_names).round(2))\n st.write(\"Recall: \", recall_score(y_test, y_pred, labels=class_names).round(2))\n plot_metrics(metrics)\n \n if classifier == 'Random Forest':\n st.sidebar.subheader(\"Model Hyperparameters\")\n n_estimators = st.sidebar.number_input(\"The number of trees in the forest\", 100, 5000, step=10, 
key='n_estimators')\n            max_depth = st.sidebar.number_input(\"The maximum depth of the tree\", 1, 20, step=1, key='max_depth')\n            bootstrap = st.sidebar.radio(\"Bootstrap samples when building trees\", ('True', 'False'), key='bootstrap')\n            metrics = st.sidebar.multiselect(\"What metrics to plot?\", ('Confusion Matrix', 'ROC Curve', 'Precision-Recall Curve'))\n\n            if st.sidebar.button(\"Classify\", key='classify'):\n                st.subheader(\"Random Forest Results\")\n                # the radio widget returns the strings 'True'/'False', so convert to a real boolean\n                model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, bootstrap=(bootstrap == 'True'), n_jobs=-1)\n                model.fit(x_train, y_train)\n                accuracy = model.score(x_test, y_test)\n                y_pred = model.predict(x_test)\n                st.write(\"Accuracy: \", accuracy.round(2))\n                st.write(\"Precision: \", precision_score(y_test, y_pred, labels=class_names).round(2))\n                st.write(\"Recall: \", recall_score(y_test, y_pred, labels=class_names).round(2))\n                plot_metrics(metrics)\n\n\nif __name__ == '__main__':\n    main()\n" } ]
2
jgold3/data-structures-game
https://github.com/jgold3/data-structures-game
065174a4db3cf34d90438cb5a01f0aeaee851410
c29deac38c10782883581963b2064fd6de4370b6
40b87ffd7a2225115676777461396800f8659c3f
refs/heads/master
2023-03-30T10:18:47.134358
2021-03-29T20:58:54
2021-03-29T20:58:54
346,151,382
0
0
Apache-2.0
2021-03-09T21:38:02
2021-03-09T21:38:03
2021-03-29T20:24:30
null
[ { "alpha_fraction": 0.6033519506454468, "alphanum_fraction": 0.6033519506454468, "avg_line_length": 13.916666984558105, "blob_id": "18e3675de5c82d6ae8de26bad001078cfa71fc61", "content_id": "bd24fee29b7c9511ab905af8c2832bfce8b3bfce", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 179, "license_type": "permissive", "max_line_length": 25, "num_lines": 12, "path": "/src/Components/Columns.js", "repo_name": "jgold3/data-structures-game", "src_encoding": "UTF-8", "text": "export const COLUMNS = [\n\t{\n\t\tHeader: 'Username',\n\t\tFooter: 'Username',\n\t\taccessor: 'user_id'\n\t},\n\t{\n\t\tHeader: 'Career Score',\n\t\tFooter: 'Career Score',\n\t\taccessor: 'points'\n\t}\n]\n" }, { "alpha_fraction": 0.6630824208259583, "alphanum_fraction": 0.6630824208259583, "avg_line_length": 40.849998474121094, "blob_id": "dfb96c2cf0d5e049ebaa653940991192c8f3e7a7", "content_id": "a16c4da34a902c661e50c84933a47a9268fb64e1", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 837, "license_type": "permissive", "max_line_length": 129, "num_lines": 20, "path": "/game_board/urls.py", "repo_name": "jgold3/data-structures-game", "src_encoding": "UTF-8", "text": "\"\"\"\n URL's for the Game Board app.\n\"\"\"\nfrom django.urls import path\nfrom game_board.api import api\nfrom . import views\n\nurlpatterns = [\n\n # Views\n path('', views.game_board, name='game-board'),\n\n # Game Play API Calls\n path('api', api.api_overview, name='game-board-api_overview'),\n path('api/start_game/<str:difficulty>/<str:player_ids>/<str:data_structures>', api.start_game, name='game-board-start_game'),\n path('api/board/<str:game_id>', api.board, name='game-board-game_status'),\n path('api/rebalance/<str:game_id>/<str:user_id>/<str:token>', api.rebalance, name='game-board-rebalance'),\n path('api/ai_pick/<str:game_id>/<str:user_id>/<str:token>', api.ai_pick, name='game-board-ai_pick'),\n path('api/action/<str:card>/<str:game_id>/<str:user_id>/<str:token>', api.action, name='game-board-action'),\n]\n" } ]
2
davidkorea/mobile_data_usage
https://github.com/davidkorea/mobile_data_usage
cf47c89494e3185c67c12740a30fb428b2506e4e
e044a1542e7f1657bf508c5c872d36914d33eac7
cf0e1a048b024f4ceb9cc913071c3a939a781787
refs/heads/master
2020-03-16T07:15:28.032084
2018-05-09T03:52:11
2018-05-09T03:52:11
132,572,081
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6680260896682739, "alphanum_fraction": 0.6696574091911316, "avg_line_length": 28.926828384399414, "blob_id": "37d2af9f9c901dc5e8fc748847d15e161439dd70", "content_id": "a23f19a01860af6bbd9344428cb135f74a86961b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1226, "license_type": "no_license", "max_line_length": 94, "num_lines": 41, "path": "/mobile_data_usage.py", "repo_name": "davidkorea/mobile_data_usage", "src_encoding": "UTF-8", "text": "import os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pd_utils import utils\n\nuser_device = './mobile_data/user_device.csv'\nuser_usage = './mobile_data/user_usage.csv'\n\ndef collect_data():\n    device_df = pd.read_csv(user_device)\n    usage_df = pd.read_csv(user_usage)\n    return device_df,usage_df\n\ndef process_data(device_df,usage_df):\n    # platform + model\n    device_df['platform_version'] = device_df['platform_version'].astype('str')\n    device_df['system'] = device_df['platform'].str.cat(device_df['platform_version'],sep='_')\n    # print(device_df['system'])\n\n    merge_df = pd.merge(device_df,usage_df,how='inner',on='user_id')\n    return merge_df\n\ndef analyse_data(merge_df):\n    mean_mb_by_system = merge_df.groupby('system')['monthly_mb'].mean()\n    mean_mb_by_system.sort_values(ascending=False,inplace=True)\n    # print(mean_mb_by_system.head())\n    mean_mb_by_system.plot(kind='bar',rot=45)\n    plt.ylabel('Monthly Usage (MB)')\n    plt.title('Average data usage by mobile system')\n    plt.tight_layout()\n    plt.show()\n\ndef main():\n    device_df, usage_df = collect_data()\n    # utils(device_df)\n    # utils(usage_df)\n    merge_df = process_data(device_df, usage_df)\n    analyse_data(merge_df)\n\n\nmain()" }, { "alpha_fraction": 0.6712962985038757, "alphanum_fraction": 0.6817129850387573, "avg_line_length": 23, "blob_id": "64a0348f3b90d8a4942efb7865ec32e8fc5cd946", "content_id": "5ef2a464f3fc01ed6a3dccdc0f0637e8db325244", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 865, "license_type": "no_license", "max_line_length": 94, "num_lines": 36, "path": "/README.md", "repo_name": "davidkorea/mobile_data_usage", "src_encoding": "UTF-8", "text": "# mobile_data_usage\n\n# 1. Summary\n\n1. read_csv\n```python\ndevice_df = pd.read_csv('./mobile_data/user_device.csv')\nusage_df = pd.read_csv('./mobile_data/user_usage.csv')\n```\n\n2. combine two columns\n```python\n# convert the column to str type\ndevice_df['platform_version'] = device_df['platform_version'].astype('str')\n\n# combine and create a new column 'system'\ndevice_df['system'] = device_df['platform'].str.cat( device_df['platform_version'], sep='_' )\n```\n\n3. merge two csv / dataframes\n```python\nmerge_df = pd.merge(device_df, usage_df, how='inner', on='user_id')\n```\n\n4. groupby\n```python\nmean_usage = merge_df.groupby('system')['monthly_mb'].mean()\n# sort descending; inplace=True modifies the existing Series instead of creating a new one\nmean_usage.sort_values(ascending=False, inplace=True)\n```\n\n5. plot\n```python\nmean_usage.plot(kind='bar',rot=45)\nplt.show()\n```\n" }, { "alpha_fraction": 0.7379181981086731, "alphanum_fraction": 0.7397769689559937, "avg_line_length": 24.66666603088379, "blob_id": "72a1f8ac958995ca4ab16d91cffa32c1140ee032", "content_id": "a6fe1f8a1686375d5cf7a88ba88b35b05695d589", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 538, "license_type": "no_license", "max_line_length": 72, "num_lines": 21, "path": "/hw_major_income/major_income.py", "repo_name": "davidkorea/mobile_data_usage", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nfrom pd_utils import utils\n\nedu = './data_employee/employee_edu.csv'\ninfo = './data_employee/employee_info.csv'\n\nedu_df = pd.read_csv(edu)\ninfo_df = pd.read_csv(info)\n\nmerge_df = pd.merge(edu_df,info_df,how='inner',on='EmployeeNumber')\n# utils(merge_df)\n\nmean_income = merge_df.groupby('EducationField')['MonthlyIncome'].mean()\nmean_income.sort_values(ascending=False,inplace=True)\nprint(mean_income)\n\nmean_income.plot(kind='bar',rot=0)\nplt.ylabel('Income')\nplt.tight_layout()\nplt.show()" } ]
3
juniorbraz93/Exercicios-de-python-2
https://github.com/juniorbraz93/Exercicios-de-python-2
1541cf877b3ac6fc947bf62ef5d2e14bf89c3d9a
82359b6b13e9f1848be0a7a6a51d3183df6c31b6
b9d00d6de7245d603790e67e54425dc255911445
refs/heads/master
2023-02-11T17:15:14.064182
2021-01-16T16:27:42
2021-01-16T16:27:42
330,205,353
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7395833134651184, "alphanum_fraction": 0.7395833134651184, "avg_line_length": 41.66666793823242, "blob_id": "293b910ef458b02a20d6d982ceca0842198653f1", "content_id": "43c7c785be143d593dac31ffe1bae013d98a49c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 98, "num_lines": 9, "path": "/08.py", "repo_name": "juniorbraz93/Exercicios-de-python-2", "src_encoding": "UTF-8", "text": "# Faça um Programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês.\n# Calcule e mostre o total do seu salário no referido mês.\n\nsalario_hora = float(input('Quanto você ganha por hora: '))\nhora_mes = float(input('Qual o número de horas trabalhadas no mês: '))\n\nsalario = salario_hora * hora_mes\n\nprint('Total do seu salário no referido mês: ', salario)\n" }, { "alpha_fraction": 0.6927083134651184, "alphanum_fraction": 0.7083333134651184, "avg_line_length": 26.428571701049805, "blob_id": "48616fece75cd207c580009e7bdd1841abfd846c", "content_id": "742218bd5edf996da44a7d38483bbacdc55f091d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 60, "num_lines": 7, "path": "/05.py", "repo_name": "juniorbraz93/Exercicios-de-python-2", "src_encoding": "UTF-8", "text": "# Faça um Programa que converta metros para centímetros.\nmetros = float(input('Digite um valor em metros: '))\n\n\ncm = metros * 100\n\nprint(metros, 'metros, são o mesmo que', cm, 'centímetros.')\n" }, { "alpha_fraction": 0.800000011920929, "alphanum_fraction": 0.800000011920929, "avg_line_length": 99, "blob_id": "60fd360a0f5b4564d2eb47fb014b787c46714b19", "content_id": "e1b6bf0384ea0275310eb866dbec8b01d9d7edf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 102, "license_type": "no_license", "max_line_length": 99, "num_lines": 1, "path": "/10.py", "repo_name": "juniorbraz93/Exercicios-de-python-2", "src_encoding": "UTF-8", "text": "# Faça um Programa que peça a temperatura em graus Celsius, transforme e mostre em graus Fahrenheit.\n" }, { "alpha_fraction": 0.6267605423927307, "alphanum_fraction": 0.658450722694397, "avg_line_length": 34.5, "blob_id": "027d54ac488c5753e2f31219b5e35084a9297caf", "content_id": "584248cd4498686add6bc918718848caaea5d88c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 288, "license_type": "no_license", "max_line_length": 113, "num_lines": 8, "path": "/09.py", "repo_name": "juniorbraz93/Exercicios-de-python-2", "src_encoding": "UTF-8", "text": "# Faça um Programa que peça a temperatura em graus Fahrenheit, transforme e mostre a temperatura em graus Celsius.\n# C = 5 * ((F-32) / 9).\n\nF = int(input('Qual é a temperatura em graus Fahrenheit: '))\n\nC = 5 * ((F-32) / 9)\n\nprint('A temperatura em graus Celsius é: ', format(C, '.0f'))\n" }, { "alpha_fraction": 0.625, "alphanum_fraction": 0.6458333134651184, "avg_line_length": 26.428571701049805, "blob_id": "3c027800e565a14fdd610e449c9fe29e38f38a4a", "content_id": "8187f5f9d16f0a1d809e19b2ddf2f3fbb3cd4c63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 200, "license_type": "no_license", "max_line_length": 76, "num_lines": 7, "path": "/06.py", "repo_name": "juniorbraz93/Exercicios-de-python-2", "src_encoding": "UTF-8", "text": "# Faça 
um Programa que peça o raio de um círculo, calcule e mostre sua área.\n\nraio = float(input('Digite o raio de um círculo: '))\n\nA = (3.14 * (raio ** 2))\n\nprint('A área do círculo é: ', A)\n" }, { "alpha_fraction": 0.7151162624359131, "alphanum_fraction": 0.7209302186965942, "avg_line_length": 30.272727966308594, "blob_id": "4dfc7b4b924493f91ea6252f5bf5ce5b6c3a9bc9", "content_id": "70cee9a43d4cfeaa8e1fbea2e551f0a9c6dc9398", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 352, "license_type": "no_license", "max_line_length": 106, "num_lines": 11, "path": "/07.py", "repo_name": "juniorbraz93/Exercicios-de-python-2", "src_encoding": "UTF-8", "text": "# Faça um Programa que calcule a área de um quadrado, em seguida mostre o dobro desta área para o usuário.\n\nlado_quadrado = float(input(\"digite o valor do lado do quadrado: \"))\n\n\narea_quadrado = lado_quadrado ** 2\n\ndobro_area = area_quadrado * 2\n\nprint('A área do quadrado é: ', area_quadrado)\nprint('O dobro área do quadrado é: ', dobro_area)\n" }, { "alpha_fraction": 0.6712328791618347, "alphanum_fraction": 0.689497709274292, "avg_line_length": 26.375, "blob_id": "e52012589575d6a4258d51007a28fef8dcc84ffa", "content_id": "6e0f6bd810689c6111de18f2b19eca037ed64708", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "no_license", "max_line_length": 58, "num_lines": 8, "path": "/03.py", "repo_name": "juniorbraz93/Exercicios-de-python-2", "src_encoding": "UTF-8", "text": "# Faça um Programa que peça dois números e imprima a soma.\n\nnum1 = int(input('Digite o primeiro número: '))\nnum2 = int(input('Digite o segundo número: '))\n\nsoma = num1 + num2\n\nprint('A soma dos dois números é: ', soma)\n" } ]
7
Sufiya2007/newrepo
https://github.com/Sufiya2007/newrepo
bca684fc5f7f18bee5cafe50df4c47ac43d2ffbe
81ae1300f8781fe616b6859ef0485fcceb642ead
3a12429da114bc2bc80261e53220b2c3fb3a661c
refs/heads/master
2022-12-16T16:16:31.855791
2020-09-13T17:12:21
2020-09-13T17:12:21
295,198,429
0
0
null
2020-09-13T17:06:51
2020-09-13T17:12:23
2020-09-13T17:15:54
Python
[ { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7142857313156128, "avg_line_length": 15.333333015441895, "blob_id": "c373a34591fd7ed24e31f9139b778593e04564b4", "content_id": "37b742671b5d333e02c938a566519c822bf1bb49", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 49, "license_type": "no_license", "max_line_length": 29, "num_lines": 3, "path": "/sufi.py", "repo_name": "Sufiya2007/newrepo", "src_encoding": "UTF-8", "text": "#Display the text\n\nprint(\"This is the new file\")\n" } ]
1
lilydiao/allfiles
https://github.com/lilydiao/allfiles
cfee0a49e8b4b8d44f7fe1598e66f6158d19394f
ac89b5896f1d5ef237b8bdd1db245b978299e126
2980d4b11f850e5a1ba792c2fb7473950bbd6359
refs/heads/master
2020-06-20T11:41:08.012940
2019-07-16T03:11:52
2019-07-16T03:11:52
197,111,005
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.37458351254463196, "alphanum_fraction": 0.42418211698532104, "avg_line_length": 15.404204368591309, "blob_id": "f296b0f8d1005a0ccf4d3e2bd4641a4a23f6aff5", "content_id": "ad462b55879f82962d274e10d2b4e13e69747c46", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 44719, "license_type": "no_license", "max_line_length": 395, "num_lines": 2902, "path": "/halligalli7.py", "repo_name": "lilydiao/allfiles", "src_encoding": "UTF-8", "text": "# ------------------------------------------------------\r# Name: Zihan Diao,Helena Shangguan, Chenhui Jia\r# Filename: halligalli7.py\r# Date: 10-18-2018\r# Description: The program creates a board game called Halli Galli. Players deal cards\r# and calculate the total number of each fruit on the table. A player\r# gets extra cards if they press the bell when the total number of the same fruit\r# equals five or ten. If they press the bell incorrectly, they lose two cards.\r# The first person who runs out of cards loses.\r# ------------------------------------------------------\rfrom threading import Thread\r\rfrom time import sleep\r\rfrom graphics import*\r\rfrom random import*\r\rfrom playsound import playsound\r\rwin=GraphWin(\"background\",1400,800)\r\rstarter=Image(Point(700,400),\"Hali.gif\")\r\rstarter.draw(win)\r\renterText = Text(Point(900,700), \"Press ENTER to start >>>>>\")\r\renterText.setSize(30)\r\renterText.draw(win)\r\r\r\rclass Nb1():\r\r    def draw(self):\r\r        newbackground1=Image(Point(300,300),\"1.gif\")\r\r        newbackground1.draw(win)\r\r\r\rclass Nb2():\r\r    def draw(self):\r\r        newbackground1=Image(Point(700,500),\"2.gif\")\r\r        newbackground1.draw(win)\r\r\r\rclass Nb3():\r\r    def draw(self):\r\r        newbackground1=Image(Point(1102,300),\"3.gif\")\r\r        newbackground1.draw(win)\r\r\r\rclass Card1():\r\r    def __init__(self):\r\r        self.banna = 0\r\r        self.strawberry = 0\r\r        self.lime = 2\r\r        self.plum = 1\r\r    def draw(self,point):\r\r        card=Image(point,\"l2p1.gif\")\r\r        card.draw(win)\r\r\r\rclass Card2():\r\r    def __init__(self):\r\r        self.banna = 1\r\r        self.strawberry = 0\r\r        self.lime = 3\r\r        self.plum = 0\r\r    def draw(self,point):\r\r        card=Image(point,\"l3b1.gif\")\r\r        card.draw(win)\r\r\r\rclass Card3():\r\r    def __init__(self):\r\r        self.banna = 1\r\r        self.strawberry = 0\r\r        self.lime = 0\r\r        self.plum = 0\r\r    def draw(self,point):\r\r        card=Image(point,\"b1.gif\")\r\r        card.draw(win)\r\r\r\rclass Card4():\r\r    def __init__(self):\r\r        self.banna = 1\r\r        self.strawberry = 0\r\r        self.lime = 1\r\r        self.plum = 0\r\r    def draw(self,point):\r\r        card=Image(point,\"b1l1.gif\")\r\r        card.draw(win)\r\r\r\rclass Card5():\r\r    def __init__(self):\r\r        self.banna = 1\r\r        self.strawberry = 1\r\r        self.lime = 0\r\r        self.plum = 0\r\r    def draw(self,point):\r\r        card=Image(point,\"b1s1.gif\")\r\r        card.draw(win)\r\r\r\rclass Card6():\r\r    def __init__(self):\r\r        self.banna = 2\r\r        self.strawberry = 0\r\r        self.lime = 0\r\r        self.plum = 0\r\r    def draw(self,point):\r\r        card=Image(point,\"b2.gif\")\r\r        card.draw(win)\r\r\r\rclass Card7():\r\r    def __init__(self):\r\r        self.banna = 2\r\r        self.strawberry = 0\r\r        self.lime = 1\r\r        self.plum = 0\r\r    def draw(self,point):\r\r        card=Image(point,\"b2l1.gif\")\r\r        card.draw(win)\r\r\r\r\r\rclass Card8():\r\r    def __init__(self):\r\r        self.banna = 2\r\r        self.strawberry = 0\r\r        self.lime = 0\r\r        self.plum = 1\r\r    def draw(self,point):\r\r        card=Image(point,\"b2p1.gif\")\r\r        card.draw(win)\r\r\r\rclass Card9():\r\r    def __init__(self):\r\r        self.banna = 2\r\r        
self.strawberry = 1\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"b2s1.gif\")\r\r card.draw(win)\r\r\r\rclass Card10():\r\r def __init__(self):\r\r self.banna = 3\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"b3.gif\")\r\r card.draw(win)\r\r\r\rclass Card11():\r\r def __init__(self):\r\r self.banna = 3\r\r self.strawberry = 0\r\r self.lime = 1\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"b3l1.gif\")\r\r card.draw(win)\r\r\r\rclass Card12():\r\r def __init__(self):\r\r self.banna = 3\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 1\r\r def draw(self,point):\r\r card=Image(point,\"b3p1.gif\")\r\r card.draw(win)\r\r\r\rclass Card13():\r\r def __init__(self):\r\r self.banna = 3\r\r self.strawberry = 1\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"b3s1.gif\")\r\r card.draw(win)\r\r\r\rclass Card14():\r\r def __init__(self):\r\r self.banna = 4\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"b4.gif\")\r\r card.draw(win)\r\r\r\rclass Card15():\r\r def __init__(self):\r\r self.banna = 4\r\r self.strawberry = 1\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"b4s1.gif\")\r\r card.draw(win)\r\r\r\r\r\rclass Card16():\r\r def __init__(self):\r\r self.banna = 5\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"b5.gif\")\r\r card.draw(win)\r\r\r\rclass Card17():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 1\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"l1.gif\")\r\r card.draw(win)\r\r\r\rclass Card18():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 2\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"l2.gif\")\r\r card.draw(win)\r\r\r\rclass Card19():\r\r def __init__(self):\r\r self.banna = 1\r\r self.strawberry = 0\r\r self.lime = 2\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"l2b1.gif\")\r\r card.draw(win)\r\r\r\r\r\rclass Card20():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 1\r\r self.lime = 2\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"l2s1.gif\")\r\r card.draw(win)\r\r\r\rclass Card21():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 3\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"l3.gif\")\r\r card.draw(win)\r\r\r\rclass Card22():\r\r def __init__(self):\r\r self.banna = 1\r\r self.strawberry = 0\r\r self.lime = 3\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"l3b1.gif\")\r\r card.draw(win)\r\r\r\rclass Card23():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 3\r\r self.plum = 1\r\r def draw(self,point):\r\r card=Image(point,\"l3p1.gif\")\r\r card.draw(win)\r\r\r\rclass Card24():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 1\r\r self.lime = 3\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"l3s1.gif\")\r\r card.draw(win)\r\r\r\rclass Card25():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 4\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"l4.gif\")\r\r card.draw(win)\r\r\r\rclass Card26():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 
5\r\r def draw(self,point):\r\r card=Image(point,\"p5.gif\")\r\r card.draw(win)\r\r\r\rclass Card27():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 1\r\r def draw(self,point):\r\r card=Image(point,\"p1.gif\")\r\r card.draw(win)\r\r\r\rclass Card28():\r\r def __init__(self):\r\r self.banna = 1\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 1\r\r def draw(self,point):\r\r card=Image(point,\"p1b1.gif\")\r\r card.draw(win)\r\r\r\rclass Card29():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 1\r\r self.plum = 1\r\r def draw(self,point):\r\r card=Image(point,\"p1l1.gif\")\r\r card.draw(win)\r\r\r\rclass Card30():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 2\r\r def draw(self,point):\r\r card=Image(point,\"p2.gif\")\r\r card.draw(win)\r\r\r\rclass Card31():\r\r def __init__(self):\r\r self.banna = 1\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 2\r\r def draw(self,point):\r\r card=Image(point,\"p2b1.gif\")\r\r card.draw(win)\r\r\r\rclass Card32():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 1\r\r self.plum = 2\r\r def draw(self,point):\r\r card=Image(point,\"p2l1.gif\")\r\r card.draw(win)\r\r\r\rclass Card33():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 1\r\r self.lime = 0\r\r self.plum = 2\r\r def draw(self,point):\r\r card=Image(point,\"p2s1.gif\")\r\r card.draw(win)\r\r\r\rclass Card34():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 3\r\r def draw(self,point):\r\r card=Image(point,\"p3.gif\")\r\r card.draw(win)\r\r\r\rclass Card35():\r\r def __init__(self):\r\r self.banna = 1\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 3\r\r def draw(self,point):\r\r card=Image(point,\"p3b1.gif\")\r\r card.draw(win)\r\r\r\rclass Card36():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 1\r\r self.plum = 3\r\r def draw(self,point):\r\r card=Image(point,\"p3l1.gif\")\r\r card.draw(win)\r\r\r\rclass Card37():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 1\r\r self.lime = 0\r\r self.plum = 3\r\r def draw(self,point):\r\r card=Image(point,\"p3s1.gif\")\r\r card.draw(win)\r\r\r\rclass Card38():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 4\r\r def draw(self,point):\r\r card=Image(point,\"p4.gif\")\r\r card.draw(win)\r\r\r\rclass Card39():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 0\r\r self.lime = 0\r\r self.plum = 5\r\r def draw(self,point):\r\r card=Image(point,\"p5.gif\")\r\r card.draw(win)\r\r\r\rclass Card40():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 1\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"s1.gif\")\r\r card.draw(win)\r\r\r\rclass Card41():\r\r def __init__(self):\r\r self.banna = 1\r\r self.strawberry = 1\r\r self.lime = 0\r\r self.plum = 1\r\r def draw(self,point):\r\r card=Image(point,\"s1b1p1.gif\")\r\r card.draw(win)\r\r\r\rclass Card42():\r\r def __init__(self):\r\r self.banna = 1\r\r self.strawberry = 1\r\r self.lime = 1\r\r self.plum = 1\r\r def draw(self,point):\r\r card=Image(point,\"s1b1p1l1.gif\")\r\r card.draw(win)\r\r\r\rclass Card43():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 1\r\r self.lime = 1\r\r self.plum = 0\r\r def draw(self,point):\r\r 
card=Image(point,\"s1l1.gif\")\r\r card.draw(win)\r\r\r\rclass Card44():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 1\r\r self.lime = 0\r\r self.plum = 1\r\r def draw(self,point):\r\r card=Image(point,\"s1p1.gif\")\r\r card.draw(win)\r\r\r\rclass Card45():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 2\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"s2.gif\")\r\r card.draw(win)\r\r\r\rclass Card46():\r\r def __init__(self):\r\r self.banna = 1\r\r self.strawberry = 2\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"s2b1.gif\")\r\r card.draw(win)\r\r\r\rclass Card47():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 2\r\r self.lime = 1\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"s2l1.gif\")\r\r card.draw(win)\r\r\r\rclass Card48():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 2\r\r self.lime = 0 \r\r self.plum = 1\r\r def draw(self,point):\r\r card=Image(point,\"s2p1.gif\")\r\r card.draw(win)\r\r\r\rclass Card49():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 3\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"s3.gif\")\r\r card.draw(win)\r\r\r\rclass Card50():\r\r def __init__(self):\r\r self.banna = 1\r\r self.strawberry = 3\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"s3b1.gif\")\r\r card.draw(win)\r\r\r\rclass Card51():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 3\r\r self.lime = 1\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"s3l1.gif\")\r\r card.draw(win)\r\r\r\rclass Card52():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 3\r\r self.lime = 0\r\r self.plum = 1\r\r def draw(self,point):\r\r card=Image(point,\"s3p1.gif\")\r\r card.draw(win)\r\r\r\rclass Card53():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 4\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"s4.gif\")\r\r card.draw(win)\r\r\r\rclass Card54():\r\r def __init__(self):\r\r self.banna = 0\r\r self.strawberry = 5\r\r self.lime = 0\r\r self.plum = 0\r\r def draw(self,point):\r\r card=Image(point,\"s5.gif\")\r\r card.draw(win)\r\r\r\rclass Light():\r\r def __init__(self,point):\r\r self.light = Circle(point,10)\r\r self.light.draw(win)\r\r def lighton(self):\r\r self.light.setFill(\"green\")\r\r \r\r def lightoff(self):\r\r self.light.setFill(\"red\")\r\r\r\rclass Player():\r\r def __init__(self):\r\r self.unflippedCards = 10\r\r self.flippedCards = 0\r\r def flipCard(self):\r\r self.unflippedCards -= 1\r\r self.flippedCards += 1\r\r def takeCards(self,p1,p2):\r\r self.flippedCards = 0\r\r p1.flippedCards = 0\r\r p2.flippedCards = 0\r\r def penalty(self,p1,p2):\r\r self.unflippedCards -= 2\r\r p1.unflippedCards += 1\r\r p2.unflippedCards += 1\r\r\r\r\r\rdef main():\r #play back ground music\r\r def play():\r playsound(\"bgm.mp3\")\r \r T = Thread(target=play) # create thread\r T.start()# Launch created thread\r \r\r #creat start page \r while True:\r\r waitForEnter=win.getKey()\r\r if waitForEnter==\"Return\":\r\r background=Image(Point(750,700),\"1600.gif\")\r\r background.draw(win)\r\r break\r\r else:\r\r waitForEnter=win.getKey()\r\r\r \r #record card on desk\r cardOnDesk=0\r\r\r #record the total number of each fruit\r\r #p1\r\r tbanna1=0\r\r tstrawberry1=0\r\r tlime1=0\r\r tplum1=0\r\r\r\r #p2\r\r tbanna2=0\r\r tstrawberry2=0\r\r tlime2=0\r\r 
tplum2=0\r\r\r\r #p3\r\r tbanna3=0\r\r tstrawberry3=0\r\r tlime3=0\r\r tplum3=0\r\r \r\r #creat player instances\r\r p1=Player()\r\r light1=Light(Point(100,100))#change parameter\r\r light1.lightoff()\r\r \r\r p2=Player()\r\r light2=Light(Point(100,200))\r\r light2.lightoff()\r\r \r\r p3=Player()\r\r light3=Light(Point(100,300))\r\r light3.lightoff()\r\r\r\r #creat scoring board for each player\r\r #p1\r\r bb1=Image(Point(300,200),\"lilb.gif\")\r\r bb1.draw(win)\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r \r\r\r\r #p2\r\r bb2=Image(Point(700,400),\"lilb.gif\")\r\r bb2.draw(win)\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win)\r\r\r\r #p3\r\r bb3=Image(Point(1100,200),\"lilb.gif\")\r\r bb3.draw(win)\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win)\r\r\r\r #creat each individual card instances and store in a list\r\r card1=Card1()\r\r card2=Card2()\r\r card3=Card3()\r\r card4=Card4()\r\r card5=Card5()\r\r card6=Card6()\r\r card7=Card7()\r\r card8=Card8()\r\r card9=Card9()\r\r card10=Card10()\r\r card11=Card11()\r\r card12=Card12()\r\r card13=Card13()\r\r card14=Card14()\r\r card15=Card15()\r\r card16=Card16()\r\r card17=Card17()\r\r card18=Card18()\r\r card19=Card19()\r\r card20=Card20()\r\r card21=Card21()\r\r card22=Card22()\r\r card23=Card23()\r\r card24=Card24()\r\r card25=Card25()\r\r card26=Card26()\r\r card27=Card27()\r\r card28=Card28()\r\r card29=Card29()\r\r card30=Card30()\r\r card31=Card31()\r\r card32=Card32()\r\r card33=Card33()\r\r card34=Card34()\r\r card35=Card35()\r\r card36=Card36()\r\r card37=Card37()\r\r card38=Card38()\r\r card39=Card39()\r\r card40=Card40()\r\r card41=Card41()\r\r card42=Card42()\r\r card43=Card43()\r\r card44=Card44()\r\r card45=Card45()\r\r card46=Card46()\r\r card47=Card47()\r\r card48=Card48()\r\r card49=Card49()\r\r card50=Card50()\r\r card51=Card51()\r\r card52=Card52()\r\r card53=Card53()\r\r card54=Card54()\r\r \r\r cardList=[card1,card2,card3,card4,card5,card6,card7,card8,card9,card10,card11,card12,card13,card14,card15,card16,card17,card18,card19,card20,card21,card22,card23,card24,card25,card26,card27,card28,card29,card30,card31,card32,card33,card34,card35,card36,card37,card38,card39,card40,card41,card42,card43,card44,card45,card46,card47,card48,card49,card50,card51,card52,card53,card54] \r\r \r\r\r\r \r\r## #welcome\r\r## print(\"welcome\")\r\r \r\r\r\r while True:\r\r #record the total number of each fruit\r\r tbanna=tbanna1+tbanna2+tbanna3\r\r tstrawberry=tstrawberry1+tstrawberry2+tstrawberry3\r\r tlime=tlime1+tlime2+tlime3\r\r tplum=tplum1+tplum2+tplum3\r\r \r\r #player1's turn to flippcard\r\r light1.lighton()\r\r key=win.getKey()\r\r if key==\"a\":\r\r if p1.unflippedCards!=0:\r\r card=choice(cardList)\r\r tbanna1=card.banna\r\r tstrawberry1=card.strawberry\r\r tlime1=card.lime\r\r tplum1=card.plum\r\r card.draw(Point(300,300))\r\r p1.flipCard()\r\r #change the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r #record the total number of each fruit\r\r cardOnDesk+=1\r\r tbanna=tbanna1+tbanna2+tbanna3\r\r tstrawberry=tstrawberry1+tstrawberry2+tstrawberry3\r\r tlime=tlime1+tlime2+tlime3\r\r tplum=tplum1+tplum2+tplum3\r\r else:\r\r background1=Image(Point(750,700),\"16001.gif\")\r\r background1.draw(win)\r\r 
bar1=Image(Point(700,400),\"bar1.gif\")\r\r bar1.draw(win)\r\r end1 = Text(Point(700,400), \"Player1 ran out of cards!\")\r\r end1.setSize(30)\r\r end1.draw(win)\r\r break\r\r\r\r #check if someone press the bell\r\r key=win.getKey()\r\r if key==\"s\":\r\r #play sound\r\r playsound(\"bell.wav\")\r\r \r\r if (tbanna==5 or tstrawberry==5\r\r or tlime==5 or tplum==5 or tbanna==10 or tstrawberry==10\r\r or tlime==10 or tplum==10):\r\r #give the card to player1\r\r p1.unflippedCards+=cardOnDesk\r\r \r\r #change the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r #p1\r\r tbanna1=0\r\r tstrawberry1=0\r\r tlime1=0\r\r tplum1=0\r\r\r\r #p2\r\r tbanna2=0\r\r tstrawberry2=0\r\r tlime2=0\r\r tplum2=0\r\r\r\r #p3\r\r tbanna3=0\r\r tstrawberry3=0\r\r tlime3=0\r\r tplum3=0\r\r cardOnDesk=0\r\r \r\r########################\r\r NB1=Nb1()\r\r NB1.draw()\r\r NB2=Nb2()\r\r NB2.draw()\r\r NB3=Nb3()\r\r NB3.draw()\r\r\r\r else:\r\r p1.penalty(p2,p3)\r\r #change the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win)\r\r\r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win) \r\r \r\r print(p1.unflippedCards)\r\r print(p2.unflippedCards)\r\r print(p3.unflippedCards)\r\r print(\"__________\")\r\r\r\r elif key==\"b\":\r\r #play sound\r\r playsound(\"bell.wav\")\r\r\r\r if (tbanna==5 or tstrawberry==5\r\r or tlime==5 or tplum==5 or tbanna==10 or tstrawberry==10\r\r or tlime==10 or tplum==10):\r\r p2.unflippedCards+=cardOnDesk\r\r #change the scoring board\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win) \r\r\r\r #p1\r\r tbanna1=0\r\r tstrawberry1=0\r\r tlime1=0\r\r tplum1=0\r\r\r\r #p2\r\r tbanna2=0\r\r tstrawberry2=0\r\r tlime2=0\r\r tplum2=0\r\r\r\r #p3\r\r tbanna3=0\r\r tstrawberry3=0\r\r tlime3=0\r\r tplum3=0\r\r cardOnDesk=0\r\r\r\r########################\r\r NB1=Nb1()\r\r NB1.draw()\r\r NB2=Nb2()\r\r NB2.draw()\r\r NB3=Nb3()\r\r NB3.draw()\r\r \r\r else:\r\r p2.penalty(p1,p3)\r\r #change the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r \r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win)\r\r\r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win) \r\r \r\r print(p1.unflippedCards)\r\r print(p2.unflippedCards)\r\r print(p3.unflippedCards)\r\r print(\"__________\")\r\r\r\r elif key==\"l\":\r\r #play sound\r\r playsound(\"bell.wav\")\r\r\r\r if (tbanna==5 or tstrawberry==5\r\r or tlime==5 or tplum==5 or tbanna==10 or tstrawberry==10\r\r or tlime==10 or tplum==10):\r\r p3.unflippedCards+=cardOnDesk\r\r #changing the scoring board\r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win)\r\r \r\r #p1\r\r tbanna1=0\r\r tstrawberry1=0\r\r tlime1=0\r\r tplum1=0\r\r\r\r #p2\r\r tbanna2=0\r\r tstrawberry2=0\r\r tlime2=0\r\r tplum2=0\r\r\r\r #p3\r\r tbanna3=0\r\r 
tstrawberry3=0\r\r tlime3=0\r\r tplum3=0\r\r cardOnDesk=0\r\r########################\r\r NB1=Nb1()\r\r NB1.draw()\r\r NB2=Nb2()\r\r NB2.draw()\r\r NB3=Nb3()\r\r NB3.draw()\r\r else:\r\r p3.penalty(p1,p2)\r\r #change the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win)\r\r \r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win)\r\r \r\r print(p1.unflippedCards)\r\r print(p2.unflippedCards)\r\r print(p3.unflippedCards)\r\r print(\"__________\")\r\r \r\r else:\r\r print(p1.unflippedCards)\r\r print(p2.unflippedCards)\r\r print(p3.unflippedCards)\r\r print(\"__________\")\r\r \r\r light1.lightoff()\r\r\r\r #player2's turn\r\r light2.lighton()\r\r key=win.getKey()\r\r if key==\"v\":\r\r if p2.unflippedCards!=0:\r\r card=choice(cardList)\r\r tbanna2=card.banna\r\r tstrawberry2=card.strawberry\r\r tlime2=card.lime\r\r tplum2=card.plum\r\r card.draw(Point(700,500))\r\r p2.flipCard()\r\r #change the scoring board\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win) \r\r cardOnDesk+=1\r\r #record the total number of each fruit\r\r tbanna=tbanna1+tbanna2+tbanna3\r\r tstrawberry=tstrawberry1+tstrawberry2+tstrawberry3\r\r tlime=tlime1+tlime2+tlime3\r\r tplum=tplum1+tplum2+tplum3\r\r else:\r\r background2=Image(Point(750,700),\"16001.gif\")\r\r background2.draw(win)\r\r bar2=Image(Point(700,400),\"bar2.gif\")\r\r bar2.draw(win)\r\r end2 = Text(Point(700,400), \"Player2 ran out of cards!\")\r\r end2.setSize(30)\r\r end2.draw(win)\r\r break\r\r \r\r #check if someone press the bell\r\r key=win.getKey()\r\r if key==\"s\":\r\r if (tbanna==5 or tstrawberry==5\r\r or tlime==5 or tplum==5 or tbanna==10 or tstrawberry==10\r\r or tlime==10 or tplum==10):\r\r #play sound\r\r playsound(\"bell.wav\")\r\r \r\r p1.unflippedCards+=cardOnDesk\r\r #change the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r #p1\r\r tbanna1=0\r\r tstrawberry1=0\r\r tlime1=0\r\r tplum1=0\r\r\r\r #p2\r\r tbanna2=0\r\r tstrawberry2=0\r\r tlime2=0\r\r tplum2=0\r\r\r\r #p3\r\r tbanna3=0\r\r tstrawberry3=0\r\r tlime3=0\r\r tplum3=0\r\r cardOnDesk=0\r\r\r\r########################\r\r NB1=Nb1()\r\r NB1.draw()\r\r NB2=Nb2()\r\r NB2.draw()\r\r NB3=Nb3()\r\r NB3.draw()\r\r \r\r else:\r\r p1.penalty(p2,p3)\r\r #change the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win)\r\r \r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win) \r\r \r\r print(p1.unflippedCards)\r\r print(p2.unflippedCards)\r\r print(p3.unflippedCards)\r\r print(\"__________\")\r\r\r\r elif key==\"b\":\r\r if (tbanna==5 or tstrawberry==5\r\r or tlime==5 or tplum==5 or tbanna==10 or tstrawberry==10\r\r or tlime==10 or tplum==10):\r\r p2.unflippedCards+=cardOnDesk\r\r #change the scoring board\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: 
\"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win) \r\r #p1\r\r tbanna1=0\r\r tstrawberry1=0\r\r tlime1=0\r\r tplum1=0\r\r\r\r #p2\r\r tbanna2=0\r\r tstrawberry2=0\r\r tlime2=0\r\r tplum2=0\r\r\r\r #p3\r\r tbanna3=0\r\r tstrawberry3=0\r\r tlime3=0\r\r tplum3=0\r\r\r\r cardOnDesk=0\r\r\r\r########################\r\r NB1=Nb1()\r\r NB1.draw()\r\r NB2=Nb2()\r\r NB2.draw()\r\r NB3=Nb3()\r\r NB3.draw()\r\r \r\r else:\r\r p2.penalty(p1,p3)\r\r #change the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win)\r\r \r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win)\r\r \r\r print(p1.unflippedCards)\r\r print(p2.unflippedCards)\r\r print(p3.unflippedCards)\r\r print(\"__________\")\r\r\r\r elif key==\"l\":\r\r if (tbanna==5 or tstrawberry==5\r\r or tlime==5 or tplum==5 or tbanna==10 or tstrawberry==10\r\r or tlime==10 or tplum==10):\r\r #play sound\r\r playsound(\"bell.wav\")\r\r \r\r p3.unflippedCards+=cardOnDesk\r\r #changing the scoring board\r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win) \r\r\r\r #p1\r\r tbanna1=0\r\r tstrawberry1=0\r\r tlime1=0\r\r tplum1=0\r\r\r\r #p2\r\r tbanna2=0\r\r tstrawberry2=0\r\r tlime2=0\r\r tplum2=0\r\r\r\r #p3\r\r tbanna3=0\r\r tstrawberry3=0\r\r tlime3=0\r\r tplum3=0\r\r cardOnDesk=0\r\r\r\r########################\r\r NB1=Nb1()\r\r NB1.draw()\r\r NB2=Nb2()\r\r NB2.draw()\r\r NB3=Nb3()\r\r NB3.draw()\r\r \r\r else:\r\r p3.penalty(p1,p2)\r\r #changing the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win)\r\r \r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win)\r\r \r\r print(p1.unflippedCards)\r\r print(p2.unflippedCards)\r\r print(p3.unflippedCards)\r\r print(\"__________\")\r\r else:\r\r print(p1.unflippedCards)\r\r print(p2.unflippedCards)\r\r print(p3.unflippedCards)\r\r print(\"__________\")\r\r \r\r light2.lightoff()\r\r\r\r #player3's turn \r\r light3.lighton()\r\r key=win.getKey()\r\r if key==\"k\":\r\r if p3.unflippedCards!=0:\r\r card=choice(cardList)\r\r tbanna3=card.banna\r\r tstrawberry3=card.strawberry\r\r tlime3=card.lime\r\r tplum3=card.plum\r\r card.draw(Point(1100,300))\r\r p3.flipCard()\r\r #changing the scoring board\r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win)\r\r else:\r\r background3=Image(Point(750,700),\"16001.gif\")\r\r background3.draw(win)\r\r bar3=Image(Point(700,400),\"bar3.gif\")\r\r bar3.draw(win)\r\r end3 = Text(Point(700,400), \"Player3 ran out of cards!\")\r\r end3.setSize(30)\r\r end3.draw(win)\r\r break\r\r\r\r #record the total number of each fruit\r\r cardOnDesk+=1\r\r tbanna=tbanna1+tbanna2+tbanna3\r\r tstrawberry=tstrawberry1+tstrawberry2+tstrawberry3\r\r tlime=tlime1+tlime2+tlime3\r\r tplum=tplum1+tplum2+tplum3\r\r\r\r #check if someone press the bell\r\r key=win.getKey()\r\r if 
key==\"s\":\r\r #play sound\r\r playsound(\"bell.wav\")\r\r \r\r if (tbanna==5 or tstrawberry==5\r\r or tlime==5 or tplum==5 or tbanna==10 or tstrawberry==10\r\r or tlime==10 or tplum==10):\r\r p1.unflippedCards+=cardOnDesk\r\r #change the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r \r\r #p1\r\r tbanna1=0\r\r tstrawberry1=0\r\r tlime1=0\r\r tplum1=0\r\r\r\r #p2\r\r tbanna2=0\r\r tstrawberry2=0\r\r tlime2=0\r\r tplum2=0\r\r\r\r #p3\r\r tbanna3=0\r\r tstrawberry3=0\r\r tlime3=0\r\r tplum3=0\r\r cardOnDesk=0\r\r\r\r########################\r\r NB1=Nb1()\r\r NB1.draw()\r\r NB2=Nb2()\r\r NB2.draw()\r\r NB3=Nb3()\r\r NB3.draw()\r\r else:\r\r p1.penalty(p2,p3)\r\r #change the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win)\r\r \r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win)\r\r \r\r print(p1.unflippedCards)\r\r print(p2.unflippedCards)\r\r print(p3.unflippedCards)\r\r print(\"__________\")\r\r\r\r elif key==\"b\":\r\r #play sound\r\r playsound(\"bell.wav\")\r\r \r\r if (tbanna==5 or tstrawberry==5\r\r or tlime==5 or tplum==5 or tbanna==10 or tstrawberry==10\r\r or tlime==10 or tplum==10):\r\r p2.unflippedCards+=cardOnDesk\r\r #change the scoring board\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win) \r\r cardOnDesk+=1\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win) \r\r #p1\r\r tbanna1=0\r\r tstrawberry1=0\r\r tlime1=0\r\r tplum1=0\r\r\r\r #p2\r\r tbanna2=0\r\r tstrawberry2=0\r\r tlime2=0\r\r tplum2=0\r\r\r\r #p3\r\r tbanna3=0\r\r tstrawberry3=0\r\r tlime3=0\r\r tplum3=0\r\r cardOnDesk=0\r\r\r\r########################\r\r NB1=Nb1()\r\r NB1.draw()\r\r NB2=Nb2()\r\r NB2.draw()\r\r NB3=Nb3()\r\r NB3.draw()\r\r \r\r else:\r\r p2.penalty(p1,p3)\r\r #change the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win)\r\r \r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win)\r\r\r\r print(p1.unflippedCards)\r\r print(p2.unflippedCards)\r\r print(p3.unflippedCards)\r\r print(\"__________\")\r\r\r\r elif key==\"l\":\r\r #play sound\r\r playsound(\"bell.wav\")\r\r if (tbanna==5 or tstrawberry==5\r\r or tlime==5 or tplum==5 or tbanna==10 or tstrawberry==10\r\r or tlime==10 or tplum==10):\r\r p3.unflippedCards+=cardOnDesk\r\r #changing the scoring board\r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win) \r\r cardOnDesk+=1\r\r #p1\r\r tbanna1=0\r\r tstrawberry1=0\r\r tlime1=0\r\r tplum1=0\r\r\r\r #p2\r\r tbanna2=0\r\r tstrawberry2=0\r\r tlime2=0\r\r tplum2=0\r\r\r\r #p3\r\r tbanna3=0\r\r tstrawberry3=0\r\r tlime3=0\r\r tplum3=0\r\r cardOnDesk=0\r\r \r\r########################\r\r NB1=Nb1()\r\r 
NB1.draw()\r\r NB2=Nb2()\r\r NB2.draw()\r\r NB3=Nb3()\r\r NB3.draw()\r\r \r\r else:\r\r p3.penalty(p1,p2)\r\r #changing the scoring board\r\r board1.undraw()\r\r board1 = Text(Point(300,200), \"Player1: \"+str(p1.unflippedCards))\r\r board1.setSize(30)\r\r board1.draw(win)\r\r\r\r board2.undraw()\r\r board2 = Text(Point(700,400), \"Player2: \"+str(p2.unflippedCards))\r\r board2.setSize(30)\r\r board2.draw(win)\r\r \r\r board3.undraw()\r\r board3 = Text(Point(1100,200), \"Player3: \"+str(p3.unflippedCards))\r\r board3.setSize(30)\r\r board3.draw(win) \r\r\r\r print(p1.unflippedCards)\r\r print(p2.unflippedCards)\r\r print(p3.unflippedCards)\r\r print(\"__________\")\r\r else:\r\r print(p1.unflippedCards)\r\r print(p2.unflippedCards)\r\r print(p3.unflippedCards)\r\r print(\"__________\")\r\r \r\r light3.lightoff()\r\r\r\rif __name__==\"__main__\":\r\r main()\r\r\r#references\r# Learned how to intall playsound from the source code\r# from https://pypi.org/project/playsound/ and with the help of Georgina\r\r# Learned how to play music as background by creating a thread\r# from https://stackoverflow.com/questions/53246933/python-execute-playsound-in-separate-thread\r\r \r\r\r" } ]
1
ft9738962/disaster_response
https://github.com/ft9738962/disaster_response
3544ec5b4850101e604f4418e6e316ff1ddefc07
34b15b90946831bdebdd0665cc96c04fde960f19
74d1ff9f14ed9fb0a5e99a19ba4958fd25a27315
refs/heads/master
2020-04-04T19:58:20.327403
2018-11-08T12:37:00
2018-11-08T12:37:00
156,228,277
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7589927911758423, "alphanum_fraction": 0.7688848972320557, "avg_line_length": 45.33333206176758, "blob_id": "c271fae510e57eff239f7a6711c2677200c8a4a9", "content_id": "ff318a97d2bf6452aac4be815df70590e579027d", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2224, "license_type": "permissive", "max_line_length": 343, "num_lines": 48, "path": "/README.md", "repo_name": "ft9738962/disaster_response", "src_encoding": "UTF-8", "text": "# 1. Purpose\nThis project is a machine-learning web app that predicts disaster-type classifications for messages\n\nThe machine learning model is based on data from [Figure Eight](www.figure-eight.com)\n\nThe final app will be presented on a web page. Users can type any sentence to test the model and see how it categorizes the sentence into the existing labels\n\n# 2. File Description\n\n- data:\n    - process_data.py: pipeline for data wrangling; stores the clean data in a SQLite database\n    - DisasterResponse.db: database storing the clean data from process_data.py\n    - disaster_messages.csv: messages dataset\n    - disaster_categories.csv: categories for the message text\n- models:\n    - train_classifier.py: machine learning module to create the model\n- app:\n    - templates:\n        - go.html: interacts with the message and shows results from the machine learning model\n        - master.html: basic html for the web app\n    - run.py: Flask backend plus the Plotly figure built from the data\n- pic:\n    - category.jpg: figure used in README.md\n- ETL_Pipeline_Preparation.ipynb: preparation notebook for building the data extract-transform-load pipeline\n- ML_Pipeline_Preparation.ipynb: preparation notebook for grid-searching the best parameters across different machine learning models\n\n\n# 3. Defect of the Raw Dataset\nThe main page of the web application provides a graph of the category counts of the raw dataset.\n\n![category figure](https://github.com/ft9738962/disaster_response/raw/master/pic/category.jpg)\n\nThe figure shows that the proportions of the categories are imbalanced. For some categories like \"Storm\", \"food\", and \"water\", training messages are plentiful, while for others, such as \"missing people\" and \"fire\", related messages are rare. In particular, no message belongs to the \"child_alone\" category at all.\n\nThis imbalance will hurt the model's accuracy on messages that belong to the categories with the least training data.
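\n\nThese counts can be reproduced directly from the cleaned database (a minimal sketch; the table name and label-column layout follow train_classifier.py, while the database path is an assumption):\n\n```python\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\n# assumes the cleaned database produced by process_data.py\nengine = create_engine('sqlite:///data/DisasterResponse.db')\ndf = pd.read_sql_table('disaster_response', engine)\n\n# label columns start at index 4, as in train_classifier.py\nprint(df.iloc[:, 4:].sum().sort_values())\n```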
\n\n# 4. How to Interact with the Project\n\nImprovement suggestions are appreciated, especially toward a better machine learning model.\n\nPushed code should follow the PEP-8 style.\n\n# 5. Licensing\n\nBSD 3-clause\n\n# 6. Authors\n[Max Qiu](https://github.com/ft9738962)\n" }, { "alpha_fraction": 0.6464121341705322, "alphanum_fraction": 0.652208685874939, "avg_line_length": 30.471698760986328, "blob_id": "64dc102d11519bb6cfd9e4412544c40b49b4ec53", "content_id": "d5885a9a702663ad10ae914a29926053cfc7163a", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5003, "license_type": "permissive", "max_line_length": 142, "num_lines": 159, "path": "/models/train_classifier.py", "repo_name": "ft9738962/disaster_response", "src_encoding": "UTF-8", "text": "import sys\nfrom sqlalchemy import create_engine\nimport nltk\nnltk.download(['punkt', 'wordnet', 'averaged_perceptron_tagger'])\n\nimport re\nimport numpy as np\nimport pandas as pd\nfrom sklearn.externals import joblib\nfrom nltk.tokenize import word_tokenize\nfrom nltk.stem import WordNetLemmatizer\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.metrics import classification_report, accuracy_score, precision_recall_fscore_support\n\ndef load_data(database_filepath):\n    '''Load data from the database and split it into X (messages) and Y (labels)\n    \n    Input:\n    database_filepath: filepath of the database\n    \n    Output:\n    X: messages\n    Y: labels\n    category_names: list of label names\n    '''\n    \n    engine = create_engine(f'sqlite:///{database_filepath}')\n    df = pd.read_sql_table('disaster_response', engine)\n    X = df.loc[:,'message']\n    Y = df.iloc[:, 4:]\n    category_names = Y.columns.tolist()\n    \n    return X, Y, category_names\n\ndef tokenize(text):\n    '''Tokenize a text message into a list of lemmatized words\n    \n    Input:\n    text: the text message to be tokenized\n    \n    Output:\n    clean_words: list of tokenized and lemmatized words\n    ''' \n    text = word_tokenize(re.sub(r'[^a-zA-Z0-9]',' ',text).lower().strip())\n    lemmatizer = WordNetLemmatizer()\n    \n    clean_words = []\n    for word in text:\n        clean_word = lemmatizer.lemmatize(word, pos='v')\n        clean_words.append(clean_word)\n\n    return clean_words\n\nclass TextLengthExtractor(BaseEstimator, TransformerMixin):\n    '''\n    Add the length of the text message as a feature to the dataset\n    \n    The assumption is that people in an urgent disaster situation tend to use fewer words\n    '''\n    \n    def fit(self, x, y=None):\n        return self\n\n    def transform(self, X):\n        return pd.DataFrame(X).applymap(len)\n\ndef build_model():\n    '''Build the machine learning pipeline\n    \n    Input:\n    None\n    \n    Output:\n    model: an untrained sklearn Pipeline\n    '''\n    \n    model = Pipeline([\n        ('tfidfvect', TfidfVectorizer(tokenizer=tokenize, stop_words='english', ngram_range=(1,2), max_df=0.75, max_features=2000)),\n        ('moc', MultiOutputClassifier(RandomForestClassifier(n_estimators=15, max_depth=None, min_samples_split=5)))\n    ])\n    \n    return model\n\ndef evaluate_model(model, X_test, Y_test, category_names):\n    '''Print the accuracy and classification report for each label\n    \n    Input:\n    model: trained model\n    X_test: messages for testing\n    Y_test: labels for validating\n    category_names: list of label names\n    \n    Output:\n    None\n    '''\n    y_pred = pd.DataFrame(model.predict(X_test), columns = category_names)\n    \n    tot_acc = 0\n    tot_f1 = 0\n    
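# per-label report: accumulate accuracy and weighted F1 so averages can be printed at the end\n    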
for i in category_names:\n        print(i, 'accuracy: {:.2f}'.format(accuracy_score(Y_test.loc[:, i], y_pred.loc[:, i])),'\\n',\n              classification_report(Y_test.loc[:, i], y_pred.loc[:, i]),'\\n')\n        tot_acc += accuracy_score(Y_test.loc[:, i], y_pred.loc[:, i])\n        tot_f1 += precision_recall_fscore_support(Y_test.loc[:, i], y_pred.loc[:, i], average = 'weighted')[2]\n    print('The average accuracy score is', round(tot_acc/len(category_names),4), ', average f1-score is', round(tot_f1/len(category_names),4))\n\ndef save_model(model, model_filepath):\n    '''Save the trained model to a pickle file\n    \n    Input:\n    model: trained model\n    model_filepath: filepath of the pickle file to save the model to\n    \n    Output:\n    None\n    '''\n    \n    joblib.dump(model, model_filepath)\n\ndef main():\n    if len(sys.argv) == 3:\n        database_filepath, model_filepath = sys.argv[1:]\n        print('Loading data...\\n    DATABASE: {}'.format(database_filepath))\n        X, Y, category_names = load_data(database_filepath)\n        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n        \n        print('Building model...')\n        model = build_model()\n        \n        print('Training model...')\n        model.fit(X_train, Y_train)\n        \n        print('Evaluating model...')\n        evaluate_model(model, X_test, Y_test, category_names)\n\n        print('Saving model...\\n    MODEL: {}'.format(model_filepath))\n        save_model(model, model_filepath)\n\n        print('Trained model saved!')\n\n    else:\n        print('Please provide the filepath of the disaster messages database '\\\n              'as the first argument and the filepath of the pickle file to '\\\n              'save the model to as the second argument. \\n\\nExample: python '\\\n              'train_classifier.py ../data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n    main()" } ]
2
singhtanmay20/PoliticalTrend
https://github.com/singhtanmay20/PoliticalTrend
f5a4359e0a9ea9b8221dbf9c526f024f845fdbeb
be356bb70e1b9c22c2790b2341fe51c3fa2470fe
0a3055085a93d320240aae2a946f5be02e198e3d
refs/heads/master
2021-02-28T09:22:29.823951
2020-03-07T18:09:03
2020-03-07T18:09:03
245,544,028
0
0
null
2020-03-07T00:51:50
2020-03-07T01:22:24
2020-03-07T01:43:10
Jupyter Notebook
[ { "alpha_fraction": 0.557894766330719, "alphanum_fraction": 0.5684210658073425, "avg_line_length": 29.230770111083984, "blob_id": "6487041418c2791abf8f8d2f20d8541d3899d4a1", "content_id": "d79d3866fcc540326f4ca0c85f1289ffc4389899", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 380, "license_type": "no_license", "max_line_length": 93, "num_lines": 13, "path": "/part3/NYT/code/NYT-Mapper-COOCCUR.py", "repo_name": "singhtanmay20/PoliticalTrend", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n\"\"\"mapper.py\"\"\"\n\nimport sys\nmywords = [\"music\",\"movie\",\"first\",\"dance\",\"book\",\"television\",\"ballet\",\"film\",\"show\",\"like\"]\nfor line in sys.stdin:\n    line = line.strip()\n    words = line.split()\n    for i in range(len(words)-1):\n        if(words[i] in mywords or words[i+1] in mywords):\n            l = \" \"  # separator between the two words of the pair\n            print('%s%s%s\\t%s' % (words[i],l,words[i+1],1))\n" }, { "alpha_fraction": 0.6327077746391296, "alphanum_fraction": 0.6380696892738342, "avg_line_length": 24.689655303955078, "blob_id": "d3e7e9fdcd10dc69db4e5cc683f1b99e246221e6", "content_id": "aecba8deee79aef7a863c327999324102c444fdb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 746, "license_type": "no_license", "max_line_length": 79, "num_lines": 29, "path": "/part1/Data/COMMON-CRAWL/ccarticlescrap.py", "repo_name": "singhtanmay20/PoliticalTrend", "src_encoding": "UTF-8", "text": "import urllib.request\nfrom bs4 import BeautifulSoup\nurllist=[]\nall_articles=[]\nwith open(\"/home/tanmay/Documents/DIC/Proj2/ccrankersite.txt\") as f:\n    for line in f:\n        urllist.append(line.strip())  # strip the trailing newline so urlopen gets a clean URL\nj=1\nfor urls in urllist:\n    print(j)\n
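    # write each scraped article's text to its own numbered file\n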
2
chrismeono1022/playing_with_dictionaries
https://github.com/chrismeono1022/playing_with_dictionaries
095486f9243272555fefa96673facec4206e8f22
38dd51ac1fce665001f10884e14cc0dd21def02d
4311f55370b90e9912797ff621f7b020171cffae
refs/heads/master
2021-01-19T17:47:27.195939
2013-03-13T01:07:55
2013-03-13T01:07:55
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5856353640556335, "alphanum_fraction": 0.591160237789154, "avg_line_length": 18.60869598388672, "blob_id": "67fad53923f8de0133a4260d858bfba258e3a226", "content_id": "f65086c2ac60e6101e0c37ad3bb863685cc7a7b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 905, "license_type": "no_license", "max_line_length": 72, "num_lines": 46, "path": "/lettercount.py", "repo_name": "chrismeono1022/playing_with_dictionaries", "src_encoding": "UTF-8", "text": "from sys import argv\nimport string\n\nscript, filename = argv\n\ntext = open(filename)\nbook = text.read()\ntext.close()\n\nbook = book.replace('.', \" \")\nbook = book.replace(',', \" \")\nbook = book.replace('-', \" \")\nbook = book.replace(':', \" \")\nbook = book.replace('/', \" \")\nbook = book.replace('$', \" \")\nbook = book.replace('!', \" \")\nbook = book.replace('?', \" \")\nbook = book.replace(';', \" \")\nbook = book.replace('(', \" \")\nbook = book.replace(\"'\", \" \")\t\nbook = book.replace(')', \" \")\n\nbook_list = book.split()\n\nbook_dict = {}\n\nfor word in book_list: #for each word in our list of words from the book\n\tbook_dict[word] = 0 #initialize its count in book_dict\n\t\n\nfor word in book_list:\n\tbook_dict[word] += 1\n\nprint book_dict\n\n\n\n# length = len(book_list)\n\n# a = dict(zip([book_list], [0 * length])\n\n# print a \n\n# a = {}\n#for word in book2: #for each word in the list of words from the book\n#\ta = dict(zip([word], [0 * length]) \n\n\n
1
StevenMohr/CrimeMonitorBerlin
https://github.com/StevenMohr/CrimeMonitorBerlin
9345df73b7528751d6c798fc78036b1dd97af4a9
5375f91aab7df4c1d48fc38e01dde69583cc6f1b
1fcb9790cd25b3006d4f9a0bfafe8fb9ddd74649
refs/heads/master
2021-01-17T11:59:08.267580
2013-01-29T10:11:21
2013-01-29T10:11:21
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6733212471008301, "alphanum_fraction": 0.6814882159233093, "avg_line_length": 29.58333396911621, "blob_id": "d9531d49ecc675458fe107dd2f34f88305cc4e28", "content_id": "fe505490fe2528ee9882ee433ec233804ca22102", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1102, "license_type": "no_license", "max_line_length": 67, "num_lines": 36, "path": "/crime_main/models.py", "repo_name": "StevenMohr/CrimeMonitorBerlin", "src_encoding": "UTF-8", "text": "from django.contrib.gis.db import models\nfrom django.contrib.gis.geos import Polygon\nfrom django.db.models.fields.related import ForeignKey\n\n# Create your models here.\n\nclass Crime(models.Model):\n districts = models.ManyToManyField('District')\n title = models.CharField(max_length = 200)\n description = models.TextField()\n pub_date = models.DateTimeField()\n full_text_link = models.URLField(max_length = 800, unique=True)\n \n #Using geo-enabled object manager\n objects = models.GeoManager()\n \nclass District(models.Model):\n name = models.CharField(max_length = 200, unique=True)\n area_id = models.IntegerField()\n way = models.GeometryField(blank=True, null=True)\n\n def _polygon_wkt(self):\n poly = self.way.wkt\n return poly.replace('LINESTRING', 'POLYGON')\n\n polygon_wkt = property(_polygon_wkt)\n\n def _crime_ratio(self):\n crimes = self.crime_set.count()\n crime_total = Crime.objects.count()\n return crimes / float(crime_total)\n\n crime_ratio = property(_crime_ratio)\n\n def __unicode__(self):\n return self.name\n\n" }, { "alpha_fraction": 0.7837837934494019, "alphanum_fraction": 0.837837815284729, "avg_line_length": 9.714285850524902, "blob_id": "b0882507f4ae97ce4fa85554c73c16a6cd8dd104", "content_id": "24e0778a23e124c532be794e7859c2c068e10c0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 74, "license_type": "no_license", "max_line_length": 15, "num_lines": 7, "path": "/requirements.txt", "repo_name": "StevenMohr/CrimeMonitorBerlin", "src_encoding": "UTF-8", "text": "fabric\ndjango>=1.4.3\ngeopy\ndjango_nose\nfeedparser\npsycopg2\npython-dateutil" }, { "alpha_fraction": 0.7735849022865295, "alphanum_fraction": 0.7735849022865295, "avg_line_length": 25.5, "blob_id": "06d68a84d456c416de02524b246e8dcdec78c27e", "content_id": "1f2a74f9bdb17944c25a36b088abf5deb798df73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 106, "license_type": "no_license", "max_line_length": 44, "num_lines": 4, "path": "/init_db.sql", "repo_name": "StevenMohr/CrimeMonitorBerlin", "src_encoding": "UTF-8", "text": "\\c crimemonitor;\nCreate USER crime PASSWORD 'crime';\ngrant ALL on DATABASE crimemonitor to crime;\n\\q\n" }, { "alpha_fraction": 0.6459143757820129, "alphanum_fraction": 0.6498054265975952, "avg_line_length": 20.41666603088379, "blob_id": "2dda3c87599d0e8f0f0b4dec6c2a09d4048bb923", "content_id": "87928c8afc7f3f5cc5ecdfd0c8cd9e7866636a9c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 257, "license_type": "no_license", "max_line_length": 45, "num_lines": 12, "path": "/crime_main/templatetags/list.py", "repo_name": "StevenMohr/CrimeMonitorBerlin", "src_encoding": "UTF-8", "text": "__author__ = 'steven'\n\nfrom django.template import Library\n\nregister = Library()\n\[email protected]_tag\ndef pretty_list(item_list):\n return_string = \"\"\n for item in item_list:\n return_string += unicode(item) + 
\", \"\n return return_string[:-2]\n" }, { "alpha_fraction": 0.6735436916351318, "alphanum_fraction": 0.6759708523750305, "avg_line_length": 36.45454406738281, "blob_id": "ef42adad1cc36b190093313ad66e7f792c098f1d", "content_id": "5cf005c36665120908c1c4333be5e5e350b4165f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 824, "license_type": "no_license", "max_line_length": 67, "num_lines": 22, "path": "/crime_main/views.py", "repo_name": "StevenMohr/CrimeMonitorBerlin", "src_encoding": "UTF-8", "text": "# Create your views here.\nfrom django.db.models.aggregates import Count\nfrom django.views.generic import ListView\nfrom django.views.generic.base import TemplateView\nfrom crime_main.models import Crime, District\n\nclass MainView(ListView):\n template_name = \"main.html\"\n queryset = Crime.objects.all()\n context_object_name = \"crimes\"\n\n def get_context_data(self, **kwargs):\n arguments = kwargs\n districts = District.objects.all()\n kwargs['districts'] = districts\n geo_districts = [x.way for x in districts ]\n berlin_borders = geo_districts[0]\n for geo in geo_districts[1:]:\n berlin_borders = berlin_borders.union(geo)\n kwargs['center'] = berlin_borders.centroid\n kwargs['crimes'] = Crime.objects.all().order_by('pub_date')\n return arguments\n" }, { "alpha_fraction": 0.6568915247917175, "alphanum_fraction": 0.6847507357597351, "avg_line_length": 21.2391300201416, "blob_id": "799260c076764c88e79b76787c84b3bfe12a7590", "content_id": "99437d24c91fb6b594dc8d2072e7e7a01adcfdda", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2046, "license_type": "no_license", "max_line_length": 136, "num_lines": 92, "path": "/crime_main/static/js/map.js", "repo_name": "StevenMohr/CrimeMonitorBerlin", "src_encoding": "UTF-8", "text": "// Generated by CoffeeScript 1.3.3\nvar in_options, layer_style, style_blue, style_green, style_orange, style_red, style_red1, style_red2, style_yellow, to_district_vector;\n\nin_options = {\n internalProjection: new OpenLayers.Projection('EPSG:900913'),\n externalProjection: new OpenLayers.Projection('EPSG:4326')\n};\n\nto_district_vector = function(wkt, ratio) {\n var feature, format;\n format = new OpenLayers.Format.WKT(in_options);\n feature = format.read(wkt);\n if (ratio < 0.05) {\n feature.style = style_green;\n }\n if (ratio > 0.05) {\n feature.style = style_yellow;\n }\n if (ratio > 0.10) {\n feature.style = style_orange;\n }\n if (ratio > 0.15) {\n feature.style = style_red;\n }\n if (ratio > 0.20) {\n feature.style = style_red1;\n }\n if (ratio > 0.25) {\n feature.style = style_red2;\n }\n return feature;\n};\n\nlayer_style = OpenLayers.Util.extend({}, OpenLayers.Feature.Vector.style['default']);\n\nlayer_style.fillOpacity = 0.2;\n\nlayer_style.graphicOpacity = 1;\n\nstyle_blue = OpenLayers.Util.extend({}, layer_style);\n\nstyle_blue.strokeColor = \"blue\";\n\nstyle_blue.fillColor = \"blue\";\n\nstyle_green = OpenLayers.Util.extend({}, style_blue);\n\nstyle_green.strokeColor = \"green\";\n\nstyle_green.fillColor = \"green\";\n\nstyle_green.fillOpacity = \"0.4\";\n\nstyle_red = OpenLayers.Util.extend({}, style_blue);\n\nstyle_red.strokeColor = \"red\";\n\nstyle_red.fillColor = \"red\";\n\nstyle_red.fillOpacity = \"0.4\";\n\nstyle_red1 = OpenLayers.Util.extend({}, style_blue);\n\nstyle_red1.strokeColor = \"red\";\n\nstyle_red1.fillColor = \"red\";\n\nstyle_red1.fillOpacity = \"0.7\";\n\nstyle_red2 = OpenLayers.Util.extend({}, 
style_blue);\n\nstyle_red2.strokeColor = \"red\";\n\nstyle_red2.fillColor = \"red\";\n\nstyle_red2.fillOpacity = \"1\";\n\nstyle_yellow = OpenLayers.Util.extend({}, style_blue);\n\nstyle_yellow.strokeColor = \"yellow\";\n\nstyle_yellow.fillColor = \"yellow\";\n\nstyle_yellow.fillOpacity = \"0.4\";\n\nstyle_orange = OpenLayers.Util.extend({}, style_blue);\n\nstyle_orange.strokeColor = \"orange\";\n\nstyle_orange.fillColor = \"orange\";\n\nstyle_orange.fillOpacity = \"0.4\";\n" }, { "alpha_fraction": 0.6913793087005615, "alphanum_fraction": 0.6965517401695251, "avg_line_length": 40.5, "blob_id": "7fc7a114b2116a368580be3e9655c70e713575cf", "content_id": "8e380ab028e52f4ee73e528d63dd724c6017d622", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 580, "license_type": "no_license", "max_line_length": 94, "num_lines": 14, "path": "/fabfile.py", "repo_name": "StevenMohr/CrimeMonitorBerlin", "src_encoding": "UTF-8", "text": "from fabric.context_managers import lcd\nfrom fabric.operations import sudo, local\n\n__author__ = 'steven'\n\n\ndef install_system_local():\n local('sudo su -c \"createdb -T template_postgis crimemonitor\" postgres')\n local('sudo su -c \"psql -f init_db.sql\" postgres')\n local('python manage.py syncdb')\n with lcd('/tmp/'):\n local('wget http://download.geofabrik.de/openstreetmap/europe/germany/berlin.osm.bz2')\n local('osm2pgsql -d crimemonitor -U crime -W -s berlin.osm.bz2')\n local('sudo su -c \"psql -f crime_main/sql/district_.sql crimemonitor\" postgres')" }, { "alpha_fraction": 0.7322834730148315, "alphanum_fraction": 0.7322834730148315, "avg_line_length": 41.33333206176758, "blob_id": "6f226d2c8a490749b2cdc38a7c3672ea84dd4980", "content_id": "48a2deabaf4bd0f8dc90c94c0a75aa919f7043c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 254, "license_type": "no_license", "max_line_length": 126, "num_lines": 6, "path": "/README.md", "repo_name": "StevenMohr/CrimeMonitorBerlin", "src_encoding": "UTF-8", "text": "CrimeMonitorBerlin\n==================\n\nUniversity project for crawling the RSS press feed of Berlin's police and showing a kind of heat map based on that crime data.\n\nThanks to Thomas Myrman for this [background pattern](http://subtlepatterns.com/cream_dust/).\n" }, { "alpha_fraction": 0.726681113243103, "alphanum_fraction": 0.7570499181747437, "avg_line_length": 45.099998474121094, "blob_id": "a65c06e7f5de833b98eddf99384e0709721abcff", "content_id": "8bc5680b62cfe88347440d2674d7ff620aa71c22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 461, "license_type": "no_license", "max_line_length": 137, "num_lines": 10, "path": "/install_cm.sh", "repo_name": "StevenMohr/CrimeMonitorBerlin", "src_encoding": "UTF-8", "text": "apt-get install postgresql-9.1 postgres-server-dev-9.1 libxml2-dev build-essential g++ binutils python-psycopg2 python-imaging python-pip\npip install -r requirements.txt\ncp init_db.sql /tmp\ncd /tmp\n\nsu -c \"psql -f init_db.sql\" postgres \n\n./osm2pgsql -d crimemonitor -U crime -W -s -C 200 ../Downloads/berlin.osm\necho \"Change user auth method from PEER to md5 in pg_hba.conf\"\necho \"Patch GeoDjango with fix for proper creation with PostGIS 2.0 (see JIRA REG-2)\"\n" }, { "alpha_fraction": 0.5321229100227356, "alphanum_fraction": 0.5386406183242798, "avg_line_length": 40.7843132019043, "blob_id": "19d46bee7ae6e3ab5d6912a41e57bc8dee0f105e", "content_id": 
"005e2da24c2906850a2b9498000a6ec0c2a27022", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2148, "license_type": "no_license", "max_line_length": 107, "num_lines": 51, "path": "/crime_main/management/commands/import_feed.py", "repo_name": "StevenMohr/CrimeMonitorBerlin", "src_encoding": "UTF-8", "text": "'''\nCreated on 07.11.2012\n\n@author: steven\n'''\nimport feedparser\nfrom django.core.management.base import BaseCommand\nfrom crime_main.models import Crime, District\nimport datetime\nfrom dateutil import tz\nfrom django.db.utils import IntegrityError\nfrom django.db import transaction \n\[email protected]_manually\nclass Command(BaseCommand):\n def handle(self, *args, **kwargs):\n print \"Starting feed import ...\"\n feed = feedparser.parse('http://www.berlin.de/polizei/presse-fahndung/_rss_presse.xml')\n for entry in feed.entries:\n title_parts = entry.title.split(' - ')\n if len(title_parts) > 1:\n title = title_parts[0]\n district_literal = title_parts[-1].strip()\n pub_date = datetime.datetime(*entry.published_parsed[:7], tzinfo=tz.gettz(\"Europe/Berlin\"))\n crime = Crime.objects.get_or_create(full_text_link=entry.link)[0]\n crime.title = title\n crime.full_text_link = entry.link\n pub_date = datetime.datetime(*entry.published_parsed[:7], tzinfo=tz.gettz(\"Europe/Berlin\"))\n try:\n crime.save()\n except IntegrityError:\n transaction.rollback()\n continue\n try:\n district = District.objects.get(name = district_literal)\n except District.DoesNotExist:\n district = None\n if district is not None:\n crime.districts.add(district)\n else:\n \n districts_literal = district_literal.split('/')\n \n for district_literal in districts_literal:\n try:\n district = District.objects.get(name = district_literal.strip())\n crime.districts.add(district)\n except District.DoesNotExist:\n print u\"'{}' not found...\".format(district_literal)\n crime.save()\n transaction.commit()\n \n" } ]
10
bbbrtk/ocr-dictionary
https://github.com/bbbrtk/ocr-dictionary
a9f67785f16836842db82290bbe18997e25ce12a
a046a5b76a491622b6438a6c8301eb147e71d970
71741345606f3a32f2bb319e4032c734f6df9dd9
refs/heads/master
2020-03-26T18:53:51.583253
2018-08-18T16:58:30
2018-08-18T16:58:30
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5625413656234741, "alphanum_fraction": 0.5744540095329285, "avg_line_length": 21.893939971923828, "blob_id": "e763240e53fede5f13e1e0adaccd99c4e43f2c61", "content_id": "36fb0791f1f0470b2857c604df9cbc15ee7098ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1511, "license_type": "no_license", "max_line_length": 89, "num_lines": 66, "path": "/script.py", "repo_name": "bbbrtk/ocr-dictionary", "src_encoding": "UTF-8", "text": "from PIL import Image\nimport pytesseract\nimport re\nimport datetime\n\n\npytesseract.pytesseract.tesseract_cmd = r'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract'\n\nlist_of_words_1 = []\nlist_of_words_2 = []\n\ndef read_lists():\n with open('list_of_words_1.txt', 'r') as f:\n list_of_words_1 = [line.strip() for line in f]\n\n with open('list_of_words_2.txt', 'r') as f:\n list_of_words_2 = [line.strip() for line in f]\n\ndef remove_non_ascii(text):\n return ''.join([i if ord(i) < 128 else ' ' for i in text])\n\n\ndef convert_and_split(text):\n for item in list_of_words_1:\n text = text.replace(item,'')\n # print(text)\n words = text.split()\n for i in range(2):\n for item in words:\n if len(item)<4 or (item in list_of_words_2):\n words.remove(item)\n\n return words\n\ndef save_to_txt(list):\n now = datetime.datetime.now()\n name = now.strftime(\"%Y-%m-%d_%H-%M.txt\")\n file = open(name, 'w')\n for item in list:\n file.write(\"%s\\n\" % item)\n\n\npath = 'media/test2.png'\ntext_ocr = pytesseract.image_to_string(Image.open(path))\n\n\nlist_of_words_1 = [\n 'Thumac','Tlumac','Ttun','Ttum','Tumacz',\n '...','-','@','.','\\n'\n ]\n\nlist_of_words_2 = [\n 'p','po','pol','pols','a','an','ang','angi','end',\n 'Tuma','Thur','Thum:','Thum','Thu','Thun','Tum'\n ]\n\n\ndef main():\n text = text_ocr\n text = remove_non_ascii(text)\n words = convert_and_split(text)\n print(words)\n save_to_txt(words)\n\nif __name__ == '__main__':\n main()\n" } ]
1
h3idan/spider-knownsec
https://github.com/h3idan/spider-knownsec
018f2173c09c8af0c3e37265da7d15c54caf2678
07a48a9935d4c6a7fca121035f85cdd6b66c1f46
0919b74a9cb8ae5a326bb02c7d9b52331c6ea83f
refs/heads/master
2020-05-20T05:51:21.220904
2013-05-14T14:29:26
2013-05-14T14:29:26
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.47287341952323914, "alphanum_fraction": 0.4896182119846344, "avg_line_length": 25.192981719970703, "blob_id": "1084534e97996157cd679a67a0380069bdf98ca9", "content_id": "c54bc2d4872f10e87a2aa909ad9159d76f5e1735", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1541, "license_type": "no_license", "max_line_length": 93, "num_lines": 57, "path": "/WebPage.py", "repo_name": "h3idan/spider-knownsec", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n#**********************************\n# author: h3idan\n# datetime: 2012-12-26 15:58\n#**********************************\n\n\nimport urllib2\nfrom bs4 import BeautifulSoup\nimport chardet\nimport re\nimport logging\nimport traceback\n\n\nlog = logging.getLogger('claw.GetPage')\n\nclass GetPage():\n '''\n 读取到url的htmlsource,解析出该htmlsource中所有href\n '''\n def __init__(self, urltuples):\n self.urltuples = urltuples\n self.urls = []\n\n\n def get_html(self):\n url = self.urltuples[1]\n try:\n html = urllib2.urlopen(url, timeout=15).read()\n except Exception, e:\n print \"error: %s'\\n url: %s'\" % (e, url)\n log.debug(\"URL: %s\" % url + traceback.format_exc())\n \n else:\n return html\n \n \n #def get_url(self):\n # id, url = self.urltuples\n # try:\n # html = urllib2.urlopen(url, timeout=15).read()\n # except Exception, e:\n # print \"error: %s'\\n url: %s'\" % (e, url)\n # else:\n # soup = BeautifulSoup(html)\n # tag_a = soup.findAll('a', onclick=None, href=re.compile('^http:|^/')) # 查找a标签,\n # for i in tag_a:\n # s = i['href'].rstrip('/')\n # href = s.encode('utf-8')\n # if href.startswith('/'): # 解决锚点的问题\n # href = url + href\n # self.urls.append((id+1, href))\n # urllist = list(set(self.urls))\n # return urllist\n" } ]
1
VikramGrover/flappy-bird-ai
https://github.com/VikramGrover/flappy-bird-ai
d34ba1f26856176325c973a37e051bd3abdc88de
5af15e13f66ce578e6e7a6a4d17fed81c810678e
10ed128de1d0c25c427369c7488b94736ee32113
refs/heads/main
2023-04-08T12:06:35.872787
2021-04-18T05:51:34
2021-04-18T05:51:34
324,850,621
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6685134172439575, "alphanum_fraction": 0.72114497423172, "avg_line_length": 24.186046600341797, "blob_id": "c8a493b7ecfe235273bdbb343efdba8f3c5512a3", "content_id": "19e3b0b2b1c27355d5da28cb4e28520db9cf259a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1083, "license_type": "no_license", "max_line_length": 71, "num_lines": 43, "path": "/constants.py", "repo_name": "VikramGrover/flappy-bird-ai", "src_encoding": "UTF-8", "text": "import os\nimport pygame\nfrom preprocess import load_images\n\n# general constants\nAI_SHELL_ARGUMENT = \"-ai\"\nAI_MAX_RUNS = 100\nAI_TINT = (255, 255, 255, 210)\n\n# images-related constants\nIMG_DICT = load_images()\nBIRD_IMGS = [IMG_DICT['fb1'], IMG_DICT['fb2'], IMG_DICT['fb3']]\nGROUND_IMG = IMG_DICT['ground']\nPIPE_IMG = IMG_DICT['pipe']\nSCENE_IMG = IMG_DICT['scene']\n\n# environment-related constants - tweak these to change the environment\nGROUND_AND_PIPE_VELOCITY = 2\nGROUND_HEIGHT = 50\nPIPE_STARTING_DIST = 700\nNEW_PIPE_DIST = 10\nFRAMERATE = 130\nPIPE_GAP = 140\nGRAVITATIONAL_VEL = 1.8\nMAX_GRAVITATIONAL_VEL = 5.2\nHIGHEST_PIPE_GAP = 50\nLOWEST_PIPE_GAP = 350\n\n# bird-related constants - tweak these to change bird gameplay\nBIRD_ROT_VEL = 1\nBIRD_MAX_ROT = 25\nBIRD_WING_FLAP_RATE = 5\nBIRD_JUMP_VEL = -3.8\nBIRD_JUMP_BOOST = -2.7\nHUMAN_JUMP_COOLDOWN = 20\nAI_JUMP_COOLDOWN = 25\n\n# window-realted constants\nWINDOW_WIDTH = SCENE_IMG.get_width()\nWINDOW_HEIGHT = SCENE_IMG.get_height()\npygame.font.init()\nSCORE_FONT = pygame.font.SysFont(\"arial\", 50)\nSTATS_FONT = pygame.font.SysFont(\"arial\", 30)\n" }, { "alpha_fraction": 0.5093085169792175, "alphanum_fraction": 0.5292553305625916, "avg_line_length": 21.787878036499023, "blob_id": "0f297ec4f5f844a589eb67aa03f6a0cfc61b37d2", "content_id": "0efef51902f4c13fc16b79be855a92b2c937430b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 752, "license_type": "no_license", "max_line_length": 56, "num_lines": 33, "path": "/ground.py", "repo_name": "VikramGrover/flappy-bird-ai", "src_encoding": "UTF-8", "text": "import pygame\nfrom constants import *\n\n\nclass Ground:\n VELOCITY = GROUND_AND_PIPE_VELOCITY\n WIDTH = GROUND_IMG.get_width()\n IMG = GROUND_IMG\n\n def __init__(self, y):\n self.y = y\n self.x1 = 0\n self.x2 = self.WIDTH\n\n def move(self):\n \"\"\"\n Moves the ground and handles stitching of images\n \"\"\"\n self.x1 -= self.VELOCITY\n self.x2 -= self.VELOCITY\n\n if self.x1 + self.WIDTH < 0:\n self.x1 = self.x2 + self.WIDTH\n\n if self.x2 + self.WIDTH < 0:\n self.x2 = self.x1 + self.WIDTH\n\n def draw(self, window):\n \"\"\"\n Draws the ground\n \"\"\"\n window.blit(self.IMG, (self.x1, self.y))\n window.blit(self.IMG, (self.x2, self.y))\n" }, { "alpha_fraction": 0.4823490083217621, "alphanum_fraction": 0.49539902806282043, "avg_line_length": 30.962566375732422, "blob_id": "967af416999f8b3f05093ca9f02310572b5d684e", "content_id": "9e036dc24c4a3af99e5fc68cdcd0bb259335913c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5977, "license_type": "no_license", "max_line_length": 133, "num_lines": 187, "path": "/main.py", "repo_name": "VikramGrover/flappy-bird-ai", "src_encoding": "UTF-8", "text": "import time\nimport pygame\nimport neat\nimport sys\nfrom constants import *\nfrom flappy_bird import FlappyBird\nfrom pipe_pair import PipePair\nfrom ground import Ground\n\ngen_num = 0\n\n\ndef draw_game(birds, 
dead_birds, ground, pipe_pairs, window, total_score, gen_num):\n \"\"\"\n Draws the whole screen\n \"\"\"\n window.blit(SCENE_IMG, (0, 0))\n\n for pipe_pair in pipe_pairs:\n pipe_pair.draw(window)\n\n score = SCORE_FONT.render(str(total_score), 1, (255, 255, 255))\n window.blit(score, (WINDOW_WIDTH//2 -\n score.get_width()//2, 10))\n\n for bird in birds:\n bird.draw(window)\n\n for dead_bird in dead_birds:\n dead_bird.draw(window)\n\n ground.draw(window)\n\n if gen_num > 0:\n gen = STATS_FONT.render(\n \"Gen: \" + str(gen_num), 1, (255, 255, 255))\n window.blit(gen, (WINDOW_WIDTH//2 -\n gen.get_width()//2, WINDOW_HEIGHT - 100))\n\n pygame.display.update()\n\n\ndef main(genomes, config, ai_enabled=True):\n \"\"\"\n Main function that has the game loop and calls draw function\n \"\"\"\n global gen_num\n\n if ai_enabled:\n gen_num += 1\n\n birds = [FlappyBird(\n WINDOW_WIDTH//3 - BIRD_IMGS[0].get_width()//2, WINDOW_HEIGHT//2 - BIRD_IMGS[0].get_height()//2, False)]\n dead_birds = []\n\n if ai_enabled:\n nets = []\n ge = []\n birds.clear()\n generated_tints = []\n\n for _, gen in genomes:\n net = neat.nn.FeedForwardNetwork.create(gen, config)\n nets.append(net)\n\n birds.append(FlappyBird(\n WINDOW_WIDTH//3 - BIRD_IMGS[0].get_width()//2, WINDOW_HEIGHT//2 - BIRD_IMGS[0].get_height()//2, ai_enabled, AI_TINT))\n gen.fitness = 0\n ge.append(gen)\n\n window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\n ground = Ground(WINDOW_HEIGHT - GROUND_HEIGHT)\n ticker = pygame.time.Clock()\n total_score = 0\n pipe_pairs = [PipePair(WINDOW_WIDTH + PIPE_STARTING_DIST)]\n\n # main game loop\n while (len(birds) and ai_enabled) or (not ai_enabled):\n ticker.tick(FRAMERATE)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n quit()\n elif event.type == pygame.KEYDOWN and not ai_enabled:\n if event.key == pygame.K_SPACE and len(birds):\n birds[0].jump()\n elif event.key == pygame.K_r:\n birds = [FlappyBird(\n WINDOW_WIDTH//3 - BIRD_IMGS[0].get_width()//2, WINDOW_HEIGHT//2 - BIRD_IMGS[0].get_height()//2, False)]\n dead_birds = []\n total_score = 0\n pipe_pairs = [PipePair(WINDOW_WIDTH + PIPE_STARTING_DIST)]\n continue\n\n for i, dead_bird in enumerate(dead_birds):\n if dead_bird.y < WINDOW_HEIGHT - GROUND_HEIGHT - BIRD_IMGS[0].get_height():\n dead_bird.move()\n elif dead_bird.x + BIRD_IMGS[0].get_width() < 0:\n dead_birds.pop(i)\n\n if len(birds):\n pipe_ind = 0\n if len(pipe_pairs) > 1 and birds[0].x > pipe_pairs[0].x + PIPE_IMG.get_width():\n pipe_ind = 1\n\n for i, bird in enumerate(birds):\n if bird.y < WINDOW_HEIGHT - GROUND_HEIGHT - BIRD_IMGS[0].get_height():\n bird.move()\n\n if ai_enabled:\n ge[i].fitness += 0.1\n out = nets[i].activate((bird.y, abs(\n bird.y - pipe_pairs[pipe_ind].height), abs(bird.y - pipe_pairs[pipe_ind].bottom)))\n\n if out[0] > 0.5:\n bird.jump()\n elif bird.alive:\n bird.die()\n dead_birds.append(birds.pop(i))\n if ai_enabled:\n nets.pop(i)\n ge.pop(i)\n\n new_pipe = False\n x = 0\n while x < len(pipe_pairs) and len(birds):\n pipe_pair = pipe_pairs[x]\n\n for i, bird in enumerate(birds):\n if pipe_pair.collision(bird):\n bird.die()\n dead_birds.append(birds.pop(i))\n if ai_enabled:\n ge[i].fitness -= 1\n nets.pop(i)\n ge.pop(i)\n\n if not pipe_pair.passed and pipe_pair.x < bird.x:\n pipe_pair.passed = True\n new_pipe = True\n\n if pipe_pair.x + PIPE_IMG.get_width() < 0:\n pipe_pairs.pop(x)\n continue\n\n pipe_pair.move()\n x += 1\n\n if new_pipe:\n total_score += 1\n pipe_pairs.append(PipePair(WINDOW_WIDTH + NEW_PIPE_DIST))\n\n if 
ai_enabled:\n for gen in ge:\n gen.fitness += 5\n\n if len(birds):\n ground.move()\n\n for dead_bird in dead_birds:\n dead_bird.move_horizontally()\n\n draw_game(birds, dead_birds, ground,\n pipe_pairs, window, total_score, gen_num)\n\n\ndef setup_config(conf_path):\n conf = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,\n neat.DefaultSpeciesSet, neat.DefaultStagnation,\n conf_path)\n\n p = neat.Population(conf)\n p.add_reporter(neat.StdOutReporter(True))\n stats = neat.StatisticsReporter()\n p.add_reporter(stats)\n\n winner = p.run(main, AI_MAX_RUNS)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) > 1 and sys.argv[1] == \"-ai\":\n curr_dir = os.path.dirname(__file__)\n conf_path = os.path.join(curr_dir, \"neat-config.txt\")\n setup_config(conf_path)\n else:\n main(None, None, False)\n" }, { "alpha_fraction": 0.5894001126289368, "alphanum_fraction": 0.5917297601699829, "avg_line_length": 31.39622688293457, "blob_id": "a85142d4f88ff774e7942077d93808b9dd0689a3", "content_id": "20877f2572fb410408f60271fca8e7838e6f60ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1717, "license_type": "no_license", "max_line_length": 108, "num_lines": 53, "path": "/pipe_pair.py", "repo_name": "VikramGrover/flappy-bird-ai", "src_encoding": "UTF-8", "text": "import pygame\nimport random\nfrom constants import *\n\n\nclass PipePair:\n GAP = PIPE_GAP\n VELOCITY = GROUND_AND_PIPE_VELOCITY\n TOP_PIPE_IMG = pygame.transform.flip(PIPE_IMG, False, True)\n BOTTOM_PIPE_IMG = PIPE_IMG\n\n def __init__(self, x):\n self.x = x\n self.height = random.randrange(HIGHEST_PIPE_GAP, LOWEST_PIPE_GAP)\n self.top = self.height - self.TOP_PIPE_IMG.get_height()\n self.bottom = self.height + self.GAP\n self.passed = False\n\n def move(self):\n \"\"\"\n Moves the PipePair at a set velocity\n \"\"\"\n self.x -= self.VELOCITY\n\n def draw(self, window):\n \"\"\"\n Draws the PipePair\n \"\"\"\n window.blit(self.TOP_PIPE_IMG, (self.x, self.top))\n window.blit(self.BOTTOM_PIPE_IMG, (self.x, self.bottom))\n\n def collision(self, bird):\n \"\"\"\n Detects collisions between bird and a PipePair\n \"\"\"\n bird_img_mask = bird.image_mask()\n top_pipe_mask = pygame.mask.from_surface(self.TOP_PIPE_IMG)\n bottom_pipe_mask = pygame.mask.from_surface(self.BOTTOM_PIPE_IMG)\n\n if ((bird.x + bird.IMGS[0].get_width()) - self.x) >= 0 and (bird.y + bird.IMGS[0].get_height()) < 0:\n # player is trying to bypass the pipes from the top\n return True\n\n bird_top_pipe_offset = (self.x - bird.x, self.top - round(bird.y))\n bird_bottom_pipe_offset = (\n self.x - bird.x, self.bottom - round(bird.y))\n\n top_overlap = bird_img_mask.overlap(\n top_pipe_mask, bird_top_pipe_offset)\n bottom_overlap = bird_img_mask.overlap(\n bottom_pipe_mask, bird_bottom_pipe_offset)\n\n return (top_overlap or bottom_overlap)\n" }, { "alpha_fraction": 0.648409903049469, "alphanum_fraction": 0.6554770469665527, "avg_line_length": 27.299999237060547, "blob_id": "68c4a4efa38f0179e449a29dbfd6297b0e2f6480", "content_id": "b6c930efce3eeef5b26530a9a69fe4e218f12bc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 566, "license_type": "no_license", "max_line_length": 91, "num_lines": 20, "path": "/preprocess.py", "repo_name": "VikramGrover/flappy-bird-ai", "src_encoding": "UTF-8", "text": "import os\nimport pygame\n\nIMG_FOLDER_NAME = \"imgs\"\nIMG_SIZE_MULTIPLIER = 1.3\n\n\ndef load_images():\n \"\"\"\n Loads all the images from the \"imgs\" folder 
in the directory into a python dict\n \"\"\"\n output = {}\n file_names = [file for file in os.listdir(os.path.join(\n IMG_FOLDER_NAME)) if os.path.isfile(os.path.join(IMG_FOLDER_NAME, file))]\n\n for name in file_names:\n output[os.path.splitext(name)[0]] = pygame.transform.rotozoom(\n pygame.image.load(os.path.join(IMG_FOLDER_NAME, name)), 0, IMG_SIZE_MULTIPLIER)\n\n return output\n" }, { "alpha_fraction": 0.6768292784690857, "alphanum_fraction": 0.7134146094322205, "avg_line_length": 26.33333396911621, "blob_id": "4d5304e9a39eaec3fd0ec068edbda497d25380f2", "content_id": "f62b90e2f96e919e912721eb6114df6a1dd16825", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 164, "license_type": "no_license", "max_line_length": 60, "num_lines": 6, "path": "/README.md", "repo_name": "VikramGrover/flappy-bird-ai", "src_encoding": "UTF-8", "text": "# flappy_bird_ai\nA flappy bird AI created using Python\n\n### Here is a demo of the AI playing the game\n\n<img src=\"flappybird_ai_demo.gif\" width=\"800\" height=\"535\"/>\n" }, { "alpha_fraction": 0.5440895557403564, "alphanum_fraction": 0.5539175271987915, "avg_line_length": 29.781513214111328, "blob_id": "a1f9da3dd2636fc00ac448f3ccd14cc6c0b1ce96", "content_id": "8fb363bb33c1a6aa22f2fd1bc54c4fc0672cdf94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3663, "license_type": "no_license", "max_line_length": 84, "num_lines": 119, "path": "/flappy_bird.py", "repo_name": "VikramGrover/flappy-bird-ai", "src_encoding": "UTF-8", "text": "import pygame\nfrom constants import *\n\n\nclass FlappyBird:\n IMGS = BIRD_IMGS\n MAX_ROTATION = BIRD_MAX_ROT\n ROTATION_VEL = BIRD_ROT_VEL\n JUMP_VEL = BIRD_JUMP_VEL\n JUMP_VEL_BOOST = BIRD_JUMP_BOOST\n\n def __init__(self, x, y, ai_enabled, tint=None):\n self.x = x\n self.y = y\n self.start_height = self.y\n self.curr_rotation = 0\n self.jump_tick = 0.0\n self.curr_vel = 0.0\n self.img_tick = 0\n if ai_enabled:\n self.cooldown = AI_JUMP_COOLDOWN\n else:\n self.cooldown = HUMAN_JUMP_COOLDOWN\n\n self.jump_cooldown_tick = self.cooldown\n self.img_index = 1\n self.flapping_dir = 0\n self.curr_img = self.IMGS[1]\n self.alive = True\n self.tint = tint\n\n def jump(self):\n \"\"\"\n Performs a jump on the bird\n \"\"\"\n if self.jump_cooldown_tick >= self.cooldown:\n self.curr_vel = self.JUMP_VEL\n self.jump_tick = 0.0\n self.flapping_dir = 1\n self.img_index = 0\n self.img_tick = 0\n self.jump_cooldown_tick = 0\n self.start_height = self.y\n\n def move(self):\n \"\"\"\n Moves the bird horizontally, also takes care of the rotation\n \"\"\"\n self.jump_tick += 0.01\n if self.jump_cooldown_tick < self.cooldown:\n self.jump_cooldown_tick += 1\n\n self.curr_vel += (GRAVITATIONAL_VEL * self.jump_tick)\n self.curr_vel = min(self.curr_vel, MAX_GRAVITATIONAL_VEL)\n displacement = self.curr_vel * self.jump_tick\n\n if displacement < 0:\n displacement += self.JUMP_VEL_BOOST\n\n self.y += displacement\n\n if displacement < 0:\n # moving up or still above the starting height\n self.curr_rotation = max(self.curr_rotation, self.MAX_ROTATION)\n elif self.curr_rotation > -90:\n # moving down\n self.curr_rotation -= self.ROTATION_VEL\n\n def move_horizontally(self):\n \"\"\"\n Move the FlappyBird horizontally, only called when a bird is dead in AI mode\n \"\"\"\n self.x -= GROUND_AND_PIPE_VELOCITY\n\n def draw(self, window):\n \"\"\"\n Draws the bird onto the window, also handles flapping of wings\n \"\"\"\n if self.alive:\n 
self.img_tick += 1\n\n if self.curr_rotation <= -80:\n # the bird is falling and the bird should not be flapping\n self.img_index = 1\n self.curr_img = self.IMGS[1]\n self.img_tick = 0\n elif (self.img_tick % BIRD_WING_FLAP_RATE) == 0 and self.alive:\n # need to switch image\n self.img_tick = 0\n self.img_index += self.flapping_dir\n self.curr_img = self.IMGS[self.img_index]\n\n if self.img_index <= 0 or self.img_index >= len(self.IMGS) - 1:\n # switch the direction of wing flapping\n self.flapping_dir *= -1\n\n rot_img = pygame.transform.rotate(self.curr_img, self.curr_rotation)\n centered_rect = rot_img.get_rect(\n center=self.curr_img.get_rect(topleft=(self.x, self.y)).center)\n\n if self.tint:\n rot_img = rot_img.convert_alpha()\n rot_img.fill(self.tint, None, pygame.BLEND_RGBA_MULT)\n\n window.blit(rot_img, centered_rect.topleft)\n\n def image_mask(self):\n \"\"\"\n Returns the image mask of the FlappyBird\n \"\"\"\n return pygame.mask.from_surface(self.curr_img)\n\n def die(self):\n \"\"\"\n Makes the FlappyBird die\n \"\"\"\n self.alive = False\n self.curr_vel = 0\n self.curr_img = self.IMGS[1]\n" } ]
7
GEM7318/SpotiBot
https://github.com/GEM7318/SpotiBot
2e6f44a69b3270d21bf1e4852b73bbd6648c27e7
054525185103087a1b305e0c9db50c723b580666
a2cd2dcf43a373096a5d5aef91e31b4aa9806aac
refs/heads/master
2022-12-16T21:30:55.215933
2020-09-26T18:46:23
2020-09-26T18:46:23
218,107,380
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5812115669250488, "alphanum_fraction": 0.5930640697479248, "avg_line_length": 28.31999969482422, "blob_id": "b45a1f9721d1dcfdea5a34b2539812aab627013a", "content_id": "f515630a448bbdfd73bcdf0f1ef95521ec768ca1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2278, "license_type": "no_license", "max_line_length": 79, "num_lines": 75, "path": "/utils_independent/PodcastTrueUp.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "\r\n\r\nfrom spotibot.core.objects import User as user\r\n\r\nimport requests\r\nimport os\r\nimport pandas as pd\r\nimport time\r\n\r\ngem = user.UserDBO('125393293')\r\n\r\n# Getting historical episode ID(s) --------------------------------------------\r\nhref_existing = r'https://api.spotify.com/v1/playlists/' \\\r\n r'4RKIVPzUpCbG2YJuI2Nrfv/tracks'\r\n\r\nrequest_existing = requests.get(href_existing, headers=gem.headers())\r\nresult_existing = request_existing.json()\r\n\r\nexisting_ids = \\\r\n [val.get('track').get('id') for val in result_existing.get('items')]\r\n\r\nexisting_ids\r\n\r\n# Reading in batched ID(s) ----------------------------------------------------\r\n\r\npath_to_excel = \\\r\n os.path.join(os.getcwd(), 'sandbox', 'Podcast URIs To Add.xlsx')\r\n\r\ndf = pd.read_excel(path_to_excel, header=None)\r\n\r\nto_add_uri = [val.split(':')[-1] for val in df[0]]\r\n\r\n# Combining all ID(s) ---------------------------------------------------------\r\nall_uri = to_add_uri + existing_ids\r\nall_uri = [val for val in set(all_uri)]\r\n# TODO: Add URI(s) from existing currently played episodes as well\r\n\r\n# Batching - successful API comms but not returning full array of episodes-----\r\n\r\n\r\ndef chunks(lst, n):\r\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\r\n for i in range(0, len(lst), n):\r\n yield lst[i:i + n]\r\n\r\n\r\nchunked = chunks(all_uri, 50)\r\n\r\nhref = r'https://api.spotify.com/v1/episodes'\r\n\r\nresults = []\r\nwhile chunked:\r\n data = {'ids': next(chunked)}\r\n request2 = requests.get(href, params=data, headers=gem.headers())\r\n result2 = request2.json()\r\n results.append(result2)\r\n\r\n# Single request per episode---------------------------------------------------\r\n\r\nresults = []\r\nfor i, uri in enumerate(all_uri):\r\n href2 = f'https://api.spotify.com/v1/episodes/{uri}'\r\n request = requests.get(href2, headers=gem.headers())\r\n result = request.json()\r\n results.append(result)\r\n print(f\"<{i}> completed\")\r\n time.sleep(1)\r\n\r\nepisode_dict = {k.get('id'): k.get('release_date') for k in results}\r\n\r\ndate_uri_dict = {v: [] for v in episode_dict.values()}\r\n\r\nfor k, v in episode_dict.items():\r\n subber = date_uri_dict.get(v)\r\n subber.append(k)\r\n# TODO: Add these URIs to the all podcasts played playlist in cronological\r\n# order (oldest gets added first)\r\n" }, { "alpha_fraction": 0.6229560971260071, "alphanum_fraction": 0.6261229515075684, "avg_line_length": 36.10551452636719, "blob_id": "28e9552bf3be09e0755ba05b974b7ab96a506d13", "content_id": "41fafc94460f50854c9355025da3cf059a044879", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15473, "license_type": "no_license", "max_line_length": 87, "num_lines": 417, "path": "/spotibot/core/objects/Music.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import json\nimport requests\n\nfrom spotibot.core.objects import Time\n\nfrom spotibot.core.objects.General import 
Image, ExternalUrl, ExternalId\n\nfrom spotibot.mongo.utils.Handlers import object_handler, get_serializable\n\n\nclass Album:\n \"\"\"Auto-generated attribute instantiation docstring for album\n object (simplified)\n\n Note: Parameter description in below docstring is populated based\n on the descriptions at the following link:\n https://developer.spotify.com/documentation/web-\n api/reference/object-model\n\n Please consult their official documentation for more in-depth\n information & full-linking across pages.\n\n Attributes:\n album_group (string, optional): The field is present when\n getting an artists albums. Possible values are album, single,\n compilation, appears_on. Compare to album_type this field\n represents relationship between the artist and the album.\n album_type (str): The type of the album: one of album, single,\n or compilation.\n artists (array of simplified artist objects): The artists of the\n album. Each artist object includes a link in ``href`` to more\n detailed information about the artist.\n available_markets (array of strings): The markets in which the\n album is available: ISO 3166-1 alpha-2 country codes. Note\n that an album is considered available in a market when at least\n 1 of its tracks is available in that market.\n external_urls (an external URL object): Known external URLs for\n this album.\n href (str): A link to the Web API endpoint providing full\n details of the album.\n id (str): The Spotify ID for the album.\n images (array of image objects): The cover art for the album in\n various sizes, widest first.\n name (str): The name of the album. In case of an album\n take-down, the value may be an empty string.\n release_date (str): The date the album was first released, for\n example ``1981``. Depending on the precision, it might be shown\n as ``1981-12`` or ``1981-12-15``.\n release_date_precision (str): The precision with which\n ``release_date`` value is known: ``year`` , ``month`` , or\n ``day``.\n restrictions (str): Part of the response when\n Track Relinking is applied, the original track is not available\n in the given market, and Spotify did not have any tracks to\n relink it with. 
The track response will still contain metadata\n for the original track, and a restrictions object containing\n the reason why the track is not available: ``\"restrictions\" :\n {\"reason\" : \"market\"}``\n type (str): The object type: album\n uri (str): The Spotify URI for the album.\n \"\"\"\n\n def __init__(self, album):\n\n self.album_type: str = object_handler(album, \"album_type\")\n\n self.artists: list = [\n Artist(artist) for artist in object_handler(album, \"artists\")\n ]\n\n self.available_markets: list = object_handler(album, \"available_markets\")\n\n self.external_urls: ExternalUrl = ExternalUrl(\n object_handler(album, \"external_urls\")\n )\n\n self.href: str = object_handler(album, \"href\")\n\n self.id: str = object_handler(album, \"id\")\n\n self.images: list = [Image(image) for image in object_handler(album, \"images\")]\n\n self.name: str = object_handler(album, \"name\")\n\n self.release_date: str = object_handler(album, \"release_date\")\n\n self.release_date_precision: str = object_handler(\n album, \"release_date_precision\"\n )\n\n self.restrictions: dict = object_handler(album, \"restrictions\")\n\n self.type: str = object_handler(album, \"type\")\n\n self.uri: str = object_handler(album, \"uri\")\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n\n\nclass Artist:\n \"\"\"Auto-generated attribute instantiation docstring for artist\n object (simplified)\n\n Note: Parameter description in below docstring is populated based\n on the descriptions at the following link:\n https://developer.spotify.com/documentation/web-\n api/reference/object-model\n\n Please consult their official documentation for more in-depth\n information & full-linking across pages.\n\n Attributes:\n external_urls (an external URL object): Known external URLs for\n this artist.\n href (str): A link to the Web API endpoint providing full\n details of the artist.\n id (str): The Spotify ID for the artist.\n name (str): The name of the artist.\n type (str): The object type: ``\"artist\"``\n uri (str): The Spotify URI for the artist.\n \"\"\"\n\n def __init__(self, artist):\n self.href: str = object_handler(artist, \"href\")\n\n self.id: str = object_handler(artist, \"id\")\n\n self.name: str = object_handler(artist, 
\"name\")\n\n self.type: str = object_handler(artist, \"type\")\n\n self.uri: str = object_handler(artist, \"uri\")\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n\n\nclass Track:\n \"\"\"Auto-generated attribute instantiation docstring for track\n object (full)\n\n Note: Parameter description in below docstring is populated based\n on the descriptions at the following link:\n https://developer.spotify.com/documentation/web-\n api/reference/object-model\n\n Please consult their official documentation for more in-depth\n information & full-linking across pages.\n\n Attributes:\n album (a simplified album object): The album on which the track\n appears. The album object includes a link in ``href`` to full\n information about the album.\n artists (an array of simplified artist objects): The artists who\n performed the track. Each artist object includes a link in\n ``href`` to more detailed information about the artist.\n available_markets (array of strings): A list of the countries in\n which the track can be played, identified by their ISO 3166-1\n alpha-2 code.\n disc_number (int): The disc number (usually ``1`` unless the\n album consists of more than one disc).\n duration (int): The track length in milliseconds.\n explicit (Boolean): Whether or not the track has explicit lyrics\n ( ``true`` = yes it does; ``false`` = no it does not OR\n unknown).\n external_ids (an external ID object): Known external IDs for the\n track.\n external_urls (an external URL object): Known external URLs for\n this track.\n href (str): A link to the Web API endpoint providing full\n details of the track.\n id (str): The Spotify ID for the track.\n is_playable (bool): Part of the response when Track Relinking is\n applied. If ``true`` , the track is playable in the given\n market. Otherwise ``false``.\n linked_from (a linked track object): Part of the response when\n Track Relinking is applied, and the requested track has been\n replaced with different track. 
The track in the\n ``linked_from`` object contains information about the\n originally requested track.\n restrictions (a restrictions object): Part of the response when\n Track Relinking is applied, the original track is not available\n in the given market, and Spotify did not have any tracks to\n relink it with. The track response will still contain metadata\n for the original track, and a restrictions object containing\n the reason why the track is not available: ``\"restrictions\" :\n {\"reason\" : \"market\"}``\n name (str): The name of the track.\n popularity (int): The popularity of the track. The value will\n be between 0 and 100, with 100 being the most popular.The\n popularity of a track is a value between 0 and 100, with 100\n being the most popular. The popularity is calculated by\n algorithm and is based, in the most part, on the total number\n of plays the track has had and how recent those plays\n are.Generally speaking, songs that are being played a lot now\n will have a higher popularity than songs that were played a lot\n in the past. Duplicate tracks (e.g. the same track from a\n single and an album) are rated independently. Artist and album\n popularity is derived mathematically from track popularity.\n Note that the popularity value may lag actual popularity by a\n few days: the value is not updated in real time.\n preview_url (str): A link to a 30 second preview (MP3 format) of\n the track. Can be ``null``\n track_number (int): The number of the track. If an album has\n several discs, the track number is the number on the specified\n disc.\n type (str): The object type: track.\n uri (str): The Spotify URI for the track.\n is_local (bool): Whether or not the track is from a local file.\n \"\"\"\n\n def __init__(self, track: dict):\n\n self.album: Album = Album(object_handler(track, \"album\"))\n\n self.artists: list = [\n Artist(artist) for artist in object_handler(track, \"artists\")\n ]\n\n self.available_markets: list = object_handler(track, \"available_markets\")\n\n self.disc_number: int = object_handler(track, \"disc_number\")\n\n self.duration: Time.Timestamp = Time.Timestamp(\n track.get(\"duration_ms\"), base=\"milliseconds\"\n )\n\n self.explicit: bool = object_handler(track, \"explicit\")\n\n self.external_ids: ExternalId = ExternalId(\n object_handler(track, \"external_ids\")\n )\n\n self.external_urls: ExternalUrl = ExternalUrl(\n object_handler(track, \"external_urls\")\n )\n\n self.href: str = object_handler(track, \"href\")\n\n self.id: str = object_handler(track, \"id\")\n\n self.is_local: bool = object_handler(track, \"is_local\")\n\n self.name: str = object_handler(track, \"name\")\n\n self.popularity: int = object_handler(track, \"popularity\")\n\n self.preview_url: str = object_handler(track, \"preview_url\")\n\n self.track_number: int = object_handler(track, \"track_number\")\n\n self.type: str = object_handler(track, \"type\")\n\n self.uri: str = object_handler(track, \"uri\")\n\n def get_duration(self) -> Time.Timestamp:\n return self.duration\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return 
getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n\n def add_to_playlist(self, playlist_href: str, headers: dict):\n\n return requests.post(\n playlist_href, data=json.dumps({\"uris\": [self.uri]}), headers=headers\n )\n" }, { "alpha_fraction": 0.6156023144721985, "alphanum_fraction": 0.6189982295036316, "avg_line_length": 34.81418991088867, "blob_id": "e3a3fb621809d0f63404feaa62ec1462bbb6e11b", "content_id": "681df0a75e847e316b7acffdc0f96b04381b63c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10601, "license_type": "no_license", "max_line_length": 86, "num_lines": 296, "path": "/spotibot/core/objects/Podcasts.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import json\nimport requests\n\nfrom spotibot.core.objects import Time\nfrom spotibot.core.objects.General import Image, ExternalId, ExternalUrl\nfrom spotibot.mongo.utils.Handlers import object_handler, get_serializable\n\n\nclass Show:\n \"\"\"Auto-generated attribute instantiation docstring for show\n object (simplified)\n\n Note: Parameter description in below docstring is populated based\n on the descriptions at the following link:\n https://developer.spotify.com/documentation/web-\n api/reference/object-model\n\n Please consult their official documentation for more in-depth\n information & full-linking across pages.\n\n Attributes:\n available_markets (array of strings): A list of the countries in\n which the show can be played, identified by their ISO 3166-1\n alpha-2 code.\n copyrights (array of copyright objects): The copyright\n statements of the show.\n description (str): A description of the show.\n explicit (bool): Whether or not the show has explicit content\n (true = yes it does; false = no it does not OR unknown).\n external_urls (an external URL object): Known external URLs for\n this show.\n href (str): A link to the Web API endpoint providing full\n details of the show.\n id (str): The Spotify ID for the show.\n images (array of image objects): The cover art for the show in\n various sizes, widest first.\n is_externally_hosted (bool): True if all of the shows episodes\n are hosted outside of Spotifys CDN. 
This field might be\n ``null`` in some cases.\n languages (array of strings): A list of the languages used in\n the show, identified by their ISO 639 code.\n media_type (str): The media type of the show.\n name (str): The name of the show.\n publisher (str): The publisher of the show.\n type (str): The object type: show.\n uri (str): The Spotify URI for the show.\n \"\"\"\n\n def __init__(self, show):\n if show:\n self.available_markets: str = object_handler(show, \"available_markets\")\n\n self.copyrights: str = object_handler(show, \"copyrights\")\n\n self.description: str = object_handler(show, \"description\")\n\n self.explicit: str = object_handler(show, \"explicit\")\n\n self.external_urls: ExternalUrl = ExternalUrl(\n object_handler(show, \"external_urls\")\n )\n\n self.href: str = object_handler(show, \"href\")\n\n self.id: str = object_handler(show, \"id\")\n\n self.images: list = [Image(image) for image in show.get(\"images\", [None])]\n\n self.is_externally_hosted: str = object_handler(\n show, \"is_externally_hosted\"\n )\n\n self.languages: str = object_handler(show, \"languages\")\n\n self.media_type: str = object_handler(show, \"media_type\")\n\n self.name: str = object_handler(show, \"name\")\n\n self.publisher: str = object_handler(show, \"publisher\")\n\n self.type: str = object_handler(show, \"type\")\n\n self.uri: str = object_handler(show, \"uri\")\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n\n\nclass Episode:\n \"\"\"Auto-generated attribute instantiation docstring for episode\n object (full)\n\n Note: Parameter description in below docstring is populated based\n on the descriptions at the following link:\n https://developer.spotify.com/documentation/web-\n api/reference/object-model\n\n Please consult their official documentation for more in-depth\n information & full-linking across pages.\n\n Attributes:\n audio_preview_url (str): A URL to a 30 second preview (MP3\n format) of the episode. 
``null`` if not available.\n description (str): A description of the episode.\n duration_ms (int): The episode length in milliseconds.\n explicit (bool): Whether or not the episode has explicit content\n (true = yes it does; false = no it does not OR unknown).\n external_urls (an external URL object): External URLs for this\n episode.\n href (str): A link to the Web API endpoint providing full\n details of the episode.\n id (str): The Spotify ID for the episode.\n images (array of image objects): The cover art for the episode\n in various sizes, widest first.\n is_externally_hosted (bool): True if the episode is hosted\n outside of Spotifys CDN.\n is_playable (bool): True if the episode is playable in the given\n market. Otherwise false.\n language (str): Note: This field is deprecated and might be\n removed in the future. Please use the ``languages`` field\n instead. The language used in the episode, identified by a ISO\n 639 code.\n languages (array of strings): A list of the languages used in\n the episode, identified by their ISO 639 code.\n name (str): The name of the episode.\n release_date (str): The date the episode was first released, for\n example ``\"1981-12-15\"``. Depending on the precision, it might\n be shown as ``\"1981\"`` or ``\"1981-12\"``.\n release_date_precision (str): The precision with which\n ``release_date`` value is known: ``\"year\"``, ``\"month\"``, or\n ``\"day\"``.\n resume_point (a resume point object): The users most recent\n position in the episode. Set if the supplied access token is a\n user token and has the scope ``user-read-playback-position``.\n show (a simplified show object): The show on which the episode\n belongs.\n type (str): The object type: ``\"episode\"``.\n uri (str): The Spotify URI for the episode.\n \"\"\"\n\n def __init__(self, episode):\n self.audio_preview_url: str = object_handler(episode, \"audio_preview_url\")\n\n self.description: str = object_handler(episode, \"description\")\n\n self.duration: Time.Timestamp = Time.Timestamp(\n episode.get(\"duration_ms\"), base=\"milliseconds\"\n )\n\n self.explicit: bool = object_handler(episode, \"explicit\")\n\n self.external_urls: ExternalUrl = ExternalUrl(\n object_handler(episode, \"external_urls\")\n )\n\n self.href: str = object_handler(episode, \"href\")\n\n self.id: str = object_handler(episode, \"id\")\n\n self.images: list = object_handler(episode, \"images\")\n\n self.is_externally_hosted: bool = object_handler(\n episode, \"is_externally_hosted\"\n )\n\n self.is_playable: bool = object_handler(episode, \"is_playable\")\n\n self.language: str = object_handler(episode, \"language\")\n\n self.languages: list = object_handler(episode, \"languages\")\n\n self.name: str = object_handler(episode, \"name\")\n\n self.release_date: str = object_handler(episode, \"release_date\")\n\n self.release_date_precision: str = object_handler(\n episode, \"release_date_precision\"\n )\n\n self.show: Show = Show(episode.get(\"show\"))\n\n self.type: str = object_handler(episode, \"type\")\n\n self.uri: str = object_handler(episode, \"uri\")\n\n def get_duration(self) -> Time.Timestamp:\n return self.duration\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: 
Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n\n def add_to_playlist(self, playlist_href: str, headers: dict):\n\n return requests.post(\n playlist_href, data=json.dumps({\"uris\": [self.uri]}), headers=headers\n )\n" }, { "alpha_fraction": 0.5442177057266235, "alphanum_fraction": 0.5442177057266235, "avg_line_length": 38.55172348022461, "blob_id": "d659724994d38d57f5285d9995a39f059333b6a4", "content_id": "f15f67025e2eab09d2fe9da2b53a33e87963e01a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1176, "license_type": "no_license", "max_line_length": 75, "num_lines": 29, "path": "/spotibot/mongo/conn/Connector.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import configparser\r\nimport os\r\n\r\n\r\ndef get_creds(collection: str, database=\"SpotiBot\"):\r\n\r\n config = configparser.ConfigParser()\r\n config.read(os.path.join(os.getcwd(), \"mongo_creds.cfg\"))\r\n\r\n conn_str = (\r\n f\"mongodb+srv://{config.get('mongo', 'USERNAME')}:\"\r\n f\"{config.get('mongo', 'PASSWORD')}@\"\r\n f\"{config.get('mongo', 'DATABASE')}-gkkvg.mongodb\"\r\n f\".net/{config.get('mongo', 'COLLECTION')}?\"\r\n f\"retryWrites=true&w=majority\"\r\n )\r\n # TODO: Figure out why feeding the collection name as arguments results\r\n # in an invalid collection\r\n # conn_str = f\"mongodb+srv://{config.get('mongo', 'USERNAME')}:\" \\\r\n # f\"{config.get('mongo', 'PASSWORD')}@\" \\\r\n # f\"{config.get('mongo', 'DATABASE')}-gkkvg.mongodb\" \\\r\n # f\".net/{collection}?\" \\\r\n # f\"retryWrites=true&w=majority\"\r\n # conn_str = f\"mongodb+srv://{config.get('mongo', 'USERNAME')}:\" \\\r\n # f\"{config.get('mongo', 'PASSWORD')}@\" \\\r\n # f\"SpotiBot-gkkvg.mongodb\" \\\r\n # f\".net/{collection}?\" \\\r\n # f\"retryWrites=true&w=majority\"\r\n return conn_str\r\n" }, { "alpha_fraction": 0.597309410572052, "alphanum_fraction": 0.5991031527519226, "avg_line_length": 28.97222137451172, "blob_id": "44951eb0e5371ce8d343cb0b38e4236b43ad5cd3", "content_id": "68a840dd746bf88a894b6da170189b0193926079", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1115, "license_type": "no_license", "max_line_length": 77, "num_lines": 36, "path": "/spotibot/mongo/core/objects/Activity.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "from mongoengine import *\r\n\r\nfrom spotibot.mongo.conn import Connector\r\n\r\nfrom spotibot.mongo.objects import General as genmongo\r\n\r\n\r\nclass Activity:\r\n def __init__(self, spot_obj):\r\n\r\n playback = spot_obj.pop(\"playback\")\r\n\r\n 
self.current = genmongo.Current().from_json(spot_obj.json)\r\n\r\n        current_typ = spot_obj.get(\"currently_playing_type\")\r\n\r\n        if current_typ == \"track\":\r\n            self.current.playback = genmongo.Track.from_json(playback.json)\r\n\r\n        elif current_typ == \"episode\":\r\n            self.current.playback = genmongo.Episode.from_json(playback.json)\r\n\r\n        else:\r\n            self.current.playback = None\r\n\r\n        # TODO: Switch this to be based off of currently_playing_type field\r\n        #  instead of the object_map dictionary\r\n        # embedded = \\\r\n        #     [v[0] for k, v in genmongo.object_map.items()\r\n        #      if isinstance(playback, k)][0]\r\n        #\r\n        # self.current.playback = \\\r\n        #     embedded.from_json(playback.json)\r\n\r\n    def save(self, **kwargs):\r\n        return self.current.save(force_insert=True, **kwargs)\r\n" }, { "alpha_fraction": 0.5599849820137024, "alphanum_fraction": 0.561113178730011, "avg_line_length": 24.59000015258789, "blob_id": "d7e9bec1f202b95097b258cca51b9c7eb34469cb", "content_id": "bdc4e373dd29fc8d5bef55e2978aaab64eef3879", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2659, "license_type": "no_license", "max_line_length": 81, "num_lines": 100, "path": "/spotibot/mongo/utils/Handlers.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import json\r\nimport re\r\n\r\n\r\ndef object_handler(object_to_search: object, key_to_find: str, default=None):\r\n    \"\"\"Getter utility function to handle attribute extraction from base\r\n    dictionaries as well as SpotiBot and/or Mongo objects.\r\n\r\n    Args:\r\n        object_to_search: Object within which to find the attribute value\r\n        key_to_find: Name of the attribute to find\r\n        default: Value to return if attribute doesn't exist within object\r\n\r\n    \"\"\"\r\n    if isinstance(object_to_search, dict):\r\n        return object_to_search.get(key_to_find, default)\r\n\r\n    else:\r\n        try:\r\n            return vars(object_to_search).get(key_to_find, default)\r\n        except:\r\n            return default\r\n\r\n\r\ndef get_serializable(attr_to_jsonify):\r\n    \"\"\"Attempts to run serialization method on an attribute or a list of\r\n    attributes.\r\n\r\n    Args:\r\n        attr_to_jsonify: Attribute to attempt to serialize\r\n\r\n    Returns:\r\n        Either the serializable form of the attribute or its original form\r\n        if to_dict() method cannot be called on it\r\n    \"\"\"\r\n    if isinstance(attr_to_jsonify, list):\r\n\r\n        try:\r\n            # v_json = [val.to_dict() for val in attr_to_jsonify]\r\n            return [val.to_dict() for val in attr_to_jsonify]\r\n        except:\r\n            return attr_to_jsonify\r\n            # v_json = [val for val in attr_to_jsonify]\r\n\r\n    else:\r\n        try:\r\n            # v_json = attr_to_jsonify.to_dict()\r\n            return attr_to_jsonify.to_dict()\r\n        except:\r\n            # v_json = attr_to_jsonify\r\n            return attr_to_jsonify\r\n\r\n    # return v_json\r\n\r\n\r\n# def type_to_str(field, to_replace):\r\n#     stringified = str(field)\r\n#\r\n#     matches = re.findall(r\"class\\s'(\\w+)'\", stringified)\r\n#\r\n#     if matches:\r\n#         field = matches[0]\r\n#         for old, new in to_replace.items():\r\n#             field = field.replace(old, new)\r\n#\r\n#     return field\r\n\r\n\r\n# def is_iterable(val):\r\n#     if isinstance(val, str):\r\n#         return False\r\n#     else:\r\n#         try:\r\n#             iter(val)\r\n#             return True\r\n#         except:\r\n#             return False\r\n\r\n\r\n# def has_items(v):\r\n#     try:\r\n#         for k, v in vars(v).items():\r\n#             pass\r\n#         return True\r\n#     except:\r\n#         return False\r\n\r\n\r\ndef is_jsonable(x):\r\n    try:\r\n        json.dumps(x)\r\n        return True\r\n    except:\r\n        return False\r\n\r\n\r\n# def is_serializable(list_of_dicts: list):\r\n#     assert 
isinstance(list_of_dicts, list), f\"Argument is not of type <'list>'\"\r\n# cnt_ser = [1 if is_jsonable(v) else 0 for v in list_of_dicts]\r\n# return True if sum(cnt_ser) == len(list_of_dicts) else False\r\n" }, { "alpha_fraction": 0.6073972582817078, "alphanum_fraction": 0.6073972582817078, "avg_line_length": 31.882883071899414, "blob_id": "62f4109dd87e47e19ac6f7dc5692b3c70300e65b", "content_id": "a6850b5f74f5fa68d6659c0021ce8e7e3461a2bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3650, "license_type": "no_license", "max_line_length": 84, "num_lines": 111, "path": "/spotibot/core/objects/Device.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import json\nfrom spotibot.mongo.utils.Handlers import object_handler, get_serializable\n\n\nclass Device:\n \"\"\"Auto-generated attribute instantiation docstring for Device\n Object\n\n Note: Parameter description in below docstring is populated based\n on the descriptions at the following link:\n https://developer.spotify.com/documentation/web-\n api/reference/object-model\n\n Please consult their official documentation for more in-depth\n information & full-linking across pages.\n\n Attributes:\n id (str): The device ID. This may be null.\n is_active (bool): If this device is the currently active device.\n is_private_session (bool): If this device is currently in a\n private session.\n is_restricted (bool): Whether controlling this device is\n restricted. At present if this is true then no Web API\n commands will be accepted by this device.\n name (str): The name of the device.\n type (str): Device type, such as Computer, Smartphone or\n Speaker.\n volume_percent (int): The current volume in percent. This may\n be null.\n \"\"\"\n\n def __init__(self, device: dict):\n self.id: str = object_handler(device, \"id\")\n\n # self.id.accepted_values = \\\n # [None, 'Computer', 'Speaker', 'Smartphone']\n\n self.is_active: bool = object_handler(device, \"is_active\")\n\n self.is_private_session: bool = object_handler(device, \"is_private_session\")\n\n self.is_restricted: bool = object_handler(device, \"is_restricted\")\n\n self.name: str = object_handler(device, \"name\")\n\n self.type: str = object_handler(device, \"type\")\n\n self.volume_percent: int = object_handler(device, \"volume_percent\")\n\n # def validate(self):\n # validation_dict = {}\n # for k, v in vars(self).items():\n # sub = {\n # 'type': type(v),\n # 'null_possible': False,\n # 'accepted_vals': []\n # }\n # validation_dict[k] = sub\n # return validation_dict\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all 
attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n" }, { "alpha_fraction": 0.759036123752594, "alphanum_fraction": 0.759036123752594, "avg_line_length": 25.66666603088379, "blob_id": "ecd2af6030604f1d057bb79054476a51a4dd081a", "content_id": "96ebd719524ecc93bc608c24b89fec6fabea6725", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 83, "license_type": "no_license", "max_line_length": 63, "num_lines": 3, "path": "/spotibot/core/__init__.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "from spotibot.core.utils import Hasher, FileManager, GetConfigs\r\n\r\n# DocParser, \\\r\n" }, { "alpha_fraction": 0.5346659421920776, "alphanum_fraction": 0.5376869440078735, "avg_line_length": 29.93613624572754, "blob_id": "6702ee2a1ab6474d1ddf099710c9a239799eae32", "content_id": "0a3ac1f5a063137b28f7008efac1c984370b04c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19861, "license_type": "no_license", "max_line_length": 117, "num_lines": 642, "path": "/spotibot/core/objects/Activity.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "# !/usr/bin/env python\n# coding: utf-8\n\nimport json\nimport sys\nimport os\nfrom typing import Generator\nimport time\nimport requests\n\nfrom mongoengine import *\n\nproject_dir = r\"/home/gem7318/Github/SpotiBot\"\nsys.path.append(project_dir)\nos.chdir(project_dir)\n\nfrom spotibot.mongo.conn import Connector\n\nfrom spotibot.core.objects import (\n Music as music,\n Podcasts as podcast,\n User as user,\n Context as context,\n Device as device,\n Time as spottime,\n)\n\nfrom spotibot.core.utils import Hasher\n\nfrom spotibot.mongo.utils.Handlers import get_serializable\n\nfrom spotibot.mongo.core.objects import Activity as ActivityDoc\n\n\nclass Request:\n def __init__(self, href, headers):\n\n # :: Request Detail ---------------------------------------------------\n self.unix_request_tmstmp: spottime.Timestamp = spottime.Timestamp(\n time.time(), base=\"seconds\"\n )\n\n response = requests.get(href, headers=headers)\n\n self.ok: bool = response.ok\n\n self.status_code: int = response.status_code\n\n if self.ok and self.status_code == 200:\n self.result: dict = response.json()\n else:\n self.result: dict = {}\n\n self.endpoint_id: str = Hasher.quick_hash(f\"{href}{self.unix_request_tmstmp}\")\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or 
default if attribute does not exist\n        \"\"\"\n        return vars(self).get(item, default)\n\n    def to_dict(self) -> dict:\n        \"\"\"Calling utility serialization method on all attributes.\n\n        Returns:\n            String following valid json structure for mongo serialization.\n        \"\"\"\n        return {k: get_serializable(v) for k, v in vars(self).items()}\n\n    @property\n    def json(self) -> str:\n        \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n        serialization purposes\n\n        Returns:\n            Serializable 'json' output of SpotiBot object\n        \"\"\"\n        return json.dumps(self.to_dict())\n\n\nclass Current(Request):\n    def __init__(self, user_id, headers, href):\n        super().__init__(href, headers)\n\n        self.user_id = user_id\n\n        self.href = href\n\n        # :: Track or Show Instantiation --------------------------------------\n        if self.result.get(\"currently_playing_type\") == \"track\":\n\n            self.playback: music.Track = music.Track(self.result.get(\"item\"))\n\n        elif self.result.get(\"currently_playing_type\") == \"episode\":\n\n            self.playback: podcast.Episode = podcast.Episode(self.result.get(\"item\"))\n\n        else:\n\n            self.playback = None\n\n        if self.playback:\n            # :: Context ------------------------------------------------------\n            self.context: context.Context = context.Context(self.result.get(\"context\"))\n\n            # :: Device -------------------------------------------------------\n            self.device: list = [device.Device(self.result.get(\"device\"))]\n\n            # :: Freestanding Contextual Playback Information -----------------\n            self.currently_playing_type = self.result.get(\"currently_playing_type\")\n\n            self.shuffle_state = self.result.get(\"shuffle_state\")\n\n            self.repeat_state = self.result.get(\"repeat_state\")\n\n            self.actions = self.result.get(\"actions\")\n\n            self.is_playing = self.result.get(\"is_playing\")\n\n            # :: Numeric Contextual Playback Information ----------------------\n            self.progress = spottime.Timestamp(\n                self.result.get(\"progress_ms\"), base=\"milliseconds\"\n            )\n\n            self.unix_refresh_tmstmp = spottime.Timestamp(\n                self.result.get(\"timestamp\"), base=\"milliseconds\"\n            )\n\n            # :: Newly Formatted Playback Information -------------------------\n            self.time_remaining = self.playback.duration - self.progress\n\n            self.unix_start_tmstmp = self.unix_request_tmstmp - self.progress\n\n            self.unix_expected_end_tmstmp = (\n                self.unix_request_tmstmp + self.time_remaining\n            )\n\n            # :: Current Specific Attributes ----------------------------------\n            self._time_listened: spottime.Timestamp = self.progress\n\n            self.activity_id = Hasher.quick_hash(\n                f\"{self.playback.id}\"\n                f\"-{self.unix_start_tmstmp.seconds}\"\n                f\"-{self.user_id}\"\n            )\n\n            self._id = f\"{self.user_id}~{self.unix_request_tmstmp.seconds}~v1\"\n\n            self._request_cnt: int = 1\n\n    def now(self, headers):\n        return Current(headers=headers, user_id=self.user_id, href=self.href)\n\n    @property\n    def time_listened(self):\n        return self._time_listened\n\n    @time_listened.setter\n    def time_listened(self, time_listened: spottime.Timestamp):\n        self._time_listened = time_listened\n\n    @property\n    def request_cnt(self):\n        return self._request_cnt\n\n    @request_cnt.setter\n    def request_cnt(self, prior=1):\n        self._request_cnt = self._request_cnt + prior\n\n    # @property\n    # def uid(self):\n    #     return self._id\n    #\n    # @uid.setter\n    # def uid(self, uid: str):\n    #     self._id = uid\n\n    # def __copy__(self):\n    #     return self.__copy__()\n\n    def __repr__(self):\n        return f\"Current('{self.user_id}')\"\n\n    def 
__str__(self):\n return f\"'Current' Object for UserDBO: {self.user_id}\"\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n\n def pop(self, item):\n return vars(self).pop(item)\n\n\nclass Delta:\n def __init__(self, latest: Current, prior: Current):\n\n # :: Comparison (numeric) ---------------------------------------------\n self.progress: spottime.Timestamp = latest.progress - prior.progress\n\n self.duration: spottime.Timestamp = latest.playback.duration - prior.playback.duration\n\n self.time_remaining: spottime.Timestamp = latest.time_remaining - prior.time_remaining\n\n self.unix_start_tmstmp: spottime.Timestamp = latest.unix_start_tmstmp - prior.unix_start_tmstmp\n\n self.unix_request_tmstmp: spottime.Timestamp = latest.unix_request_tmstmp - prior.unix_request_tmstmp\n\n self.unix_refresh_tmstmp: spottime.Timestamp = latest.unix_refresh_tmstmp - prior.unix_refresh_tmstmp\n\n self.prior_request_to_latest_start: spottime.Timestamp = latest.unix_start_tmstmp - prior.unix_request_tmstmp\n\n self.progress_exceeds_request: bool = latest.progress > self.unix_request_tmstmp\n # __________________________________________________________________ ::\n\n # :: Comparison (boolean) ---------------------------------------------\n self.is_same_type: bool = isinstance(latest.playback, type(prior.playback))\n\n self.is_same_id: bool = latest.playback.id == prior.playback.id\n\n self.is_same_device: bool = latest.device[0].name == prior.device[0].name\n\n if (\n not self.is_same_id\n and latest.unix_start_tmstmp < prior.unix_expected_end_tmstmp\n ):\n\n self.cutoff_prior: bool = True\n\n else:\n\n self.cutoff_prior: bool = False\n # __________________________________________________________________ ::\n\n # :: 'Current ID' Decision Tree ---------------------------------------\n self.activity5 = (\n self.progress_exceeds_request,\n {\n True: (False, False, \"Rewound track\"),\n False: (False, True, \"Restarted Track\"),\n },\n )\n\n self.activity4 = (\n self.progress.is_zero,\n {\n True: (False, False, \"Playback paused\"),\n False: (True, None, self.activity5),\n },\n )\n\n self.activity3 = (\n self.progress.is_positive,\n {\n True: (False, False, \"Continued playback\"),\n False: (True, None, self.activity4),\n },\n 
)\n\n self.activity2 = (\n self.is_same_id,\n {\n True: (True, None, self.activity3),\n False: (False, True, \"New activity instance started\"),\n },\n )\n\n self.activity1 = (\n latest.playback,\n {\n True: (True, False, self.activity2),\n False: (False, False, \"No activity - sleep\"),\n },\n )\n\n self.activity_zipped = (\n val\n for val in [\n self.activity1,\n self.activity2,\n self.activity3,\n self.activity4,\n self.activity5,\n ]\n )\n # __________________________________________________________________ ::\n\n # :: Listened Time if Latest ID [does] equal Prior ID -----------------\n if self.is_same_id:\n\n self.listened1 = (\n self.progress_exceeds_request,\n {\n True: (False, prior.time_listened + self.unix_request_tmstmp, None),\n False: (\n False,\n prior.time_listened + self.prior_request_to_latest_start,\n None,\n ),\n },\n )\n\n self.zipped_listened = (val for val in [self.listened1])\n\n # :: Listened Time if Latest ID does [not] equal Prior ID -------------\n else:\n\n self.listened2 = (\n self.unix_request_tmstmp < prior.time_remaining,\n {\n True: (False, prior.time_listened + self.unix_request_tmstmp, None),\n False: (\n False,\n prior.time_listened\n - (latest.unix_start_tmstmp.__sub__(prior.unix_request_tmstmp)),\n None,\n ),\n },\n )\n\n self.listened1 = (\n self.cutoff_prior,\n {\n True: (True, self.listened2, None),\n False: (False, prior.playback.duration, None),\n },\n )\n\n self.zipped_listened = (val for val in [self.listened1, self.listened2])\n # __________________________________________________________________ ::\n # __________________________________________________________________ ::\n\n @staticmethod\n def validate(condition):\n return True if condition else False\n\n @staticmethod\n def eval_zipper(zipper: Generator):\n\n condition, test = next(zipper)\n desc1 = test.get(Delta.validate(condition))\n cont, outcome, desc = desc1\n\n while cont:\n condition, test = next(zipper)\n desc1 = test.get(Delta.validate(condition))\n cont, outcome, desc = desc1\n\n return outcome, desc\n\n @property\n def activity_comparison(self):\n return self.eval_zipper(self.activity_zipped)\n\n @property\n def prior_listened_detail(self):\n return self.eval_zipper(self.zipped_listened)\n\n def compare(self):\n is_new, desc = self.activity_comparison\n prior_listened, _ = self.prior_listened_detail\n return is_new, prior_listened, desc\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n 
\"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n\n\nclass Activity(user.UserDBO):\n def __init__(self, username: str):\n super().__init__(username)\n\n self.username: str = self.user_id\n\n self.current = Current(\n user_id=username, headers=self.headers(), href=self.current_playback\n )\n # TODO: Add a setter to Current class containing\n\n self.last_added: dict = {\"track\": \"\", \"episode\": \"\"}\n\n self.play = self.current.now(self.headers())\n\n if self.play.playback:\n self.cached: list = [self.play]\n else:\n self.cached: list = []\n\n self.exceptions: list = []\n\n def write(self, cache_maximum: int = 10):\n\n # TODO, create a '.batch()' method in the Activity class or make it\n # a parent/sub-class that's instantiated with a list of instances\n # on which a 'save()' method can be immediately called\n if len(self.cached) >= cache_maximum:\n\n remainder = self.cached.pop(-1)\n\n connect(host=Connector.get_creds(collection=\"activity\"), alias=\"activity\")\n\n for val in self.cached:\n\n try:\n ActivityDoc.Activity(val).save()\n # TODO: Add an insert_many() method instead of\n # repeatedly calling .save()\n except:\n self.exceptions.append(val)\n\n disconnect(\"activity\")\n\n self.cached = [remainder]\n\n return self\n\n @property\n def resumed_playback(self):\n return (\n self.play.playback.id == self.last_added[self.play.currently_playing_type]\n )\n\n def add_to_all_time_played(self) -> object:\n\n self.last_added[self.play.currently_playing_type] = self.play.playback.id\n\n self.play.playback.add_to_playlist(\n playlist_href=self.activity_playlist_hrefs.get(\n self.play.currently_playing_type\n ),\n headers=self.headers(post=True),\n )\n\n return self\n\n def run(\n self,\n active_req_rate: int = 25,\n dormant_req_rate: int = 300,\n cache_maximum: int = 10,\n ) -> object:\n\n self.play = self.current.now(self.headers())\n\n if self.play.playback:\n self.cached.append(self.play)\n\n while self.play.status_code not in [\n 400, # Bad / Malformed Request\n 401, # Unauthorized\n 403, # Forbidden\n ]:\n\n # print(f\"Tokens expiring in: {self.tokens.expires_in.minutes}\")\n\n self.play = self.current.now(self.headers())\n\n if self.play.playback and not self.cached:\n\n self.cached.append(self.play)\n\n elif self.play.playback:\n\n delta = Delta(self.play, self.cached[-1])\n is_new, time_listened, desc = delta.compare()\n\n # TODO: Figure out why time listened continues to accumulate\n # even after pausing has occurred\n # print(time_listened.seconds)\n print(\n f\"Outcome:\\n\\t{desc}\\n\"\n f\"New Play:\\n\\t{is_new}\\n\"\n f\"Resumed Playback:\\n\\t{self.resumed_playback}\"\n )\n\n if not is_new:\n # self.play.request_cnt = delta.prior.request_cnt\n self.play.request_cnt = self.cached[-1].request_cnt\n self.play.time_listened = time_listened\n self.cached.pop(-1)\n\n # elif not delta.is_same_id:\n elif self.resumed_playback:\n print(f\"< no current activity - {dormant_req_rate}s sleep>\")\n time.sleep(dormant_req_rate)\n\n # elif not self.resumed_playback:\n else:\n\n print(f\"< adding to activity playlist >\\n\")\n\n self.last_added[\n self.play.currently_playing_type\n ] = self.play.playback.id\n\n self.add_to_all_time_played()\n\n self.cached.append(self.play)\n self.write(cache_maximum=cache_maximum)\n time.sleep(active_req_rate)\n\n # else:\n print(f\"< no current activity - {dormant_req_rate}s sleep>\")\n 
time.sleep(dormant_req_rate)\n\n        return self\n\n\n# TODO: Add something that always checks the last track added to the podcast\n#  all time played playlist to make sure not repeatedly adding pods -\n#  alternatively could just have it always store that last track and last\n#  podcast added to each playlist and only add new tracks that aren't equal\n#  to those\n" }, { "alpha_fraction": 0.8541666865348816, "alphanum_fraction": 0.8541666865348816, "avg_line_length": 46, "blob_id": "f413db13a2c1835feea08c571e83ceb275629695", "content_id": "48bc07112d564a27939948bc6a5433729a1c6f7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 46, "num_lines": 1, "path": "/spotibot/base/config/__init__.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "from spotibot.base.config import Configuration\r\n" }, { "alpha_fraction": 0.5549656748771667, "alphanum_fraction": 0.5724546909332275, "avg_line_length": 27.33628273010254, "blob_id": "2ee43e7857fb135998a88ebf6c8ed3b6366dfc62", "content_id": "fb68f4f837f3d94cda0356e7bd36d87b4b728e54", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3202, "license_type": "no_license", "max_line_length": 78, "num_lines": 113, "path": "/spotibot/core/objects/Time.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import json\nfrom spotibot.mongo.utils.Handlers import object_handler, get_serializable\n\n\nclass Timestamp:\n    def __init__(self, raw, base=\"seconds\"):\n\n        self.raw = raw\n        self.base = base\n\n        if base == \"seconds\":\n            self.adj_ms = 1_000\n            self.adj_ns = 1_000_000_000\n            self.adj_sec = 1 / 1\n\n        elif base == \"milliseconds\":\n            self.adj_ms = 1 / 1\n            self.adj_ns = 1_000_000\n            self.adj_sec = 1 / 1_000\n\n        elif base == \"nanoseconds\":\n            self.adj_ms = 1 / 1_000_000\n            self.adj_ns = 1 / 1\n            self.adj_sec = 1 / 1_000_000_000\n\n        self.seconds: int = int(self.adj_sec * self.raw)\n\n        self.milliseconds: int = int(self.adj_ms * self.raw)\n\n        self.nanoseconds: int = int(self.adj_ns * self.raw)\n\n        self.minutes: int = int(self.seconds / 60)\n\n        self.is_positive: bool = self.raw > 0\n\n        self.is_negative: bool = self.raw < 0\n\n        self.is_zero: bool = self.raw == 0\n\n    def __int__(self):\n        return int(self.raw)\n\n    def __add__(self, other):\n        other_secs = other.seconds\n        total_secs = self.seconds + other_secs\n        return Timestamp(total_secs, base=\"seconds\")\n\n    def __sub__(self, other):\n        other_secs = other.seconds\n        total_secs = self.seconds - other_secs\n        return Timestamp(total_secs, base=\"seconds\")\n\n    def __lt__(self, other) -> bool:\n        return self.seconds < other.seconds\n\n    def __gt__(self, other) -> bool:\n        return self.seconds > other.seconds\n\n    def __eq__(self, other) -> bool:\n        \"\"\"Equality comparison to other objects.\n\n        Args:\n            other: Comparison object\n\n        Returns:\n            Boolean value indicating whether or not the attributes and their\n            associated values are equal between the two objects\n        \"\"\"\n        return vars(self) == vars(other)\n\n    def __getitem__(self, item: str):\n        \"\"\"Getter method for subscriptability.\n\n        Args:\n            item: Attribute to get the value of\n\n        Returns:\n            Attribute value if exists in object's namespace\n        \"\"\"\n        return getattr(self, item)\n\n    def get(self, item: str, default=None):\n        \"\"\"Method for extracting attributes without throwing existence errors.\n\n        Args:\n            item: 
Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n" }, { "alpha_fraction": 0.5351418256759644, "alphanum_fraction": 0.5376079082489014, "avg_line_length": 21.764705657958984, "blob_id": "fe84aec18bef5af5f40e9d53e74a765bcfffedfc", "content_id": "40937e1395273e99e64a2c4468bf73fdb36d032d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1622, "license_type": "no_license", "max_line_length": 79, "num_lines": 68, "path": "/tests/conftest.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "\r\n\r\nimport pytest\r\nimport os\r\nimport pickle\r\nimport json\r\n\r\nfrom spotibot.core.objects import \\\r\n Music as music, \\\r\n Podcasts as podcast, \\\r\n Context as context, \\\r\n Device as device\r\n\r\nfixture_src_dir = os.path.join(os.getcwd(), '_pytest_Fixture_src')\r\n\r\npickle_obj_map: dict = \\\r\n {'track~full': music.Track,\r\n 'album~simplified': music.Album,\r\n 'artist~simplified': music.Artist,\r\n 'device~base': device.Device,\r\n 'context~base': context.Context\r\n }\r\n\r\n\r\[email protected](\r\n params=[\r\n (pickle_obj_map, fixture_src_dir)],\r\n\r\n scope='module'\r\n)\r\ndef result(request):\r\n\r\n fixture = {}\r\n\r\n obj_map, src_dir = request.param\r\n\r\n for obj_name, spot_obj in obj_map.items():\r\n\r\n in_dir = os.path.join(src_dir, f\"in_{obj_name}.pkl\")\r\n out_dir = os.path.join(src_dir, f\"out_{obj_name}.pkl\")\r\n\r\n with open(in_dir, 'rb') as r:\r\n result_in = pickle.load(r)\r\n\r\n with open(out_dir, 'rb') as r2:\r\n expected_out = pickle.load(r2)\r\n\r\n fixture[obj_name] = (spot_obj, result_in, expected_out)\r\n\r\n return fixture\r\n\r\n\r\n# TODO: Add other objects and clean up the code/re-factor if needed\r\n# -----------------------------------------------------------------------------\r\n\r\n\r\n# @pytest.fixture(\r\n# params=[\r\n# r'activity_result_track.pkl'],\r\n# # ({'attr': 2}, 'attr', 2)\r\n# # ],\r\n# scope='module'\r\n# )\r\n# def result(request):\r\n# dir_to_open = os.path.join(fixture_src_dir, request.param)\r\n#\r\n# with open(dir_to_open, 'rb') as r:\r\n# result = pickle.load(r)\r\n#\r\n# return result\r\n\r\n" }, { "alpha_fraction": 0.6863238215446472, "alphanum_fraction": 0.6863238215446472, "avg_line_length": 26.899749755859375, "blob_id": "cab1afa2432fa13e20b5f69a7588a6ec9ddbcc08", "content_id": "b11dc95da329c280bc8b720c53d43ca6cf8d20e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11531, "license_type": "no_license", "max_line_length": 85, "num_lines": 399, "path": "/spotibot/mongo/objects/General.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import configparser\r\nimport os\r\n\r\nconfig = configparser.ConfigParser()\r\n\r\nconfig.read(os.path.join(os.getcwd(), \"mongo_creds.cfg\"))\r\n\r\nfrom mongoengine import *\r\n\r\nfrom 
spotibot.core.objects import (\r\n Activity as act,\r\n Music as music,\r\n Podcasts as podcasts,\r\n Device as device,\r\n Time as spottime,\r\n General as gen,\r\n Context as cont,\r\n)\r\n\r\n\r\nclass Timestamp(EmbeddedDocument):\r\n raw: int = IntField(required=False)\r\n\r\n base: str = StringField(required=False)\r\n\r\n adj_ms: int = IntField(required=False)\r\n\r\n adj_ns: int = IntField(required=False)\r\n\r\n adj_sec: int = IntField(required=False)\r\n\r\n seconds: int = IntField(required=False)\r\n\r\n milliseconds: int = IntField(required=False)\r\n\r\n nanoseconds: int = IntField(required=False)\r\n\r\n minutes: int = IntField(required=False)\r\n\r\n is_positive: bool = BooleanField(required=False)\r\n\r\n is_negative: bool = BooleanField(required=False)\r\n\r\n is_zero: bool = BooleanField(required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass Device(EmbeddedDocument):\r\n is_active: bool = BooleanField(required=False)\r\n\r\n is_private_session: bool = BooleanField(required=False)\r\n\r\n is_restricted: bool = BooleanField(required=False)\r\n\r\n name: str = StringField(required=False)\r\n\r\n type: str = StringField(required=False)\r\n\r\n volume_percent: int = IntField(required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass ExternalId(EmbeddedDocument):\r\n typ: str = StringField(required=False)\r\n\r\n id: str = StringField(required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass ExternalUrl(EmbeddedDocument):\r\n typ: str = StringField(required=False)\r\n\r\n url: str = StringField(required=False)\r\n\r\n spotify: str = StringField(required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass Image(EmbeddedDocument):\r\n url: str = StringField(required=False, primary_key=True)\r\n\r\n height: int = IntField(required=False)\r\n\r\n width: int = IntField(required=False)\r\n\r\n id: str = StringField(required=False, primary_key=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass Artist(EmbeddedDocument):\r\n external_urls = EmbeddedDocumentField(ExternalUrl, required=False)\r\n\r\n external_ids = EmbeddedDocumentField(ExternalId, required=False)\r\n\r\n followers: dict = DictField(required=False)\r\n\r\n genres = ListField(required=False)\r\n\r\n href: str = StringField(required=False)\r\n\r\n id: str = StringField(required=False, primary_key=True)\r\n\r\n images = ListField(required=False)\r\n\r\n name: str = StringField(required=False)\r\n\r\n popularity: int = IntField(required=False)\r\n\r\n type: str = StringField(required=False)\r\n\r\n uri: str = StringField(required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass Album(EmbeddedDocument):\r\n # _id: str = StringField(required=True, primary_key=True)\r\n\r\n album_type: str = StringField(required=False)\r\n\r\n artists: list = ListField(EmbeddedDocumentField(Artist, required=False))\r\n\r\n available_markets: list = ListField(required=False)\r\n\r\n external_urls = EmbeddedDocumentField(ExternalUrl, required=False)\r\n\r\n href: str = StringField(required=False)\r\n\r\n id: str = StringField(required=False, primary_key=True)\r\n\r\n images: list = ListField(EmbeddedDocumentField(Image, 
required=False))\r\n\r\n name: str = StringField(required=False)\r\n\r\n release_date: str = StringField(required=False)\r\n\r\n release_date_precision: str = StringField(required=False)\r\n\r\n restrictions: str = StringField(required=False)\r\n\r\n type: str = StringField(required=False)\r\n\r\n uri: str = StringField(required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass Track(EmbeddedDocument):\r\n # _id: str = StringField(required=True, primary_key=True)\r\n\r\n album = EmbeddedDocumentField(Album, required=False)\r\n\r\n artists: list = ListField(EmbeddedDocumentField(Artist, required=False))\r\n\r\n available_markets = ListField(required=False)\r\n\r\n disc_number: int = IntField(required=False)\r\n\r\n duration: int = EmbeddedDocumentField(Timestamp, required=False)\r\n\r\n explicit = BooleanField(required=False)\r\n\r\n external_ids = EmbeddedDocumentField(ExternalId, required=False)\r\n\r\n external_urls = EmbeddedDocumentField(ExternalUrl, required=False)\r\n\r\n href: str = StringField(required=False)\r\n\r\n id: str = StringField(required=False, primary_key=True)\r\n\r\n is_playable: bool = BooleanField(required=False)\r\n\r\n linked_from = StringField(required=False)\r\n\r\n restrictions = StringField(required=False)\r\n\r\n name: str = StringField(required=False)\r\n\r\n popularity: int = IntField(required=False)\r\n\r\n preview_url: str = StringField(required=False)\r\n\r\n track_number: int = IntField(required=False)\r\n\r\n type: str = StringField(required=False)\r\n\r\n uri: str = StringField(required=False)\r\n\r\n is_local: bool = BooleanField(required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass Show(EmbeddedDocument):\r\n # _id: str = StringField(required=True, primary_key=True)\r\n\r\n available_markets = ListField(required=False)\r\n\r\n copyrights: str = StringField(required=False)\r\n\r\n description: str = StringField(required=False)\r\n\r\n explicit: bool = BooleanField(required=False)\r\n\r\n external_urls = EmbeddedDocumentField(ExternalUrl, required=False)\r\n\r\n href: str = StringField(required=False)\r\n\r\n id: str = StringField(required=False)\r\n\r\n images: list = ListField(EmbeddedDocumentField(Image, required=False))\r\n\r\n is_externally_hosted: bool = BooleanField(required=False)\r\n\r\n language: str = StringField(required=False)\r\n\r\n media_type: str = StringField(required=False)\r\n\r\n name: str = StringField(required=False)\r\n\r\n publisher: str = StringField(required=False)\r\n\r\n type: str = StringField(required=False)\r\n\r\n uri: str = StringField(required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass Episode(EmbeddedDocument):\r\n audio_preview_url: str = StringField(required=False)\r\n\r\n description: str = StringField(required=False)\r\n\r\n duration: int = EmbeddedDocumentField(Timestamp, required=False)\r\n\r\n explicit: bool = BooleanField(required=False)\r\n\r\n external_urls = EmbeddedDocumentField(ExternalUrl, required=False)\r\n\r\n href: str = StringField(required=False)\r\n\r\n id: str = StringField(required=False)\r\n\r\n images: list = ListField(EmbeddedDocumentField(Image, required=False))\r\n\r\n is_externally_hosted: bool = BooleanField(required=False)\r\n\r\n is_playable: bool = BooleanField(required=False)\r\n\r\n language: str = StringField(required=False)\r\n\r\n languages = 
ListField(required=False)\r\n\r\n name: str = StringField(required=False)\r\n\r\n release_date: str = StringField(required=False)\r\n\r\n release_date_precision: str = StringField(required=False)\r\n\r\n resume_point = StringField(required=False)\r\n\r\n show = StringField(required=False)\r\n\r\n type: str = StringField(required=False)\r\n\r\n uri: str = StringField(required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass Response(EmbeddedDocument):\r\n # _id: str = StringField(required=False, primary_key=True)\r\n\r\n ok: bool = BooleanField(required=False)\r\n\r\n status_code: int = IntField(required=False)\r\n\r\n result: dict = DictField(required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass Request(DynamicEmbeddedDocument):\r\n user_id: str = StringField(required=False)\r\n\r\n activity_endpoint: str = StringField(required=False)\r\n\r\n user_activity_playlist_id: str = StringField(required=False)\r\n\r\n unix_request_tmstmp: Timestamp = EmbeddedDocumentField(Timestamp, required=False)\r\n\r\n # response: Response = \\\r\n # EmbeddedDocumentField(Response, required=False)\r\n\r\n endpoint_id: str = StringField(required=False, primary_key=True)\r\n\r\n ok: bool = BooleanField(required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass Context(EmbeddedDocument):\r\n href: str = StringField(required=False)\r\n\r\n type: str = StringField(required=False)\r\n\r\n uri: str = StringField(required=False)\r\n\r\n external_urls = EmbeddedDocumentField(ExternalUrl, required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nclass Current(DynamicDocument):\r\n # TODO: In rest of script, change the built-in `.save()` method to use\r\n # the `.save()` method automatically inherited from Document and\r\n # DynamicDocument via the following source:\r\n # https://realpython.com/introduction-to-mongodb-and-python/\r\n\r\n _id: str = StringField(required=False, primary_key=True)\r\n\r\n user_id: str = StringField(required=False)\r\n\r\n activity_endpoint: str = StringField(required=False)\r\n\r\n user_playlist_dict: dict = DictField(required=False)\r\n\r\n user_activity_playlist_id: str = StringField(required=False)\r\n\r\n unix_request_tmstmp: Timestamp = EmbeddedDocumentField(Timestamp, required=False)\r\n\r\n # response: Response = \\\r\n # EmbeddedDocumentField(Response, required=False)\r\n\r\n endpoint_id: str = StringField(required=False)\r\n\r\n ok: bool = BooleanField(required=False)\r\n\r\n # request: Request = \\\r\n # EmbeddedDocumentField(Request, required=False)\r\n\r\n playback = GenericEmbeddedDocumentField(required=True)\r\n\r\n context: Context = EmbeddedDocumentField(Context, required=False)\r\n\r\n device: list = ListField(EmbeddedDocumentField(Device, required=False))\r\n\r\n currently_playing_type: str = StringField(required=False)\r\n\r\n shuffle_state: bool = BooleanField(required=False)\r\n\r\n repeat_state: str = StringField(required=False)\r\n\r\n actions: dict = DictField(required=False)\r\n\r\n is_playing: bool = BooleanField(required=False)\r\n\r\n progress: Timestamp = EmbeddedDocumentField(Timestamp, required=False)\r\n\r\n # unix_request_tmstmp: Timestamp = \\\r\n # EmbeddedDocumentField(Timestamp, required=False)\r\n\r\n unix_refresh_tmstmp: Timestamp = EmbeddedDocumentField(Timestamp, 
required=False)\r\n\r\n time_remaining: Timestamp = EmbeddedDocumentField(Timestamp, required=False)\r\n\r\n unix_start_tmstmp: Timestamp = EmbeddedDocumentField(Timestamp, required=False)\r\n\r\n unix_expected_end_tmstmp: Timestamp = EmbeddedDocumentField(\r\n Timestamp, required=False\r\n )\r\n\r\n time_listened: Timestamp = EmbeddedDocumentField(Timestamp, required=False)\r\n\r\n activity_id: str = StringField(required=False)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"activity\", \"db\": \"SpotiBot\"}\r\n\r\n\r\nobject_map = {\r\n music.Album: (Album(), Album),\r\n music.Track: (Track(), Track),\r\n music.Artist: (Artist(), Artist),\r\n gen.ExternalUrl: (ExternalUrl(), ExternalUrl),\r\n spottime.Timestamp: (Timestamp(), Timestamp),\r\n gen.Image: (Image(), Image),\r\n cont.Context: (Context(), Context),\r\n device.Device: (Device(), Device),\r\n podcasts.Episode: (Episode(), Episode),\r\n podcasts.Show: (Show(), Show),\r\n}\r\n# act.Request: (Request(), Request)}\r\nfrom spotibot.core.objects import Activity as activity\r\n" }, { "alpha_fraction": 0.7095990180969238, "alphanum_fraction": 0.7144593000411987, "avg_line_length": 21.514286041259766, "blob_id": "26399f9d760ec0be0afd5801cfda889669ef3c76", "content_id": "b6f1fe2aa6abe57102ebda2fa38b5941652977b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 823, "license_type": "no_license", "max_line_length": 63, "num_lines": 35, "path": "/spotibot/mongo/objects/mongo.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import configparser\r\nimport os\r\nimport pickle\r\nimport jsonpickle\r\n\r\nfrom spotibot.mongo.conn import Connector\r\n\r\nfrom spotibot.mongo.objects import General as genmong\r\n\r\nfrom mongoengine import *\r\n\r\npkl_dir = os.path.join(os.getcwd(), \"_pkl\")\r\n\r\nconnect(host=Connector.get_creds(collection=\"activity\"))\r\n\r\nwith open(os.path.join(pkl_dir, \"activity2.pkl\"), \"rb\") as r:\r\n activity2 = pickle.load(r)\r\n\r\n\r\ntrack_in = activity2.cached[3].playback\r\n\r\nmongo_track = genmong.Track()\r\nmongo_track[\"obj\"] = jsonpickle.encode(track_in)\r\nmongo_track[\"_id\"] = track_in.id\r\n\r\nmongo_track.save(force_insert=True)\r\n\r\nmongo_track_out = genmong.Track.objects(pk=track_in.id).first()\r\n\r\nmongo_track_out_dec = jsonpickle.decode(mongo_track_out[\"obj\"])\r\n\r\ntype(track_in)\r\ntype(mongo_track)\r\ntype(mongo_track_out)\r\ntype(mongo_track_out_dec)\r\n" }, { "alpha_fraction": 0.6056864857673645, "alphanum_fraction": 0.6116722822189331, "avg_line_length": 22.541284561157227, "blob_id": "b12762f642960cb991dbcd7392f58b8ca1f1fd19", "content_id": "ff420d3473fe6078a724abf0b287bd5677277c36", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2673, "license_type": "no_license", "max_line_length": 75, "num_lines": 109, "path": "/utils_independent/TestCases.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "from spotibot.core.objects import User as user\r\nimport requests\r\n\r\nimport pickle\r\nimport os\r\n\r\nfrom spotibot.core.objects import \\\r\n Music as music, \\\r\n Podcasts as podcast, \\\r\n Context as context, \\\r\n Device as device\r\n\r\n\r\ngem = user.UserDBO('125393293')\r\n\r\nrequest = requests.get(gem.current_playback, headers=gem.headers())\r\nresult = request.json()\r\n\r\nto_export: dict = \\\r\n {'track~full': result['item'],\r\n 'album~simplified': result.get('item').get('album'),\r\n 
'artist~simplified': result.get('item').get('artists')[0],\r\n 'device~base': result.get('device'),\r\n 'context~base': result.get('context')\r\n }\r\n\r\npickle_obj_map: dict = \\\r\n {'track~full': music.Track,\r\n 'album~simplified': music.Album,\r\n 'artist~simplified': music.Artist,\r\n 'device~base': device.Device,\r\n 'context~base': context.Context\r\n }\r\n\r\nfixture_src_dir = os.path.join(os.getcwd(), '_pytest_Fixture_src')\r\n\r\nfor obj_name, obj_dict in to_export.items():\r\n\r\n spot_obj = pickle_obj_map.get(obj_name)\r\n instantiated_obj = spot_obj(obj_dict)\r\n\r\n with open(\r\n os.path.join(fixture_src_dir, f'in_{obj_name}.pkl'),\r\n 'wb') as f:\r\n pickle.dump(obj_dict, f)\r\n\r\n with open(os.path.join(fixture_src_dir, f'out_{obj_name}.pkl'),\r\n 'wb') as f2:\r\n pickle.dump(instantiated_obj, f2)\r\n\r\n\r\n\r\n\r\n\r\n\r\nmusic.Track(result.get('item'))\r\nmusic.Artist(result.get('item').get('artists')[0])\r\n\r\nwith open(os.path.join(fixture_src_dir,\r\n 'activity_request_track.pkl'), 'wb') as f:\r\n pickle.dump(request, f)\r\n\r\nwith open(os.path.join(fixture_src_dir,\r\n 'activity_result_track.pkl'), 'wb') as f:\r\n pickle.dump(result, f)\r\n\r\nwith open(os.path.join(fixture_src_dir, 'device~base.pkl'), 'rb') as r:\r\n device_r = pickle.load(r)\r\n\r\n\r\nfrom importlib import reload\r\nfrom spotibot.core.objects import Device as device\r\nreload(device)\r\n\r\n\r\ntest = device.Device(result['device'])\r\n\r\nwith open(os.path.join(fixture_src_dir, 'device~base~out.pkl'), 'wb') as w:\r\n pickle.dump(test, w)\r\n\r\nwith open(os.path.join(fixture_src_dir, 'device~base~out.pkl'), 'rb') as r:\r\n device_r2 = pickle.load(r)\r\n\r\n\r\nvars(test) == vars(device_r2)\r\n\r\n\r\nfor k, v in test.validate().items():\r\n print(f\"{k}:\\n\\t {v}\")\r\n print(re.findall(r\"class\\s'(\\w+)'\", str(v.get('type'))))\r\n print('\\n')\r\n\r\nitemized_obj = vars(test)\r\n\r\nvars(device.Device)\r\n\r\ncnt_invalid = sum([1 for v in itemized_obj.values() if not v])\r\ncnt_total = len(itemized_obj)\r\n\r\ncnt_invalid\r\ncnt_total\r\n\r\nassert object_checker(test, device.Device)\r\n\r\n\r\nlen(vars(test))\r\n\r\nfor k, v in vars(test).items():\r\n print(k)" }, { "alpha_fraction": 0.6249288320541382, "alphanum_fraction": 0.6260671615600586, "avg_line_length": 28.29310417175293, "blob_id": "7aae96356dff73e667a9fc4ccbe568cb2650dbfe", "content_id": "a08771a40ff8876d3ba3c34341ec32e180ba356a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1757, "license_type": "no_license", "max_line_length": 81, "num_lines": 58, "path": "/spotibot/core/utils/FileManager.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "# Imports\r\nimport pandas as pd\r\nimport os\r\nfrom spotibot.core.utils import Hasher as util\r\n\r\n\r\ndef get_activity_playlist_for_user():\r\n \"\"\"\r\n Returns dictionary of user_id: user_activity_playlist_id from source csv.\r\n # Finds relative path for directory storing single user_playlist_bot.csv.\r\n # Extracts full path to first .csv file in directory (assumes only a\r\n # single file in the directory).\r\n # Imports df.\r\n # Uses utility function from utils.py to get a dictionary of {user_id:\r\n # playlist_id} where playlist_id is the user's 'My Listening History'\r\n # playlist.\r\n :return:\r\n \"\"\"\r\n dir_user_activity_playlist = os.path.join(\r\n os.getcwd(), \"_data\", \"UserDBO Playlists\", \"py_managed_user_playlist_dim\"\r\n )\r\n\r\n path_to_file = [\r\n 
os.path.join(dir_user_activity_playlist, file)\r\n for file in os.listdir(dir_user_activity_playlist)\r\n if \".csv\" in file\r\n ][0]\r\n\r\n df = pd.read_csv(path_to_file, na_values=\"nan\")\r\n\r\n uid_pid_dict = util.dict_from_df(df, \"user_id\", \"playlist_id\")\r\n\r\n return uid_pid_dict\r\n\r\n\r\ndef get_user_dim_dict():\r\n \"\"\"\r\n Returns dictionary of user_id: associated display name, href,\r\n and follower count.\r\n # Follows same logic as get_activity_playlist_for_user()\r\n \"\"\"\r\n dir_user_activity_playlist = os.path.join(\r\n os.getcwd(), \"_data\", \"UserDBO Information\"\r\n )\r\n\r\n path_to_file = [\r\n os.path.join(dir_user_activity_playlist, file)\r\n for file in os.listdir(dir_user_activity_playlist)\r\n if \".csv\" in file\r\n ][0]\r\n\r\n user_df = pd.read_csv(path_to_file)\r\n\r\n user_df.set_index(\"user_id\", drop=True, inplace=True)\r\n\r\n user_dim_dict = user_df.to_dict(\"index\")\r\n\r\n return user_dim_dict\r\n" }, { "alpha_fraction": 0.5678315758705139, "alphanum_fraction": 0.5684827566146851, "avg_line_length": 28.915584564208984, "blob_id": "870248cbd30e6fa3f7ca240967191fce252f3f8e", "content_id": "0d384a248734aad02abe43e6ecda2fcaa530e4b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4607, "license_type": "no_license", "max_line_length": 81, "num_lines": 154, "path": "/spotibot/core/objects/Request.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import time\nimport requests\nimport json\n\nfrom spotibot.core.objects import Time as spottime, User as user\n\nfrom spotibot.core.utils import Hasher as hasher\n\nfrom spotibot.mongo.utils.Handlers import get_serializable\n\n# TODO: Need to have something here that indicates downstream actions to not\n# even attempt to execute if it returns nothing/playback has gone dormant\n# class Response:\n#\n# def __init__(self, response):\n#\n# self.ok: bool = response.ok\n#\n# self.status_code: int = response.status_code\n#\n# if self.ok and self.status_code == 200:\n# self.result: dict = response.json()\n# else:\n# self.result: dict = {}\n#\n# def __eq__(self, other) -> bool:\n# \"\"\"Equality comparison to other objects.\n#\n# Args:\n# other: Comparison object\n#\n# Returns:\n# Boolean value indicating whether or not the attributes and their\n# associated values are equal between the two objects\n# \"\"\"\n# return vars(self) == vars(other)\n#\n# def __getitem__(self, item: str):\n# \"\"\"Getter method for subscriptability.\n#\n# Args:\n# item: Attribute to get the value of\n#\n# Returns:\n# Attribute value if exists in object's namespace\n# \"\"\"\n# return getattr(self, item)\n#\n# def get(self, item: str, default=None):\n# \"\"\"Method for extracting attributes without throwing existence errors.\n#\n# Args:\n# item: Attribute to get the value of\n# default: Return value if attribute doesn't exist\n#\n# Returns:\n# Attribute value or default if attribute does not exist\n# \"\"\"\n# return vars(self).get(item, default)\n#\n# def to_dict(self) -> dict:\n# \"\"\"Calling utility serialization method on all attributes.\n#\n# Returns:\n# String following valid json structure for mongo serialization.\n# \"\"\"\n# return {k: get_serializable(v) for k, v in vars(self).items()}\n#\n# @property\n# def json(self) -> str:\n# \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n# serialization purposes\n#\n# Returns:\n# Serializable 'json' output of SpotiBot object\n# \"\"\"\n# return json.dumps(self.to_dict())\n#\n#\n# # 
class Request(user.UserDBO):\n# class Request:\n#\n# def __init__(self, headers):\n#\n# self.headers = headers\n#\n# # --------------------------/Request Detail/---------------------------\n#\n# def http_get(self, href):\n#\n# self.unix_request_tmstmp: spottime.Timestamp = \\\n# spottime.Timestamp(time.time(), base='seconds')\n#\n# self.response = \\\n# Response(requests.get(href, headers=self.headers))\n#\n# self.endpoint_id: str = \\\n# hasher.quick_hash(\n# f\"{href}{self.unix_request_tmstmp}\")\n#\n# return self\n#\n# def __eq__(self, other) -> bool:\n# \"\"\"Equality comparison to other objects.\n#\n# Args:\n# other: Comparison object\n#\n# Returns:\n# Boolean value indicating whether or not the attributes and their\n# associated values are equal between the two objects\n# \"\"\"\n# return vars(self) == vars(other)\n#\n# def __getitem__(self, item: str):\n# \"\"\"Getter method for subscriptability.\n#\n# Args:\n# item: Attribute to get the value of\n#\n# Returns:\n# Attribute value if exists in object's namespace\n# \"\"\"\n# return getattr(self, item)\n#\n# def get(self, item: str, default=None):\n# \"\"\"Method for extracting attributes without throwing existence errors.\n#\n# Args:\n# item: Attribute to get the value of\n# default: Return value if attribute doesn't exist\n#\n# Returns:\n# Attribute value or default if attribute does not exist\n# \"\"\"\n# return vars(self).get(item, default)\n#\n# def to_dict(self) -> dict:\n# \"\"\"Calling utility serialization method on all attributes.\n#\n# Returns:\n# String following valid json structure for mongo serialization.\n# \"\"\"\n# return {k: get_serializable(v) for k, v in vars(self).items()}\n#\n# @property\n# def json(self) -> str:\n# \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n# serialization purposes\n#\n# Returns:\n# Serializable 'json' output of SpotiBot object\n# \"\"\"\n# return json.dumps(self.to_dict())\n" }, { "alpha_fraction": 0.5279502868652344, "alphanum_fraction": 0.5279502868652344, "avg_line_length": 11.416666984558105, "blob_id": "106ee7e4abe65aa0271858489cf2430a3593e650", "content_id": "58c842f4e04a3860f59ca1c580e2bf97c7296b87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 35, "num_lines": 12, "path": "/spotibot/core/objects/__init__.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "from spotibot.core.objects import (\r\n Activity,\r\n Context,\r\n General,\r\n Music,\r\n Podcasts,\r\n Time,\r\n User,\r\n Device,\r\n)\r\n\r\n# Request, \\\r\n" }, { "alpha_fraction": 0.7749999761581421, "alphanum_fraction": 0.7749999761581421, "avg_line_length": 38, "blob_id": "cd5f0436f06f32306a6ef9702af18f345126c221", "content_id": "ffb194ee439743b06c7653e02a4f4a678440b2b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 40, "license_type": "no_license", "max_line_length": 38, "num_lines": 1, "path": "/spotibot/base/auth/__init__.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "# from spotibot.base.auth import OAuth\r\n" }, { "alpha_fraction": 0.5843837261199951, "alphanum_fraction": 0.5861344337463379, "avg_line_length": 25.7281551361084, "blob_id": "2953b4a7649a918212039a8510fe0600a44dee1a", "content_id": "4d082cb83f166bbc81e04b32bbda0cc185ae630a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2856, "license_type": "no_license", 
"max_line_length": 86, "num_lines": 103, "path": "/spotibot/core/endpoints/Generic.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import json\r\nimport requests\r\n\r\nfrom spotibot.mongo.utils.Handlers import get_serializable\r\n\r\nfrom spotibot.base.config import Configuration\r\n\r\nconfig = Configuration.Config()\r\n\r\n# class Request:\r\n#\r\n# def __init__(self, href: str, headers: dict):\r\n\r\n\r\nclass Playlist:\r\n def __init__(self, href: str, headers: dict):\r\n\r\n self.href = href\r\n\r\n self.headers = headers\r\n\r\n request = requests.get(href, headers=headers)\r\n\r\n if request.ok:\r\n self.result = request.json()\r\n\r\n\r\nclass Href:\r\n def __init__(self, username: str):\r\n self.username = username\r\n\r\n self.current_playback = (\r\n r\"https://api.spotify.com/v1/me/player?\" r\"additional_types=track,episode\"\r\n )\r\n\r\n @property\r\n def playlists(self):\r\n return (\r\n f\"https://api.spotify.com/v1/users/\" f\"{self.username}/playlists?limit=50\"\r\n )\r\n\r\n @property\r\n def tracks_all_time(self):\r\n return\r\n\r\n def new_playlist(self, playlist_name: str, playlist_desc: str = None):\r\n href = f\"https://api.spotify.com/v1/users/{self.username}/playlists\"\r\n payload = json.dumps({\"name\": playlist_name, \"description\": playlist_desc})\r\n\r\n return href, payload\r\n\r\n def __eq__(self, other) -> bool:\r\n \"\"\"Equality comparison to other objects.\r\n\r\n Args:\r\n other: Comparison object\r\n\r\n Returns:\r\n Boolean value indicating whether or not the attributes and their\r\n associated values are equal between the two objects\r\n \"\"\"\r\n return vars(self) == vars(other)\r\n\r\n def __getitem__(self, item: str):\r\n \"\"\"Getter method for subscriptability.\r\n\r\n Args:\r\n item: Attribute to get the value of\r\n\r\n Returns:\r\n Attribute value if exists in object's namespace\r\n \"\"\"\r\n return getattr(self, item)\r\n\r\n def get(self, item: str, default=None):\r\n \"\"\"Method for extracting attributes without throwing existence errors.\r\n\r\n Args:\r\n item: Attribute to get the value of\r\n default: Return value if attribute doesn't exist\r\n\r\n Returns:\r\n Attribute value or default if attribute does not exist\r\n \"\"\"\r\n return vars(self).get(item, default)\r\n\r\n def to_dict(self) -> dict:\r\n \"\"\"Calling utility serialization method on all attributes.\r\n\r\n Returns:\r\n String following valid json structure for mongo serialization.\r\n \"\"\"\r\n return {k: get_serializable(v) for k, v in vars(self).items()}\r\n\r\n @property\r\n def json(self) -> str:\r\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\r\n serialization purposes\r\n\r\n Returns:\r\n Serializable 'json' output of SpotiBot object\r\n \"\"\"\r\n return json.dumps(self.to_dict())\r\n" }, { "alpha_fraction": 0.8307692408561707, "alphanum_fraction": 0.8307692408561707, "avg_line_length": 63, "blob_id": "88626300ef143bc34f8555e5715b1ad99f6e4343", "content_id": "7ce53c2f998c9498463bacba08e200f6ee99068c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 65, "license_type": "no_license", "max_line_length": 63, "num_lines": 1, "path": "/spotibot/core/utils/__init__.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "from spotibot.core.utils import Hasher, FileManager, GetConfigs\r\n" }, { "alpha_fraction": 0.5804633498191833, "alphanum_fraction": 0.5804633498191833, "avg_line_length": 28.673076629638672, "blob_id": 
"8e6754006d8fabb91ecdaebe315c64d039026ff7", "content_id": "b76917c98135fb196e6880712d46600fb955c62f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1597, "license_type": "no_license", "max_line_length": 79, "num_lines": 52, "path": "/tests/core/objects/test_Music.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "\r\nimport os\r\n\r\nfrom spotibot.core.objects import \\\r\n Music\r\n\r\nfrom spotibot.mongo.utils.Handlers import \\\r\n is_jsonable\r\n\r\n\r\ndef test_instantiation_serialization(result: dict):\r\n \"\"\"Tests the instantiation of SpotiBot objects from raw API responses\r\n and their conversion to byte-code based on the object's property's/methods\r\n\r\n Args:\r\n result: PyTest fixture containing a dictionary of object entries\r\n mirroring the below.\r\n\r\n {object name:\r\n (object Class,\r\n raw API representation pre-instantiation,\r\n representation post-instantiation as of last stable build\r\n )\r\n }\r\n\r\n Objects currently covered are:\r\n : music.Track\r\n : music.Album\r\n : music.Artist\r\n : device.Device\r\n : context.Context\r\n \"\"\"\r\n for obj_name, nested_val in result.items():\r\n spot_obj, result_in, expected_out = nested_val\r\n instantiated = spot_obj(result_in)\r\n assert instantiated == expected_out\r\n assert is_jsonable(instantiated.json)\r\n\r\n\r\n\r\n# -----------------------------------------------------------------------------\r\n\r\n\r\n# def test_album(result):\r\n# to_instantiate = result.get('item').get('album')\r\n# instantiated = Music.Album(to_instantiate)\r\n# assert isinstance(instantiated, Music.Album)\r\n#\r\n#\r\n# def test_album_serialization(result):\r\n# to_instantiate = result.get('item').get('album')\r\n# instantiated = Music.Album(to_instantiate)\r\n# assert is_jsonable(instantiated.json)\r\n" }, { "alpha_fraction": 0.756302535533905, "alphanum_fraction": 0.756302535533905, "avg_line_length": 37.66666793823242, "blob_id": "f4d6d32cb08dc1231982fec12ee79977deedd84e", "content_id": "d8f75e5e39012fbc4cb0e9825d4bfddb3610f6bd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 119, "license_type": "no_license", "max_line_length": 103, "num_lines": 3, "path": "/README.md", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "# SpotiBot\r\n\r\n`spotibot` is a set of modules for extracting rich usage data from the Spotify API on an ongoing basis.\r\n" }, { "alpha_fraction": 0.6374407410621643, "alphanum_fraction": 0.6729857921600342, "avg_line_length": 23.823530197143555, "blob_id": "966d852e14c9dbfb07b2afe8ccee6d59295a1939", "content_id": "4ba180cb1e391965a7737c9e01fce42cb8aac7b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 422, "license_type": "no_license", "max_line_length": 70, "num_lines": 17, "path": "/pyproject.toml", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "[tool.poetry]\nname = \"spotibot\"\nversion = \"0.1.0\"\ndescription = \"A small library to collect Spotify listening activity.\"\nauthors = [\"Grant Murray <[email protected]>\"]\nlicense = \"MIT\"\npackages = [{include = \"spotibot\"}]\ninclude = [\"spotibot/*\"]\n\n[tool.poetry.dependencies]\npython = \"^3.7\"\n\n[tool.poetry.dev-dependencies]\n\n[build-system]\nrequires = [\"poetry>=0.12\", \"pandas>=1.0.3\", \"spotipy>=2.12.0\"]\nbuild-backend = \"poetry.masonry.api\"\n" }, { "alpha_fraction": 0.6618182063102722, 
"alphanum_fraction": 0.6618182063102722, "avg_line_length": 28.55555534362793, "blob_id": "badba46358a0083b36885e1b0d5dc2c1155c3297", "content_id": "c26a9ffca50fb7fb8c1426be38d615bf2aac8da6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 550, "license_type": "no_license", "max_line_length": 72, "num_lines": 18, "path": "/spotibot/core/utils/GetConfigs.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "# Imports\r\nimport configparser\r\nimport os\r\n\r\n\r\ndef get_spotify_creds():\r\n \"\"\"\r\n Function to read in Spotify API credentials from configuration file.\r\n :return: Tuple of client ID, client secret, and username\r\n \"\"\"\r\n config = configparser.ConfigParser()\r\n config.read(os.path.join(os.getcwd(), \"SpotiBot.ini\"))\r\n\r\n client_id = config.get(\"CLIENT\", \"CLIENT_ID\")\r\n client_secret = config.get(\"CLIENT\", \"CLIENT_SECRET\")\r\n username_str = str(config.get(\"USER\", \"USERNAME\"))\r\n\r\n return client_id, client_secret, username_str\r\n" }, { "alpha_fraction": 0.8157894611358643, "alphanum_fraction": 0.8157894611358643, "avg_line_length": 36, "blob_id": "672133944052cb0ea46e859ef953db7fb31a1113", "content_id": "764c240a2f209cf140a1b28f57298bc256dbcaa5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "no_license", "max_line_length": 36, "num_lines": 1, "path": "/spotibot/base/__init__.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "from spotibot.base.auth import OAuth\r\n" }, { "alpha_fraction": 0.6211849451065063, "alphanum_fraction": 0.6211849451065063, "avg_line_length": 29.604394912719727, "blob_id": "451b48412570f0b37092aa1f74a84cba8309b6d7", "content_id": "ff9f2b621c80ae3e8bfedf3176cc890061c2a2ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2785, "license_type": "no_license", "max_line_length": 78, "num_lines": 91, "path": "/spotibot/core/objects/Context.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import json\nfrom spotibot.mongo.utils.Handlers import object_handler, get_serializable\n\n\nclass Context:\n \"\"\"Auto-generated attribute instantiation docstring for context\n object\n\n Note: Parameter description in below docstring is populated based\n on the descriptions at the following link:\n https://developer.spotify.com/documentation/web-\n api/reference/object-model\n\n Please consult their official documentation for more in-depth\n information & full-linking across pages.\n\n Attributes:\n type (str): The object type, e.g. 
artist, playlist, album.\n href (str): A link to the Web API endpoint providing full\n details of the track.\n external_urls (an external URL object): External URLs for this\n context.\n uri (str): The Spotify URI for the context.\n \"\"\"\n\n def __init__(self, context: dict):\n\n if context:\n self.href: str = object_handler(context, \"href\")\n\n if context:\n self.type: str = object_handler(context, \"type\")\n\n if context:\n self.uri: str = object_handler(context, \"uri\")\n\n if context:\n self.external_urls: str = object_handler(context, \"external_urls\")\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n" }, { "alpha_fraction": 0.4718964397907257, "alphanum_fraction": 0.4747731685638428, "avg_line_length": 29.058420181274414, "blob_id": "373f8ae4a0292ddd63fdc9517d2d961ce6e0f6ce", "content_id": "c4b9e9dd2c60964fcfb59677ed1e408c22785d3e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9038, "license_type": "no_license", "max_line_length": 81, "num_lines": 291, "path": "/utils_independent/DocParser.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nimport os\r\nfrom textwrap import TextWrapper\r\nfrom io import StringIO\r\nfrom html.parser import HTMLParser\r\nimport string\r\n\r\n\r\ndef __main__():\r\n \"\"\"Script parses API documentation into Google style docstrings.\r\n\r\n End output is the 'Object Model Auto-Generated Docstrings.md' file\r\n containing all class docstrings which are then added to all SpotiBot\r\n classes that directly mirror the Spotify object model.\r\n \"\"\"\r\n\r\n def next_element(elem):\r\n while elem is not None:\r\n # Find next element, skip NavigableString objects\r\n elem = elem.next_sibling\r\n if hasattr(elem, 'name'):\r\n return elem\r\n\r\n def get_pages(tags, header='h3'):\r\n pages = []\r\n for tag in tags:\r\n page = [str(tag)]\r\n elem = next_element(tag)\r\n while elem and elem.name != header:\r\n page.append(str(elem))\r\n elem = next_element(elem)\r\n pages.append('\\n'.join(page))\r\n\r\n return pages\r\n\r\n def get_table_val_dict(pages, header):\r\n\r\n table_dict = {}\r\n for page in pages:\r\n\r\n id = 
re.findall(f'>(.*)</{header}>', page)\r\n if id:\r\n soupified = BeautifulSoup(page, 'html.parser')\r\n table_dict[id[0]] = soupified\r\n else:\r\n pass\r\n\r\n table_dict = {k: v for k, v in table_dict.items()\r\n if not re.findall('/', k) and k != 'Example'}\r\n\r\n table_val_dict = {}\r\n for head, soup in table_dict.items():\r\n cols = soup.find_all('tr').__str__().split(r'</tr>')\r\n cols = [re.findall(r'<td\\>(.*)</td>', col) for col in cols\r\n if re.findall(r'<td\\>(.*)</td>', col)]\r\n fields = {}\r\n for col in cols:\r\n\r\n if len(col) == 3:\r\n name, typ, desc = col\r\n desc = \\\r\n desc.__str__() \\\r\n .replace(r'<code class=\"highlighter-rouge\">', '``') \\\r\n .replace(r'</code>', '``')\r\n fields[name] = {'type': typ,\r\n 'description': desc}\r\n\r\n if fields:\r\n table_val_dict[head] = fields\r\n else:\r\n pass\r\n\r\n return table_val_dict\r\n\r\n def field_dict_from_href(href, header='h3'):\r\n response = requests.get(href)\r\n\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n\r\n tags = soup.find_all(header)\r\n\r\n pages = get_pages(tags, header=header)\r\n\r\n field_dict = get_table_val_dict(pages, header=header)\r\n\r\n return field_dict, soup\r\n\r\n class MLStripper(HTMLParser):\r\n \"\"\"\r\n Note: Code directly vendor'd in from the following link\r\n https://stackoverflow.com/questions/753052/strip\r\n -html-from-strings-in-python\r\n \"\"\"\r\n\r\n def __init__(self):\r\n super().__init__()\r\n self.reset()\r\n self.strict = False\r\n self.convert_charrefs = True\r\n self.text = StringIO()\r\n\r\n def handle_data(self, d):\r\n self.text.write(d)\r\n\r\n def get_data(self):\r\n return self.text.getvalue()\r\n\r\n def strip_tags(html):\r\n s = MLStripper()\r\n s.feed(html)\r\n return s.get_data()\r\n\r\n class Desc:\r\n\r\n def __init__(self, desc: str):\r\n # self.desc = desc\r\n # TODO: Do all the stripping of links and stuff at this level\r\n\r\n self.original = \\\r\n desc\r\n\r\n self.stripped = \\\r\n strip_tags(''.join(\r\n char for char in desc if char in string.printable))\r\n\r\n class Typ:\r\n\r\n def __init__(self, typ):\r\n self.types = \\\r\n {'boolean': 'bool',\r\n 'string': 'str',\r\n 'integer': 'int'}\r\n\r\n self.original = \\\r\n typ\r\n\r\n self.revised = \\\r\n self.types.get(typ, typ)\r\n\r\n class Name:\r\n\r\n def __init__(self, name):\r\n self.original = \\\r\n name\r\n\r\n self.stripped = \\\r\n strip_tags(''.join(\r\n char for char in name if char in string.printable))\r\n\r\n class Attr(Name):\r\n\r\n def __init__(self, name: str, attrs):\r\n super().__init__(name)\r\n\r\n self.type: Typ = \\\r\n Typ(attrs.get('type'))\r\n\r\n self.desc = \\\r\n Desc(attrs.get('description'))\r\n\r\n class Wrapper:\r\n attr_wrapper = \\\r\n TextWrapper(width=66, fix_sentence_endings=True, tabsize=4,\r\n initial_indent='\\t\\t',\r\n subsequent_indent='\\t\\t\\t')\r\n other_wrapper = \\\r\n TextWrapper(width=66, fix_sentence_endings=True,\r\n initial_indent='\\t', subsequent_indent='\\t', tabsize=4)\r\n\r\n def __init__(self, raw: str):\r\n self.stripped = strip_tags(raw)\r\n\r\n self.attr = \\\r\n '\\n'.join(self.attr_wrapper.wrap(self.stripped))\r\n\r\n self.other = \\\r\n '\\n'.join(self.other_wrapper.wrap(self.stripped)). 
\\\r\n lstrip(' ').rstrip(' ')\r\n\r\n class Doc(Attr):\r\n wrapper = TextWrapper(width=66, fix_sentence_endings=True,\r\n initial_indent='\\t\\t',\r\n subsequent_indent='\\t\\t\\t',\r\n tabsize=4)\r\n\r\n def __init__(self, name, attrs):\r\n super().__init__(name, attrs)\r\n\r\n self.full = \\\r\n f\"{self.stripped} ({self.type.revised}): {self.desc.stripped}\"\r\n\r\n self.param = Wrapper(self.full).attr\r\n\r\n class Item:\r\n\r\n def __init__(self, name, attrs: dict):\r\n self.name = \\\r\n name\r\n\r\n self.spotibot_api_xref = \\\r\n {}\r\n\r\n self.spotibot = \\\r\n self.spotibot_api_xref.get(name)\r\n\r\n self.docs = [Doc(k, v) for k, v in attrs.items()]\r\n\r\n self.header = \\\r\n Wrapper(\r\n f'\"\"\"Auto-generated attribute instantiation docstring for '\r\n f'{name}').other + '\\n\\n'\r\n\r\n self.disclaimer = \\\r\n Wrapper(\r\n f\"Note: Parameter description in below docstring is \"\r\n f\"populated based on the\\n descriptions at the following \"\r\n f\"link:\\n\\thttps://developer.spotify.com/documentation/web\"\r\n f\"-api/reference/object-model\").other + '\\n\\n'\r\n\r\n self.guidance = \\\r\n Wrapper(\r\n f\"Please consult their official documentation for more \"\r\n f\"in-depth information & full-linking across pages.\") \\\r\n .other + '\\n\\n'\r\n\r\n self.attrs = [attr.param for attr in self.docs]\r\n\r\n self.docstring = \\\r\n self.header + self.disclaimer + self.guidance \\\r\n + '\\tAttributes:\\n' + '\\n'.join(self.attrs)\r\n\r\n self.md = \\\r\n f'### {name.title()}' \\\r\n f'\\n```python\\n{self.docstring}\\n\\t\"\"\"\\n```'\r\n\r\n class Model:\r\n\r\n def __init__(self, objects: dict):\r\n\r\n self.objs = {}\r\n for obj_name, obj_attrs in objects.items():\r\n self.objs[obj_name] = Item(obj_name, obj_attrs)\r\n\r\n self.markdowns = \\\r\n [obj.md for obj in self.objs.values()]\r\n\r\n def to_md(self, path_to_write='') -> None:\r\n \"\"\"Combines all markdown objects and writes to a .md file.\r\n\r\n Args:\r\n path_to_write: Full file path to write to\r\n \"\"\"\r\n self.path_to_write = path_to_write\r\n\r\n if not self.path_to_write:\r\n self.path_to_write = \\\r\n os.path.join(os.getcwd(),\r\n 'Object Model Auto-Generated Docstrings.md')\r\n\r\n with open(self.path_to_write, 'w') as f:\r\n f.write('\\n\\n'.join(self.markdowns))\r\n\r\n return None\r\n\r\n href = r'https://developer.spotify.com/documentation' \\\r\n r'/web-api/reference/object-model/#track-object-' \\\r\n r'full'\r\n\r\n href2 = r'https://developer.spotify.com/documentation/web' \\\r\n r'-api/reference/player/get-information-about-the-' \\\r\n r'users-current-playback/'\r\n\r\n # Url to API documentation for majority of objects\r\n track, soup1 = \\\r\n field_dict_from_href(href, header='h2')\r\n\r\n # Url to API documentation for current playback objects\r\n other, soup2 = \\\r\n field_dict_from_href(href2, header='h3')\r\n\r\n # Combining all docs\r\n total = {**track, **other}\r\n\r\n # Exporting to 'Object Model Auto-Generated Docstrings.md'\r\n Model(total).to_md()\r\n\r\n\r\nif __name__ == '__main__':\r\n __main__()\r\n" }, { "alpha_fraction": 0.5315081477165222, "alphanum_fraction": 0.5341043472290039, "avg_line_length": 28.702898025512695, "blob_id": "1956c49d0e6886a615c821b0b88a0ab4f333c504", "content_id": "6360b66f8862dfc96504b8c16d326d427331d883", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4237, "license_type": "no_license", "max_line_length": 88, "num_lines": 138, "path": "/spotibot/base/config/Configuration.py", 
"repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import os\r\nfrom fcache.cache import FileCache\r\nimport json\r\n\r\n\r\nclass ConfigFinder:\r\n def __init__(self, config_file: str = \"SpotiBot.json\") -> None:\r\n \"\"\"Instantiates instances of environment configuration from .ini file.\r\n\r\n Args:\r\n config_file: Name of .ini configuration file following the\r\n format of SpotiBot_SAMPLE.ini\r\n \"\"\"\r\n self.cache = FileCache(config_file.split(r\".\")[0], flag=\"cs\")\r\n self.config_file = config_file\r\n self.path_to_config = self.cache.get(r\"path_to_config\")\r\n\r\n def clear_cache(self) -> object:\r\n \"\"\"Clears cached path to configuration file.\"\"\"\r\n self.cache.clear()\r\n return self\r\n\r\n @property\r\n def cache_exists(self) -> bool:\r\n \"\"\"Checks to see if a cached file path exists to a valid file.\"\"\"\r\n try:\r\n return os.path.isfile(self.path_to_config)\r\n except:\r\n return False\r\n\r\n @property\r\n def cache_is_valid(self) -> bool:\r\n \"\"\"Checks to see if the valid file path contains the config file.\"\"\"\r\n try:\r\n return self.config_file == os.path.basename(self.path_to_config)\r\n except:\r\n return False\r\n\r\n def locate_config(self):\r\n \"\"\"Traverse file system from bottom up to locate config file.\"\"\"\r\n for dirpath, dirnames, files in os.walk(os.path.expanduser(\"~\"), topdown=False):\r\n\r\n if self.config_file in files:\r\n self.path_to_config = os.path.join(dirpath, self.config_file)\r\n break\r\n\r\n else:\r\n self.path_to_config = None\r\n\r\n return self.path_to_config\r\n\r\n def get_path(self) -> str:\r\n \"\"\"Checks for cache existence and validates - traverses OS if not.\"\"\"\r\n print(\"Locating configuration...\")\r\n\r\n print(\"\\t<1 of 2> Checking for cached path...\")\r\n\r\n if self.cache_exists and self.cache_is_valid:\r\n print(f\"\\t<2 of 2> Found cached path: {self.path_to_config}\")\r\n\r\n else:\r\n print(\"\\t<2 of 2> Cached path not found\")\r\n print(f\"\\nLooking for {self.config_file} in local file system..\")\r\n\r\n self.path_to_config = self.locate_config()\r\n\r\n if self.path_to_config:\r\n print(\r\n f\"\\t<1 of 1> '{self.config_file}' found at: \"\r\n f\"{self.path_to_config}\"\r\n )\r\n else:\r\n print(\r\n f\"\\t<1 of 1> Could not find config file\"\r\n f\" {self.config_file} please double check the name of \"\r\n f\"your configuration file or value passed in the\"\r\n f\"'config_file' argument\"\r\n )\r\n\r\n return self.path_to_config\r\n\r\n def read_file(self) -> object:\r\n \"\"\"Locates creds file and caches location.\r\n\r\n Returns:\r\n Dictionary containing SpotiBot configuration params\r\n\r\n \"\"\"\r\n self.path_to_config = self.get_path()\r\n self.cache[\"path_to_config\"] = self.path_to_config\r\n\r\n try:\r\n with open(self.path_to_config, \"r\") as r:\r\n self.cfg = json.load(r)\r\n\r\n except IOError as e:\r\n print(e)\r\n\r\n return self\r\n\r\n\r\nclass Config(ConfigFinder):\r\n def __init__(self, config_file: str = \"SpotiBot.json\"):\r\n\r\n super().__init__(config_file)\r\n\r\n self.path_to_config = self.get_path()\r\n self.cache[\"path_to_config\"] = self.path_to_config\r\n\r\n try:\r\n with open(self.path_to_config, \"r\") as r:\r\n self.cfg = json.load(r)\r\n\r\n except IOError as e:\r\n self.cfg = None\r\n print(e)\r\n\r\n def get_configs(self, keys_to_traverse: list):\r\n\r\n sub = {k: v for k, v in self.cfg.items()}\r\n\r\n for k in keys_to_traverse:\r\n sub = sub[k]\r\n\r\n return sub\r\n\r\n @property\r\n def played_all_time(self):\r\n\r\n 
sub = self.get_configs([\"PLAYLISTS\", \"ACTIVITY\"])\r\n\r\n all_timers = {}\r\n\r\n for play_type, attrs in sub.items():\r\n playlist_name, playlist_description = list(attrs.values())\r\n all_timers[playlist_name] = playlist_description\r\n\r\n return all_timers\r\n" }, { "alpha_fraction": 0.570576548576355, "alphanum_fraction": 0.5785288214683533, "avg_line_length": 17.346153259277344, "blob_id": "2ecc1d7959288ab4181de78303d96a752e3679b1", "content_id": "0d1ae2c477cd0a5acafa59ae34821d1186405a01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 503, "license_type": "no_license", "max_line_length": 56, "num_lines": 26, "path": "/tests/mongo/utils/test_Handlers.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import pytest\r\n\r\nfrom spotibot.mongo.utils.Handlers import object_handler\r\n\r\n\r\nclass Sample:\r\n\r\n def __init__(self, val: int):\r\n self.attr: int = val\r\n\r\n\r\[email protected](\r\n params=[\r\n (Sample(2), 'attr', 2),\r\n ({'attr': 2}, 'attr', 2)\r\n ],\r\n scope='function'\r\n)\r\ndef setup(request):\r\n obj, key, outcome = request.param\r\n return obj, key, outcome\r\n\r\n\r\ndef test_object_handler(setup):\r\n obj, key, outcome = setup\r\n assert object_handler(obj, key) == outcome\r\n" }, { "alpha_fraction": 0.5749558806419373, "alphanum_fraction": 0.5867136716842651, "avg_line_length": 31.075471878051758, "blob_id": "114be62055721f5449d23f43372f8a957cef874e", "content_id": "bde2c3411fe81cf3f10249e64cb652490ce3d46d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1701, "license_type": "no_license", "max_line_length": 76, "num_lines": 53, "path": "/utils_independent/ImplementObjectHandler.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "\nimport os\nimport re\n\n\ndef __main__():\n \"\"\"One time utility script ran for changes in attribute instantiation.\n\n This script performs the below, one-time conversion on all attributes of\n SpotiBot classes - this is done so that the same codes can instantiate\n json API responses and serialized objects stored in MongoDB without any\n other changes to the data model.\n\n .. 
code-block:: python\n\n class.get('attr') # original\n\n object_handler(class, 'attr') # revised\n\n\n Date: 2020-06-11\n \"\"\"\n spotibot_root = os.path.join(os.getcwd(), 'spotibot')\n\n files_to_convert = \\\n [os.path.join(dirpath, file) for dirpath, dirnames, files\n in os.walk(spotibot_root, topdown=False) for file in files\n if file not in [r'__init__.py', r'__main__.py']\n and 'cache' not in re.escape(dirpath)\n and 'utils' not in re.escape(dirpath)]\n\n # / Exporting list of files to convert into autodocs directory /\n wr_path = os.path.join(os.getcwd(), 'src_autodoc', '20200610~1.txt')\n assert not os.path.isfile(wr_path)\n with open(wr_path, 'w') as f:\n f.write('\\n'.join([re.escape(file) for file in files_to_convert]))\n\n # / Continuing replacement logic /\n pattern = re.compile(r\"(.*self\\.\\w+:\\s\\w+\\s=\\s\\\\\\n\\s+)[^self]\"\n r\"\\s+(\\w+)\\.get\\(('\\w+')\\)\")\n\n for file in files_to_convert:\n with open(file, 'r') as r:\n obj_py = r.read()\n\n conv_py = pattern.sub(r\"\\1object_handler(\\2, \\3)\", obj_py)\n\n with open(file, 'w') as w:\n w.write(conv_py)\n print(f\"Written modified file to:\\n\\t{file}\\n\")\n\n\nif __name__ == '__main__':\n __main__()\n" }, { "alpha_fraction": 0.6184372305870056, "alphanum_fraction": 0.6185735464096069, "avg_line_length": 29.301652908325195, "blob_id": "7b391bba98a61db20cd6a92ee3b50adf69e93113", "content_id": "0902f8ee4d4153e8f5bf7c19ebb3654d9ac78b5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7333, "license_type": "no_license", "max_line_length": 78, "num_lines": 242, "path": "/spotibot/core/objects/General.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import json\nfrom spotibot.mongo.utils.Handlers import object_handler, get_serializable\n\n\nclass ExternalId:\n \"\"\"Auto-generated attribute instantiation docstring for external\n ID object\n\n Note: Parameter description in below docstring is populated based\n on the descriptions at the following link:\n https://developer.spotify.com/documentation/web-\n api/reference/object-model\n\n Please consult their official documentation for more in-depth\n information & full-linking across pages.\n\n Attributes:\n {key} (str): The identifier type, for example:- ``\"isrc\"`` -\n International Standard Recording Code- ``\"ean\"`` -\n International Article Number- ``\"upc\"`` - Universal Product\n Code\n {value} (str): An external identifier for the object.\n \"\"\"\n\n def __init__(self, external_id: dict):\n\n self.typ: str = list(external_id.keys())[0]\n\n self.id: str = object_handler(external_id, self.typ)\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n 
\"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n\n\nclass ExternalUrl:\n \"\"\"Auto-generated attribute instantiation docstring for external\n URL object\n\n Note: Parameter description in below docstring is populated based\n on the descriptions at the following link:\n https://developer.spotify.com/documentation/web-\n api/reference/object-model\n\n Please consult their official documentation for more in-depth\n information & full-linking across pages.\n\n Attributes:\n typ (str) The type of the URL, for example:- ``\"spotify\"`` -\n The Spotify URL for the object.\n url (str): An external, public URL to the object.\n \"\"\"\n\n def __init__(self, url):\n\n self.typ: str = object_handler(url, \"typ\")\n\n self.url: str = object_handler(url, \"url\")\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n\n\nclass Image:\n \"\"\"Auto-generated attribute instantiation docstring for image\n object\n\n Note: Parameter description in below docstring is populated based\n on the descriptions at the following link:\n https://developer.spotify.com/documentation/web-\n api/reference/object-model\n\n Please consult their official documentation for more in-depth\n information & full-linking across pages.\n\n Attributes:\n height (int): The image height in pixels. If unknown: ``null``\n or not returned.\n url (str): The source URL of the image.\n width (int): The image width in pixels. 
If unknown: ``null`` or\n not returned.\n \"\"\"\n\n def __init__(self, image):\n if image:\n self.height: int = object_handler(image, \"height\")\n\n self.url: str = object_handler(image, \"url\")\n\n self.width: int = object_handler(image, \"width\")\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n" }, { "alpha_fraction": 0.5868651866912842, "alphanum_fraction": 0.6090110540390015, "avg_line_length": 24.724489212036133, "blob_id": "a06d030909aa564002dc2613e892b0ecab779413", "content_id": "88cbfb35387b9785384d7d67169fe11300f289c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2619, "license_type": "no_license", "max_line_length": 86, "num_lines": 98, "path": "/spotibot/core/utils/Hasher.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "# Imports\r\nimport hashlib\r\nimport pandas as pd\r\nimport calendar\r\nimport datetime as dt\r\n\r\n\r\ndef quick_hash(unhashed):\r\n \"\"\"\r\n Quick hashing function to truncate the length playlist_id:\r\n snapshot_id key.\r\n :param unhashed:\r\n :return:\r\n \"\"\"\r\n hashed_base = hashlib.md5(unhashed.encode(\"utf-8\"))\r\n hashed_str = hashed_base.hexdigest()\r\n return hashed_str\r\n\r\n\r\ndef dict_to_df(nested_dict, id_col=\"id\"):\r\n \"\"\"Fully flatten single-layer nested DataFrame into fully tabular form,\r\n distinct on\r\n 'id' which is just a generated UUID for every time the script runs\r\n \"\"\"\r\n df = pd.DataFrame(nested_dict.values(), index=nested_dict.keys())\r\n df.index.name = id_col\r\n df.reset_index(inplace=True)\r\n return df\r\n\r\n\r\ndef dict_from_df(df: pd.DataFrame, key_col: str, val_col: str) -> dict:\r\n \"\"\"\r\n Simple helper function to return dictionary from two columns within a\r\n DataFrame\r\n :param df: DataFrame to extract dictionary from\r\n :param key_col: Column to serve as key in dictionary\r\n :param val_col: Column to serve as value in dictionary\r\n :return: Dictionary of distinct key: value pairs from column\r\n \"\"\"\r\n to_return = {\r\n k: v[val_col]\r\n for k, v in df[[key_col, val_col]].set_index(key_col).to_dict(\"index\").items()\r\n if str(v[val_col]) != \"nan\"\r\n }\r\n\r\n return to_return\r\n\r\n\r\ndef 
to_seconds(milliseconds):\r\n    seconds = round(milliseconds / 1000, 2)\r\n    return seconds\r\n\r\n\r\ndef abs_secs_delta_from_ms(ms1, ms2):\r\n    seconds1 = round(ms1 / 1000, 2)\r\n    seconds2 = round(ms2 / 1000, 2)\r\n    abs_delta = round(abs(seconds1 - seconds2), 2)\r\n    return abs_delta\r\n\r\n\r\ndef get_numeric_tmstmp(tmstmp, return_string=False):\r\n    # NOTE: 'dp' was referenced below without ever being imported, which\r\n    # raised a NameError; assuming the common dateutil.parser alias,\r\n    # imported here so the call resolves.\r\n    import dateutil.parser as dp\r\n    parsed_tmstmp = dp.parse(str(tmstmp))\r\n    numeric_tmstmp = parsed_tmstmp.strftime(\"%Y%m%d%H%M%S%f\")\r\n    if return_string is True:\r\n        return numeric_tmstmp\r\n    else:\r\n        return float(numeric_tmstmp)\r\n\r\n\r\n# -----------------\r\ndef iso8601_to_unix(iso_string, includes_tz=True):\r\n    if includes_tz is True:\r\n        return calendar.timegm(\r\n            dt.datetime.strptime(iso_string, \"%Y-%m-%dT%H:%M:%S.%fZ\").timetuple()\r\n        )\r\n    else:\r\n        return calendar.timegm(\r\n            dt.datetime.strptime(iso_string, \"%Y-%m-%dT%H:%M:%S.%f\").timetuple()\r\n        )\r\n\r\n\r\n# -----------------\r\n\r\n\r\ndef ns_to_seconds(epoch_ns):\r\n    return epoch_ns * (1 / 1_000_000_000)\r\n\r\n\r\ndef ns_to_ms(epoch_ns):\r\n    return epoch_ns * (1 / 1_000_000)\r\n\r\n\r\ndef ms_to_seconds(milliseconds):\r\n    return milliseconds * (1 / 1_000)\r\n\r\n\r\ndef seconds_to_ms(seconds):\r\n    return seconds * (1_000)\r\n" }, { "alpha_fraction": 0.6057142615318298, "alphanum_fraction": 0.6074285507202148, "avg_line_length": 28.6610164642334, "blob_id": "3c937dff47ebd16f3c0dceef808181c396b3b10b", "content_id": "34cb75818468ea160909cd582280835c91cc01a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5250, "license_type": "no_license", "max_line_length": 88, "num_lines": 177, "path": "/spotibot/core/objects/User.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "import json\nimport jsonpickle\nimport requests\nfrom mongoengine import *\n\nfrom spotibot.mongo.utils.Handlers import get_serializable\n\nfrom spotibot.mongo.core.objects import User as UserDoc\n\nfrom spotibot.base.auth import OAuth as oauth\n\nfrom spotibot.core.endpoints import Generic as genhref\n\nfrom spotibot.base.config import Configuration\n\nfrom spotibot.mongo.conn import Connector\n\nfrom spotibot.core.objects import Music as music, Podcasts as podcasts\n\nconnect(host=Connector.get_creds(collection=\"users\"))\n\nconfig = Configuration.Config()\n\nactivity_playlists = config.get_configs([\"PLAYLISTS\", \"ACTIVITY\"])\n\nactivity_names_to_desc = {\n    activity_playlists.get(\"MUSIC\")\n    .get(\"NAME\"): activity_playlists.get(\"MUSIC\")\n    .get(\"DESCRIPTION\"),\n    activity_playlists.get(\"PODCASTS\")\n    .get(\"NAME\"): activity_playlists.get(\"PODCASTS\")\n    .get(\"DESCRIPTION\"),\n}\n\n\ndef get_user_playlists(href, headers):\n    request = requests.get(href, headers=headers)\n    result = request.json()\n\n    all_playlists = [result[\"items\"]]\n    while result.get(\"next\"):\n        result = requests.get(result.get(\"next\"), headers=headers).json()\n        all_playlists.append(result[\"items\"])\n\n    all_playlists = {\n        t.get(\"name\"): t.get(\"href\") for batch in all_playlists for t in batch\n    }\n\n    return all_playlists\n\n\nclass UserDBO(genhref.Href):\n    def __init__(self, user_id: str):\n\n        super().__init__(username=user_id)\n\n        self.user_id: str = user_id\n\n        self.mongo_user = UserDoc.User.objects(pk=user_id).first()\n\n        self.tokens: oauth.Token = jsonpickle.decode(self.mongo_user.obj)\n\n        # Checking for activity playlists and creating if not pre-existing\n        activity_names_to_types = {\n            config.get_configs([\"PLAYLISTS\", \"ACTIVITY\", \"MUSIC\", \"NAME\"]): 
music.Track,\n config.get_configs(\n [\"PLAYLISTS\", \"ACTIVITY\", \"PODCASTS\", \"NAME\"]\n ): podcasts.Episode,\n }\n\n names_to_hrefs = {\n k: v\n for k, v in get_user_playlists(self.playlists, self.headers()).items()\n if activity_names_to_types.get(k)\n }\n\n activity_names_to_create = [\n k for k in activity_names_to_types.keys() if not names_to_hrefs.get(k)\n ]\n\n for name in activity_names_to_create:\n desc = activity_names_to_desc.get(name)\n req_href, req_payload = self.new_playlist(\n playlist_name=name, playlist_desc=desc\n )\n\n request = requests.post(req_href, data=req_payload, headers=self.headers())\n if request.ok:\n result = request.json()\n names_to_hrefs.update({result.get(\"name\"): result.get(\"href\")})\n\n activity_playlist_hrefs = {\n \"track\": names_to_hrefs.get(activity_playlists.get(\"MUSIC\").get(\"NAME\")),\n \"episode\": names_to_hrefs.get(\n activity_playlists.get(\"PODCASTS\").get(\"NAME\")\n ),\n }\n\n self.activity_playlist_hrefs = {\n k: f\"{v}/tracks?\" for k, v in activity_playlist_hrefs.items()\n }\n\n def refresh_tokens(self):\n self.tokens = self.tokens.refresh()\n self.mongo_user = oauth.UserDBI(self.tokens).save()\n\n return self\n\n def headers(self, post=False):\n\n if self.tokens.is_expired:\n self.refresh_tokens()\n\n if not post:\n return {\"Authorization\": f\"Bearer {self.tokens.access_token}\"}\n else:\n return {\n \"Authorization\": f\"Bearer {self.tokens.access_token}\",\n \"Content-Type\": \"application/json\",\n }\n\n def __eq__(self, other) -> bool:\n \"\"\"Equality comparison to other objects.\n\n Args:\n other: Comparison object\n\n Returns:\n Boolean value indicating whether or not the attributes and their\n associated values are equal between the two objects\n \"\"\"\n return vars(self) == vars(other)\n\n def __getitem__(self, item: str):\n \"\"\"Getter method for subscriptability.\n\n Args:\n item: Attribute to get the value of\n\n Returns:\n Attribute value if exists in object's namespace\n \"\"\"\n return getattr(self, item)\n\n def get(self, item: str, default=None):\n \"\"\"Method for extracting attributes without throwing existence errors.\n\n Args:\n item: Attribute to get the value of\n default: Return value if attribute doesn't exist\n\n Returns:\n Attribute value or default if attribute does not exist\n \"\"\"\n return vars(self).get(item, default)\n\n def to_dict(self) -> dict:\n \"\"\"Calling utility serialization method on all attributes.\n\n Returns:\n String following valid json structure for mongo serialization.\n \"\"\"\n return {k: get_serializable(v) for k, v in vars(self).items()}\n\n @property\n def json(self) -> str:\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\n serialization purposes\n\n Returns:\n Serializable 'json' output of SpotiBot object\n \"\"\"\n return json.dumps(self.to_dict())\n\n\n# test = UserDBO('125393293')\n# test.tokens.is_expired\n" }, { "alpha_fraction": 0.6302083134651184, "alphanum_fraction": 0.6302083134651184, "avg_line_length": 19.33333396911621, "blob_id": "f03fa7c81c49decb70733c7f945b4029b02cef59", "content_id": "5caad6f829fc8e3cb4deb0ca7e131e2db597688b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 38, "num_lines": 9, "path": "/spotibot/__init__.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "# from spotibot.core.utils import \\\r\n# Hasher, \\\r\n# FileManager, \\\r\n# GetConfigs\r\n\r\n# from spotibot.base.auth 
import OAuth\r\n\r\n# from spotibot.mongo.utils import \\\r\n# Handlers\r\n" }, { "alpha_fraction": 0.6723484992980957, "alphanum_fraction": 0.6723484992980957, "avg_line_length": 24.399999618530273, "blob_id": "7379b69c97e67d3c45339b8dd148784dc0864a8c", "content_id": "c1732adb2ad0c556843aba6f74f5454999be3554", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 528, "license_type": "no_license", "max_line_length": 79, "num_lines": 20, "path": "/spotibot/mongo/core/objects/User.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "# import configparser\r\n# import os\r\n\r\n# config = configparser.ConfigParser()\r\n# config.read(os.path.join(os.getcwd(), 'mongo_creds.cfg'))\r\n\r\nfrom mongoengine import *\r\n\r\n# from spotibot.mongo.conn import \\\r\n# Connector as conn\r\n\r\n# conn_str = conn.get_creds(collection='users')\r\n# connect(host=conn_str)\r\n\r\n\r\nclass User(Document):\r\n username: str = StringField(required=True, primary_key=True)\r\n obj: str = StringField(required=True)\r\n\r\n meta = {\"allow_inheritance\": True, \"collection\": \"users\", \"db\": \"SpotiBot\"}\r\n" }, { "alpha_fraction": 0.5711438059806824, "alphanum_fraction": 0.5718082785606384, "avg_line_length": 26.472972869873047, "blob_id": "a7dd86bc15200a863f3fd307c68f2ddfbf73b2ca", "content_id": "85201179393fa293d75ce0aa96b9fdb09bce52eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10535, "license_type": "no_license", "max_line_length": 79, "num_lines": 370, "path": "/spotibot/base/auth/OAuth.py", "repo_name": "GEM7318/SpotiBot", "src_encoding": "UTF-8", "text": "# ---------------/ Gets token object for a new user /--------------------------\r\n\r\nimport json\r\nimport jsonpickle\r\nimport time\r\nfrom requests_oauth2 import OAuth2\r\n\r\nfrom spotibot.mongo.utils.Handlers import get_serializable\r\n\r\nfrom spotibot.core.objects import Time as spottime\r\n\r\nfrom spotibot.mongo.core.objects import User as UserDoc\r\n\r\n\r\n# from spotibot.mongo.conn import \\\r\n# Connector\r\n\r\n\r\nclass SpotifyClient(OAuth2):\r\n def __eq__(self, other) -> bool:\r\n \"\"\"Equality comparison to other objects.\r\n\r\n Args:\r\n other: Comparison object\r\n\r\n Returns:\r\n Boolean value indicating whether or not the attributes and their\r\n associated values are equal between the two objects\r\n \"\"\"\r\n return vars(self) == vars(other)\r\n\r\n def __getitem__(self, item: str):\r\n \"\"\"Getter method for subscriptability.\r\n\r\n Args:\r\n item: Attribute to get the value of\r\n\r\n Returns:\r\n Attribute value if exists in object's namespace\r\n \"\"\"\r\n return getattr(self, item)\r\n\r\n def get(self, item: str, default=None):\r\n \"\"\"Method for extracting attributes without throwing existence errors.\r\n\r\n Args:\r\n item: Attribute to get the value of\r\n default: Return value if attribute doesn't exist\r\n\r\n Returns:\r\n Attribute value or default if attribute does not exist\r\n \"\"\"\r\n return vars(self).get(item, default)\r\n\r\n def to_dict(self) -> dict:\r\n \"\"\"Calling utility serialization method on all attributes.\r\n\r\n Returns:\r\n String following valid json structure for mongo serialization.\r\n \"\"\"\r\n return {k: get_serializable(v) for k, v in vars(self).items()}\r\n\r\n @property\r\n def json(self) -> str:\r\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\r\n serialization purposes\r\n\r\n Returns:\r\n Serializable 'json' output 
of SpotiBot object\r\n        \"\"\"\r\n        return json.dumps(self.to_dict())\r\n\r\n\r\nclass Client:\r\n    def __init__(self, cfg: dict):\r\n        self.cfg = cfg\r\n\r\n        attrs = cfg.get(\"CLIENT\")\r\n\r\n        self.client_id = attrs.get(\"CLIENT_ID\")\r\n\r\n        self.client_secret = attrs.get(\"CLIENT_SECRET\")\r\n\r\n        self.redirect_uri = attrs.get(\"REDIRECT_URI\")\r\n\r\n\r\nclass API(Client):\r\n    def __init__(self, cfg: dict):\r\n        super().__init__(cfg)\r\n\r\n        attrs = self.cfg.get(\"AUTH\")\r\n\r\n        self.site = attrs.get(\"SITE\")\r\n\r\n        self.authorization_url = attrs.get(\"AUTHORIZATION_URL\")\r\n\r\n        self.token_url = attrs.get(\"TOKEN_URL\")\r\n\r\n        self.scope = attrs.get(\"SCOPE\")\r\n\r\n\r\nclass Auth(API):\r\n    def __init__(self, cfg: dict, username: str):\r\n        super().__init__(cfg)\r\n\r\n        self.username = username\r\n\r\n        self.creds = SpotifyClient(\r\n            site=self.site,\r\n            authorization_url=str(self.authorization_url),\r\n            token_url=self.token_url,\r\n            scope_sep=\" \",\r\n            client_id=self.client_id,\r\n            client_secret=self.client_secret,\r\n            redirect_uri=self.redirect_uri,\r\n        )\r\n\r\n        self.authorized_url = self.creds.authorize_url(\r\n            scope=self.scope, response_type=\"code\", username=username\r\n        )\r\n\r\n    def __eq__(self, other) -> bool:\r\n        \"\"\"Equality comparison to other objects.\r\n\r\n        Args:\r\n            other: Comparison object\r\n\r\n        Returns:\r\n            Boolean value indicating whether or not the attributes and their\r\n            associated values are equal between the two objects\r\n        \"\"\"\r\n        return vars(self) == vars(other)\r\n\r\n    def __getitem__(self, item: str):\r\n        \"\"\"Getter method for subscriptability.\r\n\r\n        Args:\r\n            item: Attribute to get the value of\r\n\r\n        Returns:\r\n            Attribute value if exists in object's namespace\r\n        \"\"\"\r\n        return getattr(self, item)\r\n\r\n    def get(self, item: str, default=None):\r\n        \"\"\"Method for extracting attributes without throwing existence errors.\r\n\r\n        Args:\r\n            item: Attribute to get the value of\r\n            default: Return value if attribute doesn't exist\r\n\r\n        Returns:\r\n            Attribute value or default if attribute does not exist\r\n        \"\"\"\r\n        return vars(self).get(item, default)\r\n\r\n    def to_dict(self) -> dict:\r\n        \"\"\"Calling utility serialization method on all attributes.\r\n\r\n        Returns:\r\n            String following valid json structure for mongo serialization.\r\n        \"\"\"\r\n        return {k: get_serializable(v) for k, v in vars(self).items()}\r\n\r\n    @property\r\n    def json(self) -> str:\r\n        \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\r\n        serialization purposes\r\n\r\n        Returns:\r\n            Serializable 'json' output of SpotiBot object\r\n        \"\"\"\r\n        return json.dumps(self.to_dict())\r\n\r\n\r\nclass Token:\r\n    def __init__(self, authorized: Auth, url: str):\r\n\r\n        self.authorized = authorized\r\n\r\n        _, self.code = url.split(r\"?code=\")\r\n\r\n        self.grant_time = int(time.time())\r\n\r\n        self.data = self.authorized.creds.get_token(\r\n            code=self.code,\r\n            grant_type=\"authorization_code\",\r\n            redirect_uri=self.authorized.redirect_uri,\r\n        )\r\n\r\n        self.retries: list = []\r\n\r\n    @property\r\n    def access_token(self):\r\n        return self.data.get(\"access_token\")\r\n\r\n    @property\r\n    def refresh_token(self):\r\n        return self.data.get(\"refresh_token\")\r\n\r\n    @property\r\n    def expiration_time(self):\r\n        return self.grant_time + self.data.get(\"expires_in\")\r\n\r\n    @property\r\n    def scope(self):\r\n        # NOTE: this previously returned self.scope, which recursed\r\n        # infinitely on any access; surfacing the scope requested at\r\n        # authorization time from the Auth object instead.\r\n        return self.authorized.scope\r\n\r\n    @property\r\n    def is_expired(self):\r\n        try:\r\n            return (self.expiration_time - 10) < time.time()\r\n        except IOError as e:\r\n            
print(e)\r\n\r\n @property\r\n def expires_in(self):\r\n return spottime.Timestamp(\r\n (self.expiration_time - 10) - time.time(), base=\"seconds\"\r\n )\r\n\r\n def refresh(self):\r\n\r\n self.grant_time = int(time.time())\r\n\r\n data_refresh = self.authorized.creds.refresh_token(\r\n grant_type=\"refresh_token\", refresh_token=self.refresh_token,\r\n )\r\n\r\n if not data_refresh.get(\"access_token\"):\r\n\r\n data_refresh = self.authorized.creds.refresh_token(\r\n grant_type=\"refresh_token\", refresh_token=self.refresh_token,\r\n )\r\n\r\n self.retries.append(vars(data_refresh))\r\n\r\n assert data_refresh.get(\r\n \"access_token\"\r\n ), f\"Access token not returned after {len(self.retries)} retries\"\r\n\r\n self.data.update(data_refresh)\r\n\r\n return self\r\n\r\n def __eq__(self, other) -> bool:\r\n \"\"\"Equality comparison to other objects.\r\n\r\n Args:\r\n other: Comparison object\r\n\r\n Returns:\r\n Boolean value indicating whether or not the attributes and their\r\n associated values are equal between the two objects\r\n \"\"\"\r\n return vars(self) == vars(other)\r\n\r\n def __getitem__(self, item: str):\r\n \"\"\"Getter method for subscriptability.\r\n\r\n Args:\r\n item: Attribute to get the value of\r\n\r\n Returns:\r\n Attribute value if exists in object's namespace\r\n \"\"\"\r\n return getattr(self, item)\r\n\r\n def get(self, item: str, default=None):\r\n \"\"\"Method for extracting attributes without throwing existence errors.\r\n\r\n Args:\r\n item: Attribute to get the value of\r\n default: Return value if attribute doesn't exist\r\n\r\n Returns:\r\n Attribute value or default if attribute does not exist\r\n \"\"\"\r\n return vars(self).get(item, default)\r\n\r\n def to_dict(self) -> dict:\r\n \"\"\"Calling utility serialization method on all attributes.\r\n\r\n Returns:\r\n String following valid json structure for mongo serialization.\r\n \"\"\"\r\n return {k: get_serializable(v) for k, v in vars(self).items()}\r\n\r\n @property\r\n def json(self) -> str:\r\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\r\n serialization purposes\r\n\r\n Returns:\r\n Serializable 'json' output of SpotiBot object\r\n \"\"\"\r\n return json.dumps(self.to_dict())\r\n\r\n\r\nclass UserDBI:\r\n def __init__(self, token: Token):\r\n self.token = token\r\n\r\n self.username = self.token.authorized.username\r\n\r\n self.pickled = jsonpickle.encode(self.token)\r\n\r\n self.mongo = UserDoc.User.from_json(\r\n json.dumps({\"username\": self.username, \"obj\": self.pickled})\r\n )\r\n\r\n def save(self, **kwargs):\r\n if UserDoc.User.objects(pk=self.username).first():\r\n UserDoc.User.objects(pk=self.username).delete()\r\n self.mongo.save(force_insert=True, **kwargs)\r\n\r\n return self\r\n\r\n def __eq__(self, other) -> bool:\r\n \"\"\"Equality comparison to other objects.\r\n\r\n Args:\r\n other: Comparison object\r\n\r\n Returns:\r\n Boolean value indicating whether or not the attributes and their\r\n associated values are equal between the two objects\r\n \"\"\"\r\n return vars(self) == vars(other)\r\n\r\n def __getitem__(self, item: str):\r\n \"\"\"Getter method for subscriptability.\r\n\r\n Args:\r\n item: Attribute to get the value of\r\n\r\n Returns:\r\n Attribute value if exists in object's namespace\r\n \"\"\"\r\n return getattr(self, item)\r\n\r\n def get(self, item: str, default=None):\r\n \"\"\"Method for extracting attributes without throwing existence errors.\r\n\r\n Args:\r\n item: Attribute to get the value of\r\n default: Return value if attribute 
doesn't exist\r\n\r\n Returns:\r\n Attribute value or default if attribute does not exist\r\n \"\"\"\r\n return vars(self).get(item, default)\r\n\r\n def to_dict(self) -> dict:\r\n \"\"\"Calling utility serialization method on all attributes.\r\n\r\n Returns:\r\n String following valid json structure for mongo serialization.\r\n \"\"\"\r\n return {k: get_serializable(v) for k, v in vars(self).items()}\r\n\r\n @property\r\n def json(self) -> str:\r\n \"\"\"Jsonified/string attribute for all SpotiBot objects for mongo\r\n serialization purposes\r\n\r\n Returns:\r\n Serializable 'json' output of SpotiBot object\r\n \"\"\"\r\n return json.dumps(self.to_dict())\r\n" } ]
37
kyle-gross/AirBnB_clone
https://github.com/kyle-gross/AirBnB_clone
0ddb625bf3220ae4f6891bdbf24f3eb3688adfc1
2b30a4cd413c1df0ccef545642c5de43c2e64902
2ab062876546ebd5b9af34270f4e1b74ba4cab06
refs/heads/main
2023-05-30T10:39:40.940871
2021-06-03T00:03:34
2021-06-03T00:03:34
368,580,554
0
1
null
2021-05-18T15:29:03
2021-05-23T22:06:06
2021-05-25T21:46:36
Python
[ { "alpha_fraction": 0.48661598563194275, "alphanum_fraction": 0.49355584383010864, "avg_line_length": 31.363636016845703, "blob_id": "4ef2bb8b82ae1f818682ecab9c8d33afc01e7f7e", "content_id": "2743066d08ec3c907fe4e98e06cea67897175284", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6052, "license_type": "no_license", "max_line_length": 79, "num_lines": 187, "path": "/console.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"\nThis module contains the console (command interpreter) for\nthe Holberton School AirBnB project\n\"\"\"\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.place import Place\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.review import Review\nfrom models.engine.file_storage import FileStorage\nimport cmd\nimport sys\nimport models\n\n\nclass HBNBCommand(cmd.Cmd):\n \"\"\"This class contains the console for HBnB project\"\"\"\n\n obj_dict = {\"BaseModel\": BaseModel, \"User\": User, \"Place\": Place,\n \"State\": State, \"City\": City, \"Amenity\": Amenity,\n \"Review\": Review}\n\n prompt = '(hbnb)'\n\n def do_all(self, arg):\n \"\"\"Prints all str representations of all instances based on class name\n if no class name, prints all instances of all objects\"\"\"\n arg_list = arg.split(\" \")\n my_dict = models.storage.all()\n if len(arg) == 0:\n for i in my_dict.values():\n print(i)\n elif arg_list[0] not in self.obj_dict.keys():\n print(\"** class doesn't exist **\")\n else:\n for k, v in my_dict.items():\n class_name = k.split('.')\n if class_name[0] == arg_list[0]:\n print(v)\n\n def help_all(self):\n \"\"\"Help for all\"\"\"\n print(\"Prints string representation of all instances\")\n\n def do_update(self, arg):\n \"\"\"Update an instance based on class name and id\"\"\"\n arg_list = arg.split(\" \")\n my_dict = models.storage.all()\n signal = 0\n if len(arg) == 0:\n print(\"** class name missing **\")\n return\n elif arg_list[0] not in self.obj_dict.keys():\n print(\"** class doesn't exist **\")\n return\n elif len(arg_list) < 2:\n print(\"** instance id missing **\")\n return\n else:\n new_str = str(arg_list[0] + '.' + arg_list[1])\n for k in my_dict.keys():\n if new_str == k:\n signal = 1\n break\n if signal == 0:\n print(\"** no instance found **\")\n return\n if len(arg_list) < 3:\n print(\"** attribute name missing **\")\n return\n elif len(arg_list) < 4:\n print(\"** value missing **\")\n return\n else:\n new_dict = my_dict[new_str]\n try:\n attr = getattr(new_dict, arg_list[2])\n if type(attr) is int:\n setattr(new_dict, arg_list[2], int(arg_list[3]))\n elif type(attr) is float:\n setattr(new_dict, arg_list[2], float(arg_list[3]))\n else:\n setattr(new_dict, arg_list[2], arg_list[3][1:-1])\n except:\n setattr(new_dict, arg_list[2], arg_list[3][1:-1])\n models.storage.save()\n\n def do_show(self, arg):\n \"\"\"Prints str representation of an instance\"\"\"\n new_list = arg.split(\" \")\n if len(arg) == 0:\n print(\"** class name missing **\")\n elif new_list[0] not in self.obj_dict.keys():\n print(\"** class doesn't exist **\")\n elif len(new_list) == 1:\n print(\"** instance id missing **\")\n else:\n my_dict = models.storage.all()\n new_str = str(new_list[0] + '.' 
+ new_list[1])\n for k, v in my_dict.items():\n if k == new_str:\n print(v)\n return\n print(\"** no instance found **\")\n\n def help_show(self):\n \"\"\"Help for show\"\"\"\n print(\"Prints the string representation of an instance.\")\n\n def do_create(self, arg):\n \"\"\"Creates new BaseModel objects\"\"\"\n if len(arg) == 0:\n print(\"** class name missing **\")\n elif arg not in self.obj_dict.keys():\n print(\"** class doesn't exist **\")\n else:\n obj = self.obj_dict[arg]()\n obj.save()\n print(obj.id)\n\n def help_create(self):\n \"\"\"Help for create\"\"\"\n print(\"Creates new instance of BaseModel, saves it, and prints the id\")\n\n def do_destroy(self, arg):\n \"\"\"Destroys an instance based on class name and id\"\"\"\n new_list = arg.split(\" \")\n if len(arg) == 0:\n print(\"** class name missing **\")\n elif new_list[0] not in self.obj_dict.keys():\n print(\"** class doesn't exist **\")\n elif len(new_list) == 1:\n print(\"** instance id missing **\")\n else:\n my_dict = models.storage.all()\n new_str = str(new_list[0] + '.' + new_list[1])\n for k in my_dict.keys():\n if k == new_str:\n my_dict.pop(k)\n models.storage.save()\n return\n print(\"** no instance found **\")\n\n def help_destroy(self):\n \"\"\"Help for destroy\"\"\"\n print(\"Destroys an instance of an object based on class name and id\")\n\n def do_quit(self, arg):\n \"\"\"Execute quit\"\"\"\n sys.exit(1)\n\n def help_quit(self):\n \"\"\"Help for quit\"\"\"\n print(\"Quit command to exit the program\")\n\n def do_EOF(self, line):\n \"\"\"Execute EOF\"\"\"\n return True\n\n def help_EOF(self):\n \"\"\"Help for EOF\"\"\"\n print(\"EOF command to exit the program\")\n\n def emptyline(self):\n pass\n\n def default(self, line):\n try:\n line_list = line.split(\".\")\n obj = line_list[0]\n cmnd = line_list[1]\n if cmnd == \"all()\":\n self.do_all(obj)\n if cmnd == \"count()\":\n for k, v in self.obj_dict.items():\n if k == obj:\n print(\"{}\".format(v.count))\n break\n \n except:\n print(\"***Unknown syntax: {}\".format(line))\n\nif __name__ == '__main__':\n HBNBCommand().cmdloop()\n" }, { "alpha_fraction": 0.4952004551887512, "alphanum_fraction": 0.49858835339546204, "avg_line_length": 29.534482955932617, "blob_id": "284dcad14fe7aa5babfbd567cc70e6da698d2164", "content_id": "b91d094b8b8d01d9201f81be4760bde5cb94bf9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1771, "license_type": "no_license", "max_line_length": 74, "num_lines": 58, "path": "/models/base_model.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"\nModule for BaseModel class\n\"\"\"\nfrom datetime import datetime\nimport uuid\nimport models\n\nclass BaseModel:\n \"\"\"\n Class that is the base of other models\n \"\"\"\n\n count = 0\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor of class BaseModel instance\n \"\"\"\n if kwargs is not None and len(kwargs) != 0:\n for i in kwargs:\n if i == \"__class__\":\n continue\n if i == \"created_at\" or i == \"updated_at\":\n kwargs[i] = datetime.strptime(kwargs[i],\n \"%Y-%m-%dT%H:%M:%S.%f\")\n setattr(self, i, kwargs[i])\n BaseModel.count += 1\n else:\n self.id = str(uuid.uuid4())\n self.created_at = datetime.now(tz=None)\n self.updated_at = self.created_at\n models.storage.new(self)\n BaseModel.count += 1\n\n def __str__(self):\n \"\"\"\n String representation of the instance created\n \"\"\"\n return \"[{}] ({}) {}\".format(type(self).__name__, self.id,\n self.__dict__)\n\n def 
save(self):\n \"\"\"\n Updates 'updated_at' with current datetime\n \"\"\"\n self.updated_at = datetime.today()\n models.storage.save()\n\n def to_dict(self):\n \"\"\"\n Returns dictionary containing all keys and values of instance\n Filters data that starts with underscores, methods, and functions.\n \"\"\"\n my_dict = self.__dict__.copy()\n my_dict['__class__'] = type(self).__name__\n my_dict['created_at'] = self.created_at.isoformat()\n my_dict['updated_at'] = self.updated_at.isoformat()\n return my_dict\n" }, { "alpha_fraction": 0.5553763508796692, "alphanum_fraction": 0.5591397881507874, "avg_line_length": 28.0625, "blob_id": "dc9a5483bbd463f47aed708907dc7fb572835cd9", "content_id": "bc198abc2a15f2e268e9cd74fec5840750d21202", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1860, "license_type": "no_license", "max_line_length": 76, "num_lines": 64, "path": "/models/engine/file_storage.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"\nModule for class FileStorage\n\"\"\"\nfrom models.base_model import BaseModel\nfrom models.user import User\nfrom models.place import Place\nfrom models.state import State\nfrom models.city import City\nfrom models.amenity import Amenity\nfrom models.review import Review\nimport json\nimport os\n\n\nclass FileStorage:\n \"\"\"\n Serializes instances to a JSON file\n Deserializes JSON file to instances\n \"\"\"\n\n cls_dict = {\"BaseModel\": BaseModel, \"User\": User, \"Place\": Place,\n \"State\": State, \"City\": City, \"Amenity\": Amenity,\n \"Review\": Review}\n __file_path = 'file.json'\n __objects = {}\n\n def all(self):\n \"\"\"\n Returns the dictionary __objects\n \"\"\"\n return FileStorage.__objects\n\n def new(self, obj):\n \"\"\"\n Sets in __objects the OBJ with key <obj class name>.id\n \"\"\"\n new_key = type(obj).__name__\n new_key += \".\" + obj.id\n FileStorage.__objects[new_key] = obj\n\n def save(self):\n \"\"\"\n Serializes __objects to the JSON file(path:__file_path)\n \"\"\"\n dict2 = {}\n with open(FileStorage.__file_path, mode='w', encoding='utf-8') as f:\n for key, value in FileStorage.__objects.items():\n dict2[key] = value.to_dict()\n json.dump(dict2, f)\n\n def reload(self):\n \"\"\"\n Deserializes the JSON file to __objects\n Only if the JSON file __file_path exists\n \"\"\"\n file_path = FileStorage.__file_path\n if os.path.exists(file_path):\n with open(file_path, mode='r', encoding='utf-8') as f:\n temp_objs = json.load(f)\n for k, v in temp_objs.items():\n new_list = k.split(\".\")\n new_obj = self.cls_dict[new_list[0]]\n FileStorage.__objects[k] = new_obj(**v)\n" }, { "alpha_fraction": 0.6556962132453918, "alphanum_fraction": 0.6582278609275818, "avg_line_length": 20.94444465637207, "blob_id": "9f7311451ea8a4d132711d78203199511a7ec7f0", "content_id": "2520c4380d4e9f7c054fee75b339f821ed0f013b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 64, "num_lines": 18, "path": "/tests/test_models/test_amenity.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"Unit test cases for class Amenity(inherits from BaseModel)\"\"\"\n\nimport unittest\nfrom models.amenity import Amenity\n\na = Amenity()\n\n\nclass test_amenity(unittest.TestCase):\n \"\"\"Holds tests for class State\"\"\"\n\n def test_name(self):\n self.assertTrue(hasattr(a, \"name\"))\n 
self.assertIsInstance(a.name, str)\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4479166567325592, "alphanum_fraction": 0.45370370149612427, "avg_line_length": 26, "blob_id": "f44f7521142f31d3d753f6097866842d1dad25ca", "content_id": "8f4ea676011badc9fa92cf64afecdae269c47b58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 864, "license_type": "no_license", "max_line_length": 73, "num_lines": 32, "path": "/models/user.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"\nThis module contains the User class\n\"\"\"\nfrom datetime import datetime\nfrom models.base_model import BaseModel\n\n\nclass User(BaseModel):\n \"\"\"Class for User\"\"\"\n email = \"\"\n password = \"\"\n first_name = \"\"\n last_name = \"\"\n count = 0\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor of class User instance\n \"\"\"\n if kwargs is not None and len(kwargs) != 0:\n for i in kwargs:\n if i == \"__class__\":\n continue\n if i == \"created_at\" or i == \"updated_at\":\n kwargs[i] = datetime.strptime(kwargs[i],\n \"%Y-%m-%dT%H:%M:%S.%f\")\n setattr(self, i, kwargs[i])\n User.count += 1\n else:\n super().__init__()\n User.count += 1\n" }, { "alpha_fraction": 0.7111853361129761, "alphanum_fraction": 0.7255425453186035, "avg_line_length": 31.204301834106445, "blob_id": "f4e8562f96f803fe5e515b65331ced5cbea46b80", "content_id": "61cc2637dc0df000617e632d23d37839ceccddce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2995, "license_type": "no_license", "max_line_length": 193, "num_lines": 93, "path": "/README.md", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "# Lonzo and Kyle Hbnb Console\n## Description\n\nRecreation of the AirBnb console. Takes input from user and stores data in JSON file. 
Takes various commands pertaining to and Airbnb listing.\n\n## Requirements\n\n* Must follow Pep8 style guidelines\n* Allowed code editors are: vi, vim, and emacs\n* Must have `README.md`file\n* Must have **#!/usr/bin/python3** as shebang\n* All files must be executable\n* All classes, modules, and functions should have documentation\n\n## Unit Test Requirements\n* All test files should end with a new line\n* All test files should be inside a folder **tests**\n* All test files should be python files\n* All tests files and folder names should start with **test_**\n* All classes, modules, and functions should have documentation\n\n## Files, Folders, and Functions\n \n### Unit Tests\n* test_models - Directory for testing Amenity, BaseModel, City, Place, Review, State, and User classes\n * test_amenity.py - tests amenity class\n * test_base_model.py - tests BaseModel class\n * test_city.py - tests City class\n * test_place.py - tests Place class\n * test_review.py - tests Review class\n * test_state.py - tests State class\n * test_user.py - tests User class\n* test_engine - Directory for testing FileStorage\n * test_file_storage.py - Tests FileStorage class\n\n### Models\n* models - Directory for Amenity, BaseModel, City, Place, Review, State, and User classes\n * amenity.py - amenity class\n * base_model.py - BaseModel class\n * city.py - City class\n * place.py - Place class\n * review.py - Review class\n * state.py - State class\n * user.py - User class\n* engine - Directory for FileStorage\n * file_storage.py - FileStorage class\n\t * Deals with saving, reloading, and new objects.\n\n### console.py\nThis is sort of like a shell from a previous project. There is a prompt and it takes input from the keyboard. Help functions were created to display help text if a command was used incorrectly.\n\n* do_all(self, arg) - Prints all str representations of all instances based on class name. If no class name, prints all instances of all objects\n* do_update(self, arg) - Update an instance based on class name and id\n* do_show(self, arg) - Prints str representation of an instance\n* do_create(self, arg) - Creates new BaseModel objects\n* do_destroy(self, arg) - Destroys an instance based on class name and id\n* do_quit(self, arg) - Executes the quit command to exit console.\n* do_EOF(self, line) - Executes EOF\n* emptyline(self) - deals with empty line given in console\n* default(self, line) - deals with the command class name.all()\n## Compilation\n\n```c\n./console.py\n```\n## Sample Output\n### Interactive Mode\n\n```c\n~/user$ ./console.py\n(hbnb)create User\n1e756283-81d8-4061-b4d3-65c86335b4ca\n(hbnb)\n\n```\n\n### Non-Interactive Mode\n\n```c\n~/user$ echo \"create City\" | ./console.py\n(hbnb)2cc29369-fc0d-462f-abfd-c73b0093a5cb\n~/user$\n```\n\n## Bugs\n\nNone at this time\n\n\n## Authors\n\nLonzo Rust | [GitHub](https://github.com/lonzor)\nKyle Gross | [GitHub](https://github.com/kyle-gross)\n" }, { "alpha_fraction": 0.4569767415523529, "alphanum_fraction": 0.46279069781303406, "avg_line_length": 26.74193572998047, "blob_id": "abcd27856e5333ca2d8bbea491d5bda47ceaa9fe", "content_id": "15ecf50b9c1baf1818e44c6602fd7ca48740652a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 860, "license_type": "no_license", "max_line_length": 73, "num_lines": 31, "path": "/models/city.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"Module for class City. 
Inherits from BaseModel.\"\"\"\n\n\nfrom models.base_model import BaseModel\nfrom datetime import datetime\n\n\nclass City(BaseModel):\n \"\"\"City data - state_id and name of city\"\"\"\n\n state_id = \"\"\n name = \"\"\n count = 0\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor of class User instance\n \"\"\"\n if kwargs is not None and len(kwargs) != 0:\n for i in kwargs:\n if i == \"__class__\":\n continue\n if i == \"created_at\" or i == \"updated_at\":\n kwargs[i] = datetime.strptime(kwargs[i],\n \"%Y-%m-%dT%H:%M:%S.%f\")\n setattr(self, i, kwargs[i])\n City.count += 1\n else:\n super().__init__()\n City.count += 1\n" }, { "alpha_fraction": 0.6379746794700623, "alphanum_fraction": 0.6405063271522522, "avg_line_length": 22.235294342041016, "blob_id": "b76a77a46843f3abf1b8b9db869083b2a793c407", "content_id": "390beba21f069e85f5b92a875494d70fbe677922", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 395, "license_type": "no_license", "max_line_length": 62, "num_lines": 17, "path": "/tests/test_models/test_state.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"Unit test cases for class State(inherits from BaseModel)\"\"\"\n\nimport unittest\nfrom models.state import State\n\n\nclass test_state(unittest.TestCase):\n \"\"\"Holds tests for class State\"\"\"\n\n def test_name(self):\n st = State()\n self.assertTrue(hasattr(st, \"name\"))\n self.assertIsInstance(st.name, str)\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.5795356631278992, "alphanum_fraction": 0.5872743129730225, "avg_line_length": 24.282608032226562, "blob_id": "8bff887ac3384c4ae8e592cd605d1c7a074cebce", "content_id": "72c9040557a1bd36d73dc82c88a3bd956bdb254a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1163, "license_type": "no_license", "max_line_length": 60, "num_lines": 46, "path": "/tests/test_models/test_base_model.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\nimport unittest\nfrom models.base_model import BaseModel\nimport os\nfrom datetime import datetime\n\n\nclass TestBaseModel(unittest.TestCase):\n \"\"\"\n Tests BaseModel class\n \"\"\"\n\n def test_save(self):\n model = BaseModel()\n time1 = model.updated_at\n model.save()\n time2 = model.updated_at\n self.assertNotEqual(time1, time2)\n\n def test_save2(self):\n if os.path.exists(\"file.json\"):\n os.remove(\"file.json\")\n model = BaseModel()\n model.save()\n self.assertNotEqual(os.path.getsize(\"file.json\"), 0)\n\n def test_to_dict(self):\n model = BaseModel()\n dict1 = model.to_dict()\n self.assertIn(\"__class__\", dict1.keys())\n\n def test_self_id(self):\n model = BaseModel()\n self.assertNotEqual(model.id, \"\")\n\n def test_created_at(self):\n model = BaseModel()\n self.assertIsInstance(model.created_at, datetime)\n\n def test_str(self):\n model = BaseModel()\n self.assertIn(model.id, model.__str__())\n self.assertIn(str(model.__dict__), model.__str__())\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.468125581741333, "alphanum_fraction": 0.48049476742744446, "avg_line_length": 25.274999618530273, "blob_id": "b96e4c1306b03f9cf755632eb82c336f6430a099", "content_id": "0c7b56fe078484952a2cf5433cd8e8a63712b66c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1051, "license_type": "no_license", 
"max_line_length": 73, "num_lines": 40, "path": "/models/place.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"Module for class Place. Inherits from BaseModel\"\"\"\n\n\nfrom models.base_model import BaseModel\nfrom datetime import datetime\n\n\nclass Place(BaseModel):\n \"\"\"Has lots of data about the Airbnb listing\"\"\"\n\n city_id = \"\"\n user_id = \"\"\n name = \"\"\n description = \"\"\n number_rooms = 0\n number_bathrooms = 0\n max_guest = 0\n price_by_night = 0\n latitude = 0.0\n longitude = 0.0\n amenity_ids = []\n count = 0\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor of class User instance\n \"\"\"\n if kwargs is not None and len(kwargs) != 0:\n for i in kwargs:\n if i == \"__class__\":\n continue\n if i == \"created_at\" or i == \"updated_at\":\n kwargs[i] = datetime.strptime(kwargs[i],\n \"%Y-%m-%dT%H:%M:%S.%f\")\n setattr(self, i, kwargs[i])\n Place.count += 1\n else:\n super().__init__()\n Place.count += 1\n" }, { "alpha_fraction": 0.5966569781303406, "alphanum_fraction": 0.6075581312179565, "avg_line_length": 24.481481552124023, "blob_id": "0f43f966a9474d04af26a0540e0493dd6898a0d1", "content_id": "253787b3389d2e7ef36b7074b287b3c1b8873667", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1376, "license_type": "no_license", "max_line_length": 70, "num_lines": 54, "path": "/tests/test_models/test_engine/test_file_storage.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"\nContains tests for FileStorge class\n\"\"\"\nimport unittest\nimport os\nfrom models.base_model import BaseModel\nfrom models.engine.file_storage import FileStorage\n\n\nfs = FileStorage()\n\n\nclass TestFileStorage(unittest.TestCase):\n \"\"\"\n Tests for FileStorage class\n \"\"\"\n model = BaseModel()\n file_path = fs._FileStorage__file_path\n\n def test_all(self):\n if os.path.exists(self.file_path):\n os.remove(self.file_path)\n self.assertIsInstance(fs.all(), dict)\n\n def test_new(self):\n fs_all = fs.all().copy()\n model2 = BaseModel()\n self.assertNotEqual(fs_all, fs.all())\n\n def test_save(self):\n if os.path.exists(self.file_path):\n os.remove(self.file_path)\n model2 = BaseModel()\n model2.save()\n self.assertNotEqual(os.path.getsize(self.file_path), 0)\n\n def test_reload(self):\n if os.path.exists(self.file_path):\n os.remove(self.file_path)\n model2 = BaseModel()\n model2.save()\n fs.reload()\n all_dict = fs.all().copy()\n model2.my_num = 100\n self.assertEqual(model2.my_num, 100)\n fs.reload()\n self.assertNotEqual(fs.all(), all_dict)\n\n def test_obj_dict(self):\n self.assertIsInstance(FileStorage._FileStorage__objects, dict)\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6376237869262695, "alphanum_fraction": 0.6396039724349976, "avg_line_length": 21.954545974731445, "blob_id": "6d004217eae5c90d0efee593b77d62beb1b960af", "content_id": "d060b0600510111a824b204d7789279206514da2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 61, "num_lines": 22, "path": "/tests/test_models/test_city.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"Unit test cases for class City(inherits from BaseModel)\"\"\"\n\nimport unittest\nfrom models.city import City\n\nc = City()\n\n\nclass test_city(unittest.TestCase):\n \"\"\"Holds 
tests for class State\"\"\"\n\n def test_name(self):\n self.assertTrue(hasattr(c, \"name\"))\n self.assertIsInstance(c.name, str)\n\n def test_state_id(self):\n self.assertTrue(hasattr(c, \"state_id\"))\n self.assertIsInstance(c.state_id, str)\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.4654761850833893, "alphanum_fraction": 0.4714285731315613, "avg_line_length": 27, "blob_id": "9c34775fe866c0f6c6ecaf84d3b3614faeaccb33", "content_id": "62b092dc69568ab8e826ce1dd4633d649eb00abf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 840, "license_type": "no_license", "max_line_length": 73, "num_lines": 30, "path": "/models/amenity.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"Module for class Amenity. Inherits from BaseModel\"\"\"\n\n\nfrom models.base_model import BaseModel\nfrom datetime import datetime\n\n\nclass Amenity(BaseModel):\n \"\"\"Class holds amenity name\"\"\"\n\n name = \"\"\n count = 0\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Constructor of class User instance\n \"\"\"\n if kwargs is not None and len(kwargs) != 0:\n for i in kwargs:\n if i == \"__class__\":\n continue\n if i == \"created_at\" or i == \"updated_at\":\n kwargs[i] = datetime.strptime(kwargs[i],\n \"%Y-%m-%dT%H:%M:%S.%f\")\n setattr(self, i, kwargs[i])\n Amenity.count += 1\n else:\n super().__init__()\n Amenity.count += 1\n" }, { "alpha_fraction": 0.624211847782135, "alphanum_fraction": 0.625472903251648, "avg_line_length": 23.030303955078125, "blob_id": "05ae8f872210e5cb398e226c0696c4d6d71b3989", "content_id": "37467957635239a8974d76308ea789cc6f79a564", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 793, "license_type": "no_license", "max_line_length": 49, "num_lines": 33, "path": "/tests/test_models/test_user.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"\nThis module contains unittests for the User class\n\"\"\"\nimport unittest\nimport os\nfrom models.user import User\n\n\nu = User()\n\n\nclass TestUser(unittest.TestCase):\n \"\"\"Unittests for User class\"\"\"\n\n def test_email(self):\n self.assertTrue(hasattr(u, \"email\"))\n self.assertIsInstance(u.email, str)\n \n def test_password(self):\n self.assertTrue(hasattr(u, \"password\"))\n self.assertIsInstance(u.password, str)\n \n def test_first_name(self):\n self.assertTrue(hasattr(u, \"first_name\"))\n self.assertIsInstance(u.first_name, str)\n \n def test_last_name(self):\n self.assertTrue(hasattr(u, \"last_name\"))\n self.assertIsInstance(u.last_name, str)\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6438438296318054, "alphanum_fraction": 0.644444465637207, "avg_line_length": 27.70689582824707, "blob_id": "e32f7f832f454436739823cff10424f780dd0b40", "content_id": "c61c40eb422e65d7f5b3e5ec51a18a789d85f291", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1665, "license_type": "no_license", "max_line_length": 55, "num_lines": 58, "path": "/tests/test_models/test_place.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"Unit tests for place.\"\"\"\n\nimport unittest\nfrom models.place import Place\n\n\np = Place()\n\n\nclass test_place(unittest.TestCase):\n \"\"\"Tests for place\"\"\"\n def test_city_id(self):\n self.assertTrue(hasattr(p, 
\"city_id\"))\n self.assertIsInstance(p.city_id, str)\n\n def test_user_id(self):\n self.assertTrue(hasattr(p, \"user_id\"))\n self.assertIsInstance(p.user_id, str)\n\n def test_name(self):\n self.assertTrue(hasattr(p, \"name\"))\n self.assertIsInstance(p.name, str)\n\n def test_description(self):\n self.assertTrue(hasattr(p, \"description\"))\n self.assertIsInstance(p.description, str)\n\n def test_number_rooms(self):\n self.assertTrue(hasattr(p, \"number_rooms\"))\n self.assertIsInstance(p.number_rooms, int)\n\n def test_number_bathrooms(self):\n self.assertTrue(hasattr(p, \"number_bathrooms\"))\n self.assertIsInstance(p.number_bathrooms, int)\n\n def test_max_guest(self):\n self.assertTrue(hasattr(p, \"max_guest\"))\n self.assertIsInstance(p.max_guest, int)\n\n def test_price_by_night(self):\n self.assertTrue(hasattr(p, \"price_by_night\"))\n self.assertIsInstance(p.price_by_night, int)\n\n def test_latitude(self):\n self.assertTrue(hasattr(p, \"latitude\"))\n self.assertIsInstance(p.latitude, float)\n\n def test_longitude(self):\n self.assertTrue(hasattr(p, \"longitude\"))\n self.assertIsInstance(p.longitude, float)\n\n def test_amenity_ids(self):\n self.assertTrue(hasattr(p, \"amenity_ids\"))\n self.assertIsInstance(p.amenity_ids, list)\n\nif __name__ == '__main__':\n unittest.main()\n" }, { "alpha_fraction": 0.6319218277931213, "alphanum_fraction": 0.6335504651069641, "avg_line_length": 22.615385055541992, "blob_id": "053244809d803e974fdc73e2a216c59fc0767ce9", "content_id": "71ea43e922df572f744065fa48b3afa6f40d7e2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 614, "license_type": "no_license", "max_line_length": 47, "num_lines": 26, "path": "/tests/test_models/test_review.py", "repo_name": "kyle-gross/AirBnB_clone", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\"\"\"Module for unit tests for class Review\"\"\"\n\nimport unittest\nfrom models.review import Review\n\nr = Review()\n\n\nclass test_review(unittest.TestCase):\n \"\"\"Holds tests for class Review\"\"\"\n\n def test_place_id(self):\n self.assertTrue(hasattr(r, \"place_id\"))\n self.assertIsInstance(r.place_id, str)\n\n def test_user_id(self):\n self.assertTrue(hasattr(r, \"user_id\"))\n self.assertIsInstance(r.user_id, str)\n\n def text(self):\n self.assertTrue(hasattr(r, \"text\"))\n self.assertIsInstance(r.text, str)\n\nif __name__ == '__main__':\n unittest.main()\n" } ]
16
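The console.py record above routes `<ClassName>.all()`-style input through `cmd.Cmd`'s `default()` hook. A minimal, self-contained sketch of that dispatch pattern (the class registry and messages here are invented, not taken from the repository):

```python
# Minimal sketch of the default() dispatch used by console.py above;
# the registry contents and output strings are illustrative only.
import cmd


class MiniConsole(cmd.Cmd):
    prompt = '(mini)'
    classes = {'User': 0, 'City': 0}  # hypothetical name -> instance count

    def do_quit(self, arg):
        """Quit command to exit the program"""
        return True

    def default(self, line):
        # Route "User.count()" style input: obj="User", cmnd="count()"
        try:
            obj, cmnd = line.split('.', 1)
        except ValueError:
            print('*** Unknown syntax: {}'.format(line))
            return
        if obj in self.classes and cmnd == 'count()':
            print(self.classes[obj])
        elif obj in self.classes and cmnd == 'all()':
            print('would print every {} instance'.format(obj))
        else:
            print('*** Unknown syntax: {}'.format(line))


if __name__ == '__main__':
    MiniConsole().cmdloop()
```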
campustimes/kaggle_titanic
https://github.com/campustimes/kaggle_titanic
d3dc15ca8c3ccaa9db7bd4a25144f2a97a3e34d9
6edf2a199217f0dac2656ce39a08e6f4d71848fc
0c283dbd8408f8349ff7efdbb31c12f38017e726
refs/heads/master
2016-08-03T04:30:20.725318
2014-09-14T11:42:30
2014-09-14T11:42:30
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6423712372779846, "alphanum_fraction": 0.6511175632476807, "avg_line_length": 25.28205108642578, "blob_id": "6f568c3dece88f8831e71390f8497841700c0442", "content_id": "a9ea7e70095ddc4cf830fcaec744948ce23ff288", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1029, "license_type": "no_license", "max_line_length": 122, "num_lines": 39, "path": "/genderclassmodel.py", "repo_name": "campustimes/kaggle_titanic", "src_encoding": "UTF-8", "text": "\nimport numpy as np\nimport sklearn, scipy, csv\n\n#data = np.loadtxt('/Users/jcauteru/Desktop/Kaggle/train.csv', delimiter = ',', skiprows= 1, usecols= (1,2))\n\n\ninfile = csv.reader(open('/Users/jcauteru/Desktop/Kaggle/train.csv', 'rb')) #Load in the csv file\nprint infile\nheader = infile.next()\ndata=[]\nfor row in infile:\n data.append(row)\ndata = np.array(data)\nprint header\nprint data[:,:]\n\nvars = ['survived', 'pclass', 'name', 'sex', 'age', 'sibsp', 'parch', 'ticket', 'fare', 'cabin', 'embarked', 'room_count']\n\nfor record in data[:,3]:\n if record == 'male': record = 1\n else: record = 2\n\nrooms = []\nfor record in data[:,9]:\n hold = [record.count(' ')]\n rooms.append(hold)\n\nroom_var = np.array(rooms)\narray2 = np.hstack((data,room_var))\n\nfor record in array2[:,:]:\n print record\n\n#test_file_obect = csv.reader(open('../csv/test.csv', 'rb'))\n#open_file_object = csv.writer(open(\"../csv/genderclasspricebasedmodelpy.csv\", \"wb\"))\n\n#header = test_file_obect.next()\n\n#First thing to do is bin up the price file\n\n\n\n" }, { "alpha_fraction": 0.6974570155143738, "alphanum_fraction": 0.7161555886268616, "avg_line_length": 28.711111068725586, "blob_id": "5fe84252dd5bc3f22c83e5cbf3e09a345b51239c", "content_id": "10e6f23ef1c602ee2672c54148798807e0cb71b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2674, "license_type": "no_license", "max_line_length": 114, "num_lines": 90, "path": "/import_train_test.py", "repo_name": "campustimes/kaggle_titanic", "src_encoding": "UTF-8", "text": "\nfrom pybrain.datasets import ClassificationDataSet\nfrom pybrain.utilities import percentError\nfrom pybrain.tools.shortcuts import buildNetwork\nfrom pybrain.supervised.trainers import BackpropTrainer\nfrom pybrain.structure.modules import SoftmaxLayer\n\nfrom scipy import diag, arange, meshgrid, where\nfrom numpy.random import multivariate_normal\n\n\n#Find number of classes;\nclasses_full = []\nraw_data = []\nfull_dim = 0\nimport csv\n\n\n\nimport numpy as np\nfrom numpy import genfromtxt\nfrom sklearn import (metrics, cross_validation, linear_model, preprocessing, svm)\nmy_data = genfromtxt('/hdd/kaggle/amazon_perms/train.csv', delimiter=\",\")\nmy_data_TT = genfromtxt('/hdd/kaggle/amazon_perms/test.csv', delimiter=\",\")\nnew_array = my_data[1:]\nnew_array_TT = my_data_TT[1:]\n\n\nsubset = [1, 2, 4, 5, 7, 8, 9]\n\ny, X = new_array[:,0], new_array[:, subset]\n\n\n#y, X = training_data[:,0], training_data[:, subset]\n#y_test, X_test = 0, new_array_TT[:, subset]\n\nencoder = preprocessing.OneHotEncoder()\nencoder.fit((X))\ntraining_data = encoder.transform(X) # Returns a sparse matrix (see numpy.sparse)\n\n\n\nX_train, X_cv, y_train, y_cv = cross_validation.train_test_split(training_data, y, test_size=.25, random_state=26)\n\nprint np.size(X_train, 0)\nprint np.size(X_cv, 0)\n\nalldata = ClassificationDataSet(np.size(X_train, 1), 1, nb_classes=2)\nvaldata = ClassificationDataSet(np.size(X_cv, 1), 1, 
nb_classes=2)\n#X_test = encoder.transform(X_test)\n\nfor i in range(len(y_train)):\n print X_train[i, :]\n print alldata.addSample(X_train[i, :], y_train[i])\n\nprint training_data[0, 1]\n\nval_actual = []\nfor row in test_data:\n valdata.addSample(row[1:], row[0])\n val_actual.append(row[0])\n\nvaldata._convertToOneOfMany()\n\ntstdata, trndata = alldata.splitWithProportion( 0.25 )\ntrndata._convertToOneOfMany( )\ntstdata._convertToOneOfMany( )\n\nfnn = buildNetwork( trndata.indim, 100, trndata.outdim, outclass=SoftmaxLayer )\ntrainer = BackpropTrainer( fnn, dataset=trndata, momentum=0.1, verbose=True, weightdecay=0.01)\\\n\ntrnresult = percentError( trainer.testOnClassData(),trndata['class'] )\ntstresult = percentError( trainer.testOnClassData( dataset=tstdata ), tstdata['class'] )\n\nimport numpy as NP\nfrom sklearn import datasets\nfrom sklearn import datasets as DS\nD = digits.data\nT = digits.target\n\nfrom sklearn import metrics\n\nfor i in range(20):\n trainer.trainEpochs( 5 )\n print \" train error: %5.2f%%\" % trnresult, \" test error: %5.2f%%\" % tstresult\n out = fnn.activateOnDataset(valdata)\n out = out.argmax(axis=1)\n print min(out)\n fpr, tpr, thresholds = metrics.roc_curve(val_actual, out, pos_label=1)\n auc = metrics.auc(fpr,tpr)\n print auc" }, { "alpha_fraction": 0.6753246784210205, "alphanum_fraction": 0.6753246784210205, "avg_line_length": 18.25, "blob_id": "a638b70c4f7867720bcb9be38fcc49db9646c3b4", "content_id": "1cb840937646f270ce99c06a4d08dd72ae55d51e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 77, "license_type": "no_license", "max_line_length": 45, "num_lines": 4, "path": "/README.md", "repo_name": "campustimes/kaggle_titanic", "src_encoding": "UTF-8", "text": "kaggle_titanic\n==============\n\nRepo for my Kaggle Titanic prediction project\n" } ]
3
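The import_train_test.py record above one-hot encodes the categorical columns before splitting and scoring by AUC; it still imports the long-deprecated `sklearn.cross_validation` module. A standalone sketch of the same encode/split/score flow against current scikit-learn paths (`model_selection`), with a made-up toy matrix and labels:

```python
# Self-contained sketch of the encode/split/score flow attempted above,
# using current scikit-learn module paths; the data here is invented.
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score

X = np.array([[1, 5], [2, 5], [1, 6], [2, 6], [1, 7], [2, 7], [1, 8], [2, 8]])
y = np.array([0, 1, 0, 1, 0, 1, 0, 1])

encoder = OneHotEncoder(handle_unknown='ignore')
X_enc = encoder.fit_transform(X)  # returns a sparse one-hot matrix

X_train, X_cv, y_train, y_cv = train_test_split(
    X_enc, y, test_size=0.25, random_state=26, stratify=y)
model = LogisticRegression().fit(X_train, y_train)
print(roc_auc_score(y_cv, model.predict_proba(X_cv)[:, 1]))
```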
shrutiaggarwal3012/OpenErpCodeGenerator
https://github.com/shrutiaggarwal3012/OpenErpCodeGenerator
c6fa4ccab7ed14fc176dd2768dd6fdbb1780c77b
4a8e741d322c644de5ad214ffdfb8daec9fde591
6e9ebfe982e0ae3b46d789398ae55bf2008e693d
refs/heads/master
2020-05-29T22:13:08.449989
2014-05-02T19:03:24
2014-05-02T19:03:24
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4784688949584961, "alphanum_fraction": 0.4784688949584961, "avg_line_length": 14, "blob_id": "34ddf59baa5d188c03e172829a4ed99b0ff79574", "content_id": "080eed347fe8657afc1d22e8bcf43062fa7e2bbc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 209, "license_type": "no_license", "max_line_length": 30, "num_lines": 14, "path": "/src/main/resources/python_template.py", "repo_name": "shrutiaggarwal3012/OpenErpCodeGenerator", "src_encoding": "UTF-8", "text": "from osv import osv, fields\n\n\nclass tvi_%s(osv.osv):\n _name = \"tvi.%s\"\n _description=\"%s\"\n _inherit = ['mail.thread']\n _order = \"%s\"\n _columns = {\n \"%s\": fields.%s(\"%s\"),\n }\n\n\ntvi_%s()" }, { "alpha_fraction": 0.8453608155250549, "alphanum_fraction": 0.8453608155250549, "avg_line_length": 23.5, "blob_id": "f1f39a021957063a30c96c55c82298a1e8276cdb", "content_id": "42532f89beb8bc19a9793e280a6b43f33ee079b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 97, "license_type": "no_license", "max_line_length": 25, "num_lines": 4, "path": "/src/main/resources/properties/erp_types.properties", "repo_name": "shrutiaggarwal3012/OpenErpCodeGenerator", "src_encoding": "UTF-8", "text": "char=display_name size\ninteger=display_name size\nfloat=display_name digits\ntext=display_name size" }, { "alpha_fraction": 0.6315789222717285, "alphanum_fraction": 0.6650717854499817, "avg_line_length": 15.076923370361328, "blob_id": "8847f3e1c7df1f5b3a99dd686d13049da65b7808", "content_id": "f942eacef6f7a8181379b2e43245fa64b2f6f93a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 209, "license_type": "no_license", "max_line_length": 64, "num_lines": 13, "path": "/src/main/java/com/erp/main/ErpGenerator.java", "repo_name": "shrutiaggarwal3012/OpenErpCodeGenerator", "src_encoding": "UTF-8", "text": "package com.erp.main;\n\n/**\n * Created with IntelliJ IDEA.\n * User: shrutii\n * Date: 2/5/14\n * Time: 6:57 PM\n * To change this template use File | Settings | File Templates.\n */\npublic class ErpGenerator {\n\n\n}\n" } ]
3
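The python_template.py record above is a `%s`-based skeleton meant to be filled in by the (here empty) Java generator. Assuming plain printf-style substitution — the generator's actual mechanism is not shown — a trimmed illustration with invented field values:

```python
# A trimmed stand-in for python_template.py above; the field values
# ('partner', 'name', 'char', ...) are made up for illustration.
template = (
    'class tvi_%s(osv.osv):\n'
    '    _name = "tvi.%s"\n'
    '    _columns = {"%s": fields.%s("%s")}\n'
)
print(template % ('partner', 'partner', 'name', 'char', 'Name'))
```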
02ashes/lab3
https://github.com/02ashes/lab3
a982643b07ac83b1666930789dd7916ce3b9880b
ffc6863bba13f41a7993ff648366e253a4eb9087
80ddb70da901a964e6664a79125a2dd0318c8342
refs/heads/main
2023-07-29T10:26:30.148682
2021-09-15T10:07:16
2021-09-15T10:07:16
406,699,247
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6940245032310486, "alphanum_fraction": 0.705543577671051, "avg_line_length": 28.870967864990234, "blob_id": "688caa7bf17e3c09fa319ca31036d44eb489a946", "content_id": "2521624a40a971a5772118a92e682dcbd0d04804", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2778, "license_type": "no_license", "max_line_length": 115, "num_lines": 93, "path": "/flaskapp/some_app.py", "repo_name": "02ashes/lab3", "src_encoding": "UTF-8", "text": "print(\"Hello word\")\n\nfrom flask import Flask\napp = Flask(__name__)\n\[email protected](\"/\")\ndef hello():\n return \" <html><head></head> <body> To add '/net' </body></html>\"\n\nif __name__ == \"__main__\":\n app.run(host='127.0.0.1',port=5000)\n \nfrom flask import render_template\n\[email protected](\"/data_to\")\ndef data_to():\n some_pars = {'user':'Ivan','color':'red'}\n some_str = 'Hello my dear friends!'\n some_value = 10\n return render_template('simple.html',some_str = some_str, some_value = some_value,some_pars=some_pars) \n\nfrom flask_wtf import FlaskForm,RecaptchaField\nfrom wtforms import StringField, SubmitField, TextAreaField\nfrom wtforms.validators import DataRequired\nfrom flask_wtf.file import FileField, FileAllowed, FileRequired\n\nSECRET_KEY = 'secret'\napp.config['SECRET_KEY'] = SECRET_KEY \napp.config['RECAPTCHA_USE_SSL'] = False\napp.config['RECAPTCHA_PUBLIC_KEY'] = '6Lem7C0bAAAAAFb0mjCJ-JvSZOIAJFQ4ZhVnYxDd'\napp.config['RECAPTCHA_PRIVATE_KEY'] = '6Lem7C0bAAAAAHPlZJAjMJdrhuN8twMTBf0ilGIg'\napp.config['RECAPTCHA_OPTIONS'] = {'theme': 'white'}\n\nfrom flask_bootstrap import Bootstrap\nbootstrap = Bootstrap(app)\n\nclass NetForm(FlaskForm):\n openid = StringField('openid', validators = [DataRequired()])\n upload = FileField('Load image', validators=[FileRequired(), FileAllowed(['jpg', 'png', 'jpeg'], 'Images only!')])\n recaptcha = RecaptchaField()\n submit = SubmitField('send')\n \nfrom werkzeug.utils import secure_filename\nimport os\nimport net as neuronet\n\[email protected](\"/net\",methods=['GET', 'POST'])\ndef net():\n form = NetForm()\n filename=None\n neurodic = {}\n if form.validate_on_submit():\n filename = os.path.join('./static', secure_filename(form.upload.data.filename))\n fcount, fimage = neuronet.read_image_files(10,'./static')\n decode = neuronet.getresult(fimage)\n for elem in decode:\n neurodic[elem[0][1]] = elem[0][2]\n form.upload.data.save(filename)\n return render_template('net.html',form=form,image_name=filename,neurodic=neurodic)\n\nfrom flask import request\nfrom flask import Response\nimport base64\nfrom PIL import Image\nfrom io import BytesIO\nimport json\n\[email protected](\"/apinet\",methods=['GET', 'POST'])\ndef apinet():\n neurodic = {}\n if request.mimetype == 'application/json': \n data = request.get_json()\n filebytes = data['imagebin'].encode('utf-8')\n cfile = base64.b64decode(filebytes)\n img = Image.open(BytesIO(cfile))\n decode = neuronet.getresult([img])\n neurodic = {}\n for elem in decode:\n neurodic[elem[0][1]] = str(elem[0][2])\n print(elem)\n ret = json.dumps(neurodic)\n resp = Response(response=ret, status=200, mimetype=\"application/json\")\n return resp\n\nimport lxml.etree as ET\[email protected](\"/apixml\",methods=['GET', 'POST'])\ndef apixml():\n dom = ET.parse(\"./static/xml/file.xml\")\n xslt = ET.parse(\"./static/xml/file.xslt\")\n transform = ET.XSLT(xslt)\n newhtml = transform(dom)\n strfile = ET.tostring(newhtml)\n return strfile\n" }, { "alpha_fraction": 0.6239016056060791, "alphanum_fraction": 
0.6801406145095825, "avg_line_length": 22.66666603088379, "blob_id": "3eb567bcc189c5989d533dab3b45c27c0a9455dc", "content_id": "afd5dad629d61c9291fc6e591c22702f87695a6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 569, "license_type": "no_license", "max_line_length": 66, "num_lines": 24, "path": "/flaskapp/client.py", "repo_name": "02ashes/lab3", "src_encoding": "UTF-8", "text": "import requests\nr = requests.get('http://localhost:5000/')\nr = requests.get('http://localhost:5000/data_to') \n\nimport os\nimport base64\nimg_data = None\npath = os.path.join('./static','image.jpg')\nwith open(path, 'rb') as fh:\n img_data = fh.read()\n b64 = base64.b64encode(img_data)\njsondata = {'imagebin':b64.decode('utf-8')}\nres = requests.post('http://localhost:5000/apinet', json=jsondata)\nif res.ok:\n print(res.json()) \n \ntry:\n r = requests.get('http://localhost:5000/apixml')\n print(r.status_code)\n if(r.status_code!=200):\n exit(1)\n print(r.text)\nexcept:\n exit(1)\n\n" }, { "alpha_fraction": 0.7347354292869568, "alphanum_fraction": 0.7557666301727295, "avg_line_length": 31.755556106567383, "blob_id": "8d4eb9a9f994ca873955baeefc80868982d253d8", "content_id": "e61566bc944c8bb31aade3f6f12f2d92dfb33550", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1474, "license_type": "no_license", "max_line_length": 81, "num_lines": 45, "path": "/flaskapp/net.py", "repo_name": "02ashes/lab3", "src_encoding": "UTF-8", "text": "import keras\nfrom keras.layers import Input\nfrom keras.applications import MobileNetV2\nfrom keras.applications.mobilenet_v2 import preprocess_input, decode_predictions \nimport os\nfrom PIL import Image\nimport numpy as np\nfrom tensorflow.compat.v1 import ConfigProto\nfrom tensorflow.compat.v1 import InteractiveSession\n\nconfig = ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.7\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\nheight = 224\nwidth = 224\nnh=224\nnw=224\nncol=3\n\nvisible2 = Input(shape=(nh,nw,ncol),name = 'imginp')\nresnet = MobileNetV2(include_top=True,\nweights=\"imagenet\", input_tensor=visible2,\ninput_shape=None, pooling=None, classes=1000)\n\ndef read_image_files(files_max_count,dir_name):\n files = [item.name for item in os.scandir(dir_name) if item.is_file()]\n files_count = files_max_count\n if(files_max_count>len(files)): \n files_count = len(files)\n image_box = [[]]*files_count\n for file_i in range(files_count):\n image_box[file_i] = Image.open(dir_name+'/'+files[file_i])\n return files_count, image_box\n\ndef getresult(image_box):\n files_count = len(image_box)\n images_resized = [[]]*files_count\n for i in range(files_count):\n images_resized[i] = np.array(image_box[i].resize((height,width)))#/255.0\n images_resized = np.array(images_resized)\n images_resized = preprocess_input(images_resized)\n out_net = resnet.predict(images_resized) \n decode = decode_predictions(out_net, top=1)\n return decode\n" } ]
3
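The client.py record above posts `{'imagebin': <base64>}` JSON to `/apinet`, which decodes it back into a PIL image before classification. The same round-trip without Flask or the network (the image path is a placeholder):

```python
# The base64-in-JSON convention used by /apinet above, round-tripped
# locally; 'image.jpg' is a placeholder path, not a file from the repo.
import base64
import json
from io import BytesIO
from PIL import Image

with open('image.jpg', 'rb') as fh:
    payload = {'imagebin': base64.b64encode(fh.read()).decode('utf-8')}

data = json.loads(json.dumps(payload))  # what the server side would receive
img = Image.open(BytesIO(base64.b64decode(data['imagebin'].encode('utf-8'))))
print(img.size)
```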
jamais-vu/insert-date
https://github.com/jamais-vu/insert-date
f84f183eb7a2b86f0805195fc8e4331b52f23193
e655070db3bef9183c3ec3c4eae8fb3b821a7427
8533e87eea8a0a891d772c6598b2a8d04313d0c4
refs/heads/master
2020-04-16T02:54:22.668837
2019-01-14T06:00:39
2019-01-14T06:00:39
165,212,921
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6280276775360107, "alphanum_fraction": 0.6366782188415527, "avg_line_length": 35.125, "blob_id": "7368a8519cf178d1c7a76e7187a1ce3738913d5a", "content_id": "ec1811827f24925c6c378cc8149a9982e9ba52b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 578, "license_type": "no_license", "max_line_length": 72, "num_lines": 16, "path": "/insertdate.py", "repo_name": "jamais-vu/insert-date", "src_encoding": "UTF-8", "text": "import sublime\nimport sublime_plugin\nimport datetime\n\n# Sublime Text supports multiple cursors in a single file, so we use \n# self.view.sel()[0] to get the location of the first cursor.\n\nclass DateCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n self.view.replace(edit, self.view.sel()[0], \n str(datetime.datetime.now().date()) + ' ')\n\nclass TimeCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n self.view.replace(edit, self.view.sel()[0],\n str(datetime.datetime.now().time())[0:5] + ' ')\n" }, { "alpha_fraction": 0.743139386177063, "alphanum_fraction": 0.7541163563728333, "avg_line_length": 37.76595687866211, "blob_id": "5d73b78df61f6bc4da854bea58454758895ddd11", "content_id": "6b1b081e63e004579a2574c5e2dfe00c00522188", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1822, "license_type": "no_license", "max_line_length": 82, "num_lines": 47, "path": "/README.md", "repo_name": "jamais-vu/insert-date", "src_encoding": "UTF-8", "text": "# insert-date\n\nA Sublime Text 3 plugin to insert the current date and time at the cursor. \n\n# Installation\nTo install the plugin, clone this repository to your Sublime Text `Packages`\ndirectory. The plugin will then be active. There is no need to restart \nSublime.\n\nIf you have trouble locating your Packages directory, open Sublime, and in the \nmenu bar select `Preferences` then `Browse Packages...` to open the Packages \ndirectory. Here are the usual locations for each operating system:\n\n**Windows** \nInstalled: `C:\\Users\\<your username>\\AppData\\Roaming\\Sublime Text 3\\Packages` \nPortable: `<path>\\Sublime Text 3\\Data\\Packages`\n \n**OS X** \n`~/Library/Application Support/Sublime Text 3/Packages`\n \n**Linux** \n`~/.config/sublime-text-3/Packages/`\n\n### Uninstall\n\nDelete the directory `<path>\\Packages\\insert-date`.\n\n# Usage\nUse the key bindings to insert the current local date or time at the cursor. \nThis will replace any selected text. If there are multiple cursors, the \ninsert is done at the location of the first cursor.\n\n**Date**: `alt+shift+d`. Date is formatted `YYYY-MM-DD `. \n**Time**: `alt+shift+t`. Time is formatted `HH:MM `, using the 24 hr clock. \n\nBoth the date and the time have a space at the end.\n\nThe inserted text remains selected; you will have to manually deselect it to\ncontinue typing. [This inconvenience will be fixed](\nhttps://github.com/jamais-vu/insert-date/issues/1#issue-398226790).\n\n# Notes and Acknowledgement\nMy first Sublime plugin. I wanted a simple way to insert the time or date when\nI am writing in my journal. The Sublime Package Control website was experiencing\nan unknown error so I chose to make my own plugin. \n[This tutorial](https://cnpagency.com/blog/creating-sublime-text-3-plugins-part-1/\n \"Creating Sublime Text 3 Plugins, by Sam Mello\") helped me get started.\n" } ]
2
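The insertdate.py record above builds the inserted strings by slicing `datetime` output; Sublime conventionally derives the command names (`date`, `time`) referenced by the key bindings from the `DateCommand`/`TimeCommand` class names. The formatting itself, checked outside Sublime:

```python
# The exact strings insertdate.py above inserts, reproduced standalone:
# 'YYYY-MM-DD ' for the date and 'HH:MM ' for the time, each with the
# trailing space the README documents.
import datetime

now = datetime.datetime.now()
date_text = str(now.date()) + ' '       # e.g. '2019-01-14 '
time_text = str(now.time())[0:5] + ' '  # e.g. '06:00 '
print(repr(date_text), repr(time_text))
```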
zwg-zero/sql_query
https://github.com/zwg-zero/sql_query
bdf191b92f6a2b170945455f94a9b1aa0566ef4d
84c7baa28a3a7aa68be9570b6fba7604d45068db
4b5e771b05fd76ad1db5dc61c4de31038f785504
refs/heads/master
2021-01-21T11:10:58.987569
2017-03-13T01:44:21
2017-03-13T01:44:21
83,530,485
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.533708930015564, "alphanum_fraction": 0.539342999458313, "avg_line_length": 42.09465026855469, "blob_id": "74623d4b426af887ed51dd3bc40a7e58cd04c3f8", "content_id": "fba0fd315213303e735612ac2e0a2c0241dfebb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10482, "license_type": "no_license", "max_line_length": 128, "num_lines": 243, "path": "/query.py", "repo_name": "zwg-zero/sql_query", "src_encoding": "UTF-8", "text": "# -*- coding: utf8 -*-\n# use python2.7 on linux amd64\n\nimport configure_735_2_5weekdays as configure # configure.py is setting file of this program, format refer to configure_fake.py\nimport os\nimport sys\nimport time\nimport json\nimport requests\nimport arrow\nimport MySQLdb # install mysqlclient please\nfrom xlsxwriter import Workbook\nimport datetime\nimport logging\nimport ftplib\nimport smtplib\nfrom email import encoders\nfrom email.mime.multipart import MIMEMultipart\n# from email.mime.application import MIMEApplication\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email.header import Header\n\n# setup logging\ncurrent_path = os.path.abspath(os.path.dirname(__file__))\nlog_file = os.path.join(current_path, sys.argv[0] + \".log\")\nlogging.basicConfig(filename=log_file,\n level=logging.INFO,\n filemode='w',\n format='%(asctime)s file:%(filename)s fun:%(funcName)s line:%(lineno)d %(levelname)s: %(message)s',\n )\n\n\n# query db and retrieve result\ndef query_db(host, port, user, passwd, db, query_sql, charset=\"utf8\"):\n result = []\n db = MySQLdb.connect(host=host, user=user, port=port, passwd=passwd, db=db,\n charset=charset)\n c = db.cursor()\n c.execute(query_sql)\n # save column name\n result.append([item[0].decode(\"utf-8\") for item in c.description]) # must decode or python2.7 would treat as byte\n for item in c.fetchall():\n # convert returned datetime.datetime or datetime.date to string\n item_content = []\n #print(item)\n for sub_item in item:\n if type(sub_item) == datetime.datetime:\n sub_item = sub_item.strftime(\"%Y-%m-%d %H:%M:%S\")\n elif type(sub_item) == datetime.date:\n sub_item = sub_item.strftime(\"%Y-%m-%d\")\n item_content.append(sub_item)\n result.append(item_content)\n # for content in result:\n # print(content)\n return result\n\n\n# write sql query result to excel file\ndef write_xls(filename, sheetname, content):\n #book = Workbook(filename, {'constant_memory': True}) # for debug to flush every row before new row begain write\n book = Workbook(filename)\n sheet1 = book.add_worksheet(sheetname)\n row_num = 0\n for row in content:\n col_num = 0\n for item in row:\n # print item, type(item)\n # try:\n # sheet1.write(row_num, col_num, item)\n # except Exception as e:\n # print('有错误退出')\n # print(str(e))\n # exit()\n sheet1.write(row_num, col_num, item)\n col_num += 1\n row_num += 1\n book.close()\n\n\ndef send_mail(server, user, password,\n recipients,\n cc_recipients='', # string separated by comma\n bcc_recipients='', # same as above\n subject='TEST',\n content='TEST',\n attach_files=None,\n ):\n msg = MIMEMultipart()\n msg[\"subject\"] = Header(subject, 'utf-8')\n msg[\"From\"] = user\n msg[\"To\"] = recipients\n if cc_recipients:\n msg[\"Cc\"] = cc_recipients\n msg.attach(MIMEText(content, 'plain', 'utf-8'))\n\n for file_item in attach_files:\n file_base_name = os.path.basename(file_item)\n attach_part = MIMEBase('application', 'octet-stream')\n attach_part.set_payload(open(file_item, 'rb').read())\n 
encoders.encode_base64(attach_part)\n attach_part.add_header('Content-Disposition', 'attachment',\n filename=(Header(file_base_name, 'utf-8').encode()))\n msg.attach(attach_part)\n\n receive_list = [item for item in recipients.split(', ')\n ] + [item for item in cc_recipients.split(', ') if cc_recipients\n ] + [item for item in bcc_recipients.split(', ') if bcc_recipients\n ]\n # try two times of sending mail\n try:\n s = smtplib.SMTP_SSL(server, timeout=30)\n s.login(user, password)\n s.sendmail(user, receive_list, msg.as_string())\n s.close()\n return True\n except Exception as e:\n print(str(e) + \"\\nTry again after 3s.\")\n time.sleep(3)\n try:\n s = smtplib.SMTP_SSL(server, timeout=30)\n s.login(user, password)\n s.sendmail(user, receive_list, msg.as_string())\n s.close()\n return True\n except Exception as e:\n print(str(e))\n logging.error(str(e))\n return False\n\n\n# send message to baidu gaojing http://tonggao.baidu.com if send mail fail\ndef send_gaojing(service_id, token, message):\n data = {\"service_id\": service_id,\n \"description\": message,\n \"event_type\": \"trigger\"\n }\n resp = requests.post(\"http://gaojing.baidu.com/event/create\",\n data=json.dumps(data),\n headers={\n \"servicekey\": token,\n },\n timeout=3, verify=False\n )\n result = json.loads(resp.content)\n print(\"The baidu Gaojing return message is: %s\" % result[\"message\"])\n logging.info(\"The baidu Gaojing return message is: %s\" % result[\"message\"])\n\n\ndef send_ftp(host, user, password, local_file, remote_path):\n remote_file_name = os.path.basename(local_file)\n try:\n ftp = ftplib.FTP(host, user, password, timeout=3)\n file_list = []\n ftp.retrlines('LIST',file_list.append)\n #print(file_list)\n if remote_path.encode(\"gb2312\") not in [item.split()[-1] for item in file_list]:\n print(\"make fold\")\n ftp.mkd(remote_path.encode('gb2312'))\n ftp.cwd(remote_path.encode(\"gb2312\"))\n # print(ftp.pwd().decode('gb2312'))\n file_handler = open(local_file, 'rb')\n ftp.storbinary(\"STOR %s\" % remote_file_name.encode('gb2312'), file_handler)\n file_handler.close()\n ftp.close()\n except Exception as e:\n logging.error(str(e))\n print(str(e))\n return False\n return True\n\n\nif __name__ == '__main__':\n for item in configure.task:\n content = ''\n total_attached_files = []\n for sql_item in item['database']:\n # try to execute db query, totally twice tried\n try:\n content = query_db(sql_item['host'], sql_item['port'], sql_item['user'], sql_item['passwd'],\n sql_item['db'], sql_item['sql'])\n except Exception as e:\n print(str(e) + '\\nTry again.')\n logging.error(str(e) + '\\nTry again.')\n time.sleep(3)\n try:\n content = query_db(sql_item['host'], sql_item['port'], sql_item['user'], sql_item['passwd'],\n sql_item['db'], sql_item['sql'])\n except Exception as e:\n print(str(e))\n logging.error(str(e))\n # try to send alert mail if db query fail\n # if send_mail fail, send baidu gaojing\n # if sending baidu gaojing fail do nothing\n if send_mail(item['mail_smtp'], item['mail_login_user'], item['mail_login_password'],\n item['mail_fail_recipients'],\n subject=item['mail_fail_sub'],\n content=item['mail_fail_cont'],\n # attach_files should be a list or tuple.\n attach_files=[log_file,]):\n exit()\n else:\n send_gaojing(configure.baidu_gaojing_info['service_id'], configure.baidu_gaojing_info['token'],\n item['mail_fail_cont'])\n exit()\n # the above all successful, write query result into excel file under current path\n if not os.path.isdir(sql_item['local_path']):\n os.mkdir(sql_item['local_path'])\n 
file_to_write = os.path.join(sql_item['local_path'], sql_item['attached_file']) + \\\n '_' + arrow.now().format('YYYYMMDD') + '.xlsx'\n write_xls(file_to_write, sql_item['sheet_name'], content)\n print(\"file: %s written finished\" % file_to_write)\n total_attached_files.append(file_to_write)\n # if cation is \"upload_ftp\" upload earch file\n if item['action'] == 'upload_ftp':\n # two times to send file to ftp\n if not send_ftp(item['ftp_host'], item['ftp_user'], item['ftp_password'], file_to_write,\n sql_item['remote_path']):\n time.sleep(3)\n if not send_ftp(item['ftp_host'], item['ftp_user'], item['ftp_password'], file_to_write,\n sql_item['remote_path']):\n send_gaojing(configure.baidu_gaojing_info['service_id'], configure.baidu_gaojing_info['token'],\n item['fail_cont'])\n exit()\n logging.info(\"successful in upload file %s to ftp\" % file_to_write)\n print(\"successful in upload file %s to ftp\" % file_to_write)\n\n # if action is \"send_mail\", send all files in a task at one time\n if item['action'] == 'send_mail':\n # in send_email function two times will try, so here just execute once.\n if not send_mail(item['mail_smtp'], item['mail_login_user'], item['mail_login_password'],\n item['mail_success_recipients'],\n cc_recipients=item['mail_success_cc_recipients'],\n bcc_recipients=item['mail_success_bcc_recipients'],\n subject=item['mail_success_sub'],\n content=item['mail_success_cont'],\n attach_files=total_attached_files):\n send_gaojing(configure.baidu_gaojing_info['service_id'], configure.baidu_gaojing_info['token'],\n item['mail_fail_cont'])\n exit()\n for item in total_attached_files:\n logging.info(\"successful in send file %s using mail\" % item)\n print(\"successful in send file %s using mail\" % item)\n" }, { "alpha_fraction": 0.4235852360725403, "alphanum_fraction": 0.45035579800605774, "avg_line_length": 40.272727966308594, "blob_id": "c4df5bde34a2eda874633b165ab51673560e4041", "content_id": "96135ebb609b9f16c4e14cf746904feff2aeff35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6570, "license_type": "no_license", "max_line_length": 143, "num_lines": 143, "path": "/configure_fake.py", "repo_name": "zwg-zero/sql_query", "src_encoding": "UTF-8", "text": "#! 
-*- coding: utf-8 -*-\nbaidu_gaojing_fatal = {\"service_id\": \"0000\",\n \"token\": \"8240ffe79b479b248e9c6a8\",\n }\nbaidu_gaojing_info = {\"service_id\": \"1111\",\n \"token\": \"307d439a02c79335897fff552\",\n }\n\ntask = (\n {'execute_time': '1st every month 7:40', # this is just for classify\"\n 'action': 'send_mail',\n 'mail_smtp': 'smtp.mailserver.net',\n 'mail_login_user': '[email protected]',\n 'mail_login_password': 'password@youremail',\n 'mail_success_recipients': '[email protected], [email protected]', # recipients should be in a string separated by comma\n 'mail_success_cc_recipients': '[email protected], [email protected]',\n 'mail_success_bcc_recipients': '', # same as above\n 'mail_fail_recipients': '[email protected], [email protected]',\n 'mail_success_sub': u'单标投资明细-月',\n 'mail_fail_sub': u'FAIL IN 单标投资明细-月',\n 'mail_success_cont': u'Dear xxxx,\\n\\n 关于2.0单标(按月)的结果已经发到邮箱,请您注意查收,谢谢。',\n 'mail_fail_cont': u'FAIL IN 单标投资明细-月',\n 'database': [\n {\n 'local_path': u'/reports/单标投资明细-月',\n 'attached_file': u'loan_detail_month.xlsx',\n 'sheet_name': u'单标投资明细-月',\n 'host':'172.16.0.1', 'port': 3306, 'user': 'databaseuser', 'passwd': 'databasepassword', 'db': 'youdatabase',\n 'sql':\n \"\"\"\n select\n '2.0' as '平台',\n t6.pk_id as '投资id',\n t7.vc_name as '单标名称',\n t7.nb_period as '锁定期',\n case t7.dc_period_type\n when '00' then '按天算'\n when '01' then '按月算'\n end as '计息方式(按月/按天)',\n concat(left(t7.nb_rate*100,4),'%') as '年化收益率',\n (t6.nb_amount) as '投资金额(元)',\n t3.vc_name as '投资人姓名',\n t3.vc_cellphone as '投资人手机号',\n t4.vc_value as '投资人身份证号',\n t2.vc_name as '邀请人姓名',\n t2.vc_cellphone as '邀请人手机号',\n t5.vc_value as '邀请人身份证号',\n '单标' as '计划类型',\n \"\"\"\n },\n # other items in database put here.\n ],\n },\n {'execute_time': '1st every month 7:40', # this is just for classify\"\n 'action': 'send_mail',\n 'mail_smtp': 'smtp.server.net',\n 'mail_login_user': '[email protected]',\n 'mail_login_password': 'passordofmaillogin',\n 'mail_success_recipients': '[email protected], [email protected]', # recipients should be in a string separated by comma\n 'mail_success_cc_recipients': '[email protected]',\n 'mail_success_bcc_recipients': '', # same as above\n 'mail_fail_recipients': '[email protected], [email protected]',\n 'mail_success_sub': u'后台投资明细-月',\n 'mail_fail_sub': u'FAIL IN 后台投资明细-月',\n 'mail_success_cont': u'Dear xxxx,\\n\\n 关于2.0投资明细(按月)的结果已经发到邮箱,请您注意查收,谢谢。',\n 'mail_fail_cont': u'FAIL IN 后台投资明细-月',\n 'database': [\n {\n 'local_path': u'/reports/后台投资明细-月',\n 'attached_file': u'investment_details_month.xlsx',\n 'sheet_name': u'后台投资明细-月',\n 'host':'172.16.0.1', 'port': 3306, 'user': 'username', 'passwd': 'password', 'db': 'xxxx',\n 'sql':\n \"\"\"\n select\n '2.0' as '平台',\n t4.pk_id as '投资id',\n t5.vc_name as '计划名称',\n t5.nb_period as '锁定期',\n case t5.dc_period_type\n when '00' then '按天'\n when '01' then '按月'\n end as '计息方式',\n concat(left(t5.nb_rate*100,4),'%') as '年化收益率',\n (t4.nb_amount) as '投资金额',\n t3.vc_name as '投资人姓名',\n t3.vc_cellphone as '投资人手机号',\n t6.vc_value as '投资人身份证号',\n t2.vc_name as '邀请人姓名',\n t2.vc_cellphone as '邀请人手机号',\n t7.vc_value as '邀请人身份证号',\n t8.vc_name as '计划类型',\n t4.dt_datetime_start as '起息时间',\n order by 10\n \"\"\"\n },\n ],\n },\n {'action': 'upload_ftp',\n 'ftp_host': '10.13.2.52',\n 'ftp_user': 'xxxxx',\n 'ftp_password': 'xxxxx',\n 'fail_cont': u'FAIL IN 战企月初ftp',\n 'mail_smtp': 'smtp.mailserver.net',\n 'mail_login_user': '[email protected]',\n 'mail_login_password': 'password',\n 'mail_fail_recipients': 'mailrecipient1@mailserver, [email 
protected]',\n 'mail_fail_sub': u'FAIL IN 战企月初ftp',\n 'mail_fail_cont': u'FAIL IN 战企月初ftp',\n 'database': [\n {\n 'local_path': u'/reports/交明细-单标',\n 'attached_file': u'Deal_Loan.xlsx',\n 'remote_path': u'成交明细-单标',\n 'sheet_name': u'成交明细-单标',\n 'host':'172.16.1.9', 'port': 3306, 'user': 'ername', 'passwd': 'password', 'db': 'database',\n 'sql':\n \"\"\"\n select\n u.pk_id as '用户id',u.vc_name as '投资人姓名',up.vc_value as '身份证号',u.vc_account as '投资人用户名',t.money as '成交金额',date(t.dtime) as '成交日期'\n from\n (SELECT i.nb_amount as 'money',date(i.dt_datetime) as 'dtime' ,i.pk_id\n from fiz_invest i ,fiz_loan l\n where i.fk_loan_id = l.pk_id\n and l.dc_type in ('00','01')\n and i.dc_status in ('10','90','99')\n and i.dt_datetime >= '2015-05-11 00:00:00' and i.dt_datetime <= now()\n )as t\n inner join fiz_invest i\n on i.pk_id=t.pk_id\n left join fiz_user u\n on i.fk_user_id = u.pk_id\n left join\n (select * from fiz_user_prop\n where vc_code = 'code-identity-number'\n ) up\n on u.pk_id = up.fk_user_id\n \"\"\"\n },\n # other items of database come here\n ],\n },\n)\n" }, { "alpha_fraction": 0.7333333492279053, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 19.75, "blob_id": "1526ed6184616d336f6c7003cfa3436df6de03c5", "content_id": "a3b6de594a80051e3827813737a3c182a26384ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 181, "license_type": "no_license", "max_line_length": 99, "num_lines": 8, "path": "/README.md", "repo_name": "zwg-zero/sql_query", "src_encoding": "UTF-8", "text": "### 功能\n* generate report from mysql query and write out a excel file '.xlsx' then send to desired receiver\n\n### 使用的主要python库\n* MySQLdb\n* smtplib\n* email\n* xlsxwriter" } ]
3
khangesh9420/CARD-VALIDATION
https://github.com/khangesh9420/CARD-VALIDATION
a1e45a6d9eff71a7d34c4c99f31879fea52a5397
56ab00de6d7ce6ab2360fb3f1028f3157789eb9f
3f48be42ba4b14c5c0af7d6ed135252b057d7188
refs/heads/main
2023-02-14T16:14:20.678997
2021-01-11T11:11:50
2021-01-11T11:11:50
301,655,730
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6633986830711365, "alphanum_fraction": 0.6764705777168274, "avg_line_length": 30.689655303955078, "blob_id": "c818e1d0cf4c0b070bd489ea9919d36e6d658113", "content_id": "9fabe221e99b0ea2c47051bcd676f3c60a36047e", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 918, "license_type": "permissive", "max_line_length": 68, "num_lines": 29, "path": "/main.py", "repo_name": "khangesh9420/CARD-VALIDATION", "src_encoding": "UTF-8", "text": "# Ask the user for the card number; .strip() removes unwanted spaces\ncard_number = list(input(\"Enter the card number :\").strip())\n# Remove the last digit, i.e. the check digit\ncheck_digit = card_number.pop()\n# Reverse the card_number\ncard_number.reverse()\n# make an empty list for the processed digits\nfuture_use = []\n# digits at even and odd indices are handled differently\nfor index, digit in enumerate(card_number):\n if index % 2 == 0:\n # double the digit at every even index\n double_digit = int(digit) * 2\n # if the doubled digit is greater than 9, subtract 9 from it\n if double_digit > 9:\n double_digit = double_digit - 9\n\n future_use.append(double_digit)\n else:\n # a digit at an odd index remains unchanged\n future_use.append(int(digit))\n\ntotal = int(check_digit) + sum(future_use)\nprint(total)\n# check whether the total is divisible by 10\nif total % 10 == 0:\n print(\"The card number is valid\")\nelse:\n print(\"The card number is invalid\")" }, { "alpha_fraction": 0.8031591773033142, "alphanum_fraction": 0.8128796815872192, "avg_line_length": 204.75, "blob_id": "bbcba35945ed52891a629a31dc01deafff4f9a07", "content_id": "d3261983211887f4ab8ada9a07a90ab2bb380785", "detected_licenses": [ "LicenseRef-scancode-public-domain" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 823, "license_type": "permissive", "max_line_length": 689, "num_lines": 4, "path": "/README.md", "repo_name": "khangesh9420/CARD-VALIDATION", "src_encoding": "UTF-8", "text": "# CARD-VALIDATION program using Python\n# I used the Luhn algorithm\n# The Luhn algorithm, also known as the modulus 10 or mod 10 algorithm, is a simple checksum formula used to validate a variety of identification numbers, such as credit card numbers, IMEI numbers, and Canadian Social Insurance Numbers. The LUHN formula was created in the late 1960s by a group of mathematicians. Shortly thereafter, credit card companies adopted it. Because the algorithm is in the public domain, it can be used by anyone. Most credit cards and many government identification numbers use the algorithm as a simple method of distinguishing valid numbers from mistyped or otherwise incorrect numbers. It was designed to protect against accidental errors, not malicious attacks.\n# Anyone is welcome to use and edit this code.\n" } ]
2
sihsob/proof-visualizer
https://github.com/sihsob/proof-visualizer
2e0e2f23a188cb1de36aa08574bbebc7586fc751
59bbb61b1f85e93ad307a8397dcf7f516f7df913
68daa15894bdf7b399a3dc9a38d678f177b086bc
refs/heads/master
2020-05-23T11:15:31.356623
2017-01-29T20:29:03
2017-01-29T20:29:03
80,369,674
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.46862348914146423, "alphanum_fraction": 0.46963563561439514, "avg_line_length": 24.70270347595215, "blob_id": "e0140eb23157c7781743cd3c460e58b86a8dd81e", "content_id": "16c61073b84fded07dffe1d2a9fab5e092aafe6b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 988, "license_type": "permissive", "max_line_length": 55, "num_lines": 37, "path": "/validation.py", "repo_name": "sihsob/proof-visualizer", "src_encoding": "UTF-8", "text": "import json\r\nfrom rules import *\r\n\r\ndef validate(parsed_j):\r\n print parsed_j\r\n\r\n line_num = parsed_j['label']\r\n logic_val = parsed_j['sentence']\r\n reason = parsed_j['justification']\r\n reference = parsed_j['reference']\r\n\r\n validation_map = {\r\n \"REIT\" : reit,\r\n \"OR I\" : orIntro,\r\n \"OR E\" : orElim,\r\n \"AND I\" : andIntro,\r\n \"AND E\" : andElim,\r\n \"NOT I\" : notIntro,\r\n \"NOT E\" : notElim,\r\n \"CONTRA I\" : contraIntro,\r\n \"CONTRA E\" : contraElim,\r\n \"IMP I\" : impIntro,\r\n \"IMP E\" : impElim,\r\n \"BI I\" : biIntro,\r\n \"BI E\" : biElim\r\n }\r\n\r\n if len(reference) == 0 and reason == \"\":\r\n return { \"result\": True }\r\n else:\r\n try:\r\n return {\r\n \"result\": validation_map[reason](\r\n logic_val, reference) }\r\n except KeyError:\r\n # an unknown justification string is not in validation_map\r\n print 'UNSUPPORTED LOGIC RULE'\r\n return { \"result\": False }\r\n" }, { "alpha_fraction": 0.6338028311729431, "alphanum_fraction": 0.6478873491287231, "avg_line_length": 22.66666603088379, "blob_id": "16a57d51c9f51db36642702041a353d979cf58b5", "content_id": "9848a91853c636c678a013f5e38f5697eb56021e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 284, "license_type": "permissive", "max_line_length": 48, "num_lines": 12, "path": "/server.py", "repo_name": "sihsob/proof-visualizer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom flask import Flask, request, jsonify\napp = Flask(__name__)\nfrom validation import validate\n\n@app.route('/', methods=['POST'])\ndef index():\n return jsonify(validate(request.get_json()))\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n" }, { "alpha_fraction": 0.6285305619239807, "alphanum_fraction": 0.6385412812232971, "avg_line_length": 28.44210433959961, "blob_id": "be3fabea0e3f465c3070b1cf06678fffcbd3cc67", "content_id": "63ae69ac9ec5e31b4693f3b34b872b5589bdf895", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2797, "license_type": "permissive", "max_line_length": 96, "num_lines": 95, "path": "/statement.py", "repo_name": "sihsob/proof-visualizer", "src_encoding": "UTF-8", "text": "class Statement(object):\n\n def __init__(self, value):\n self._value = value\n\n @property\n def value(self):\n return self._value\n\nclass BinaryStatement(Statement):\n\n def __init__(self, value, arg1, arg2):\n super(BinaryStatement, self).__init__(value)\n self._left = arg1\n self._right = arg2\n\n @property\n def left(self):\n return self._left\n @property\n def right(self):\n return self._right\n\nclass UnaryStatement(Statement):\n\n def __init__(self, value, argument):\n super(UnaryStatement, self).__init__(value)\n self._child = argument\n\n @property\n def child(self):\n return self._child\n\nclass IdStatement(Statement):\n\n def __init__(self, value):\n super(IdStatement, self).__init__(value)\n\nclass ContradictionStatement(Statement):\n\n def __init__(self):\n super(ContradictionStatement, 
self).__init__(\"!\")\n\ndef print_tree(tree, level=0):\n if isinstance(tree, BinaryStatement):\n # print \"level\", level, \":\", tree.value\n print \"\\t\"*level, tree.value\n print_tree(tree.left, level + 1)\n print_tree(tree.right, level + 1)\n elif isinstance(tree, UnaryStatement):\n # print \"level\", level, \":\", tree.value\n print \"\\t\"*level, tree.value\n print_tree(tree.child, level + 1)\n elif isinstance(tree, IdStatement):\n print \"\\t\"*level, tree.value\n # print \"level\", level, \":\", tree.value\n elif isinstance(tree, ContradictionStatement):\n print \"\\t\"*level, tree.value\n # print \"level\", level, \":\", tree.value\n else:\n print \"WARNING: could not understand tree type\"\n assert False\n\ndef getAllOrOperands(statement, ors):\n if statement.value != '|':\n ors.append(statement)\n return ors\n\n ors.append(statement.left)\n return getAllOrOperands(statement.right, ors)\n\ndef getAllAndOperands(statement, ands):\n if statement.value != '&':\n ands.append(statement)\n return ands\n\n ands.append(statement.left)\n return getAllAndOperands(statement.right, ands)\n\n# Literal comparison\ndef compareTree(tree1, tree2):\n if isinstance(tree1, IdStatement) and isinstance(tree2, IdStatement):\n if tree1.value == tree2.value:\n return True\n else: return False\n if isinstance(tree1, ContradictionStatement) and isinstance(tree2, ContradictionStatement):\n return True\n\n if tree1.value == tree2.value:\n if isinstance(tree1, BinaryStatement) and isinstance(tree2, BinaryStatement):\n return compareTree(tree1.left, tree2.left) and compareTree(tree1.right, tree2.right)\n elif isinstance(tree1, UnaryStatement) and isinstance(tree2, UnaryStatement):\n return compareTree(tree1.child, tree2.child)\n\n return False\n" }, { "alpha_fraction": 0.5656962990760803, "alphanum_fraction": 0.5786781907081604, "avg_line_length": 20.542373657226562, "blob_id": "7f67ebdb6520477303a7a59ecb138d6a685195e1", "content_id": "61b8bf7de05745a4d1cba95033934e606c9e6a9c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2542, "license_type": "permissive", "max_line_length": 72, "num_lines": 118, "path": "/parse_tree.py", "repo_name": "sihsob/proof-visualizer", "src_encoding": "UTF-8", "text": "from __future__ import print_function\nimport statement\n\ntokens = (\n # meaningful symbols\n 'NOT',\n 'AND',\n 'OR',\n 'IMPLICATION',\n 'BICONDITIONAL',\n 'CONTRADICTION',\n 'ID',\n\n # non-meaningful symbols\n 'LPAREN',\n 'RPAREN'\n)\n\nt_ID = r'[a-zA-Z]'\nt_NOT = r'~'\nt_AND = r'&'\nt_OR = r'\\|'\nt_IMPLICATION = r'-'\nt_BICONDITIONAL = r'='\nt_CONTRADICTION = r'!'\nt_LPAREN = r'\\('\nt_RPAREN = r'\\)'\n\n# ignored characters\nt_ignore = ' \\t'\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += t.value.count(\"\\n\")\n\ndef t_error(t):\n print(\"Illegal character '%s'\" % t.value[0])\n t.lexer.skip(1)\n\n# lexer\nimport ply.lex as lex\nlexer = lex.lex()\n\n# Grammar\n# S: ! | E\n# E: A -> E | A -> ! 
| A <--> E | A\n# A: B|A | B & A | B\n# B: ~B | C\n# C: (E) | id\n\n# parsing rules\nprecedence = (\n ('left','AND','OR',\n 'IMPLICATION',\n 'BICONDITIONAL',\n 'CONTRADICTION'),\n ('right', 'NOT')\n)\n\ndef p_statement_contradiction(t):\n 'statement : CONTRADICTION'\n t[0] = statement.ContradictionStatement()\n tree = t[0] # Not sure what to do about attribute rules\ndef p_statement_expression(t):\n 'statement : expression'\n t[0] = t[1]\n tree = t[0]\n\ndef p_expression_implication(t):\n 'expression : binary IMPLICATION expression'\n t[0] = statement.BinaryStatement(\"-\", t[1], t[3])\ndef p_expression_contrasubproof(t):\n 'expression : binary IMPLICATION CONTRADICTION'\n t[0] = statement.BinaryStatement(\"-\", t[1],\n statement.ContradictionStatement())\n\ndef p_expression_biconditional(t):\n 'expression : binary BICONDITIONAL expression'\n t[0] = statement.BinaryStatement(\"=\", t[1], t[3])\ndef p_expression_pass(t):\n 'expression : binary'\n t[0] = t[1]\n\ndef p_binary_or(t):\n 'binary : unary OR binary'\n t[0] = statement.BinaryStatement(\"|\", t[1], t[3])\ndef p_binary_and(t):\n 'binary : unary AND binary'\n t[0] = statement.BinaryStatement(\"&\", t[1], t[3])\ndef p_binary_pass(t):\n 'binary : unary'\n t[0] = t[1]\n\ndef p_unary_neg(t):\n 'unary : NOT unary'\n t[0] = statement.UnaryStatement(\"~\", t[2])\ndef p_unary_pass(t):\n 'unary : root'\n t[0] = t[1]\n\ndef p_root_paren(t):\n 'root : LPAREN expression RPAREN'\n t[0] = t[2]\ndef p_root_id(t):\n 'root : ID'\n t[0] = statement.IdStatement(t[1])\n\ndef p_error(t):\n print(\"Syntax error at '%s'\" % t.value)\n\nimport ply.yacc as yacc\nparser = yacc.yacc()\n\ndef parse_sentence(s):\n ret = parser.parse(s)\n print(\"DEBUG\")\n statement.print_tree(ret)\n return ret\n" }, { "alpha_fraction": 0.5878178477287292, "alphanum_fraction": 0.5878178477287292, "avg_line_length": 29.196428298950195, "blob_id": "a3828a53b62bc0847885a623bf026f6d35a08ee4", "content_id": "12f2aa3cf37b329151d011a39ba94640deaed098", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1691, "license_type": "permissive", "max_line_length": 79, "num_lines": 56, "path": "/www/js/control.js", "repo_name": "sihsob/proof-visualizer", "src_encoding": "UTF-8", "text": "$('.dropdown-menu a').on('click', function(){\n $('.dropdown-toggle').html($(this).html() + '<span class=\"caret\"></span>');\n});\n\n$(document).ready(function() {\n var prevReferences = null;\n var logicRule = null;\n\n // requires some nodes be highlighted\n // already and sets prevReferences\n function onCreateReferences(e) {\n e.preventDefault();\n console.log(\"onCreateReferences()\");\n if (prevReferences !== null)\n return;\n if (logicRule !== null)\n return;\n\n var inferenceRules = $('#inference-rule').find(\":selected\").val();\n console.log(\" -- got inference rule '\" + inferenceRules + \"'\");\n\n // TODO: get highlighted nodes,\n // set prevReferences\n // TODO: set logicRule\n }\n\n // requires prevReferences to have a\n // non-null value, and that a single\n // node be highlighted. 
Sets go.js pane\n // accordingly\n function onAttachReferences() {\n console.log(\"onAttachReferences()\");\n if (prevReferences === null)\n return;\n if (logicRule === null)\n return;\n\n // TODO: get prevReferences\n // TODO: set newly selected element to have\n // inference rule of logicRule\n\n logicRule = null;\n prevReferences = null;\n }\n\n // just create a node; the statement can be typed in by\n // the user, and the references can be chosen arbitrarily\n function onCreateNode() {\n console.log(\"onCreateNode()\");\n // TODO: just create a node\n }\n\n $(\"#create-node\").click(onCreateNode);\n $(\"#create-refs\").on('submit', onCreateReferences);\n $(\"#attach-refs\").click(onAttachReferences);\n});\n" }, { "alpha_fraction": 0.8215526938438416, "alphanum_fraction": 0.8215526938438416, "avg_line_length": 122.28571319580078, "blob_id": "868ce532be50d14127185910585c5e58523a735f", "content_id": "127ecda88ec76a3990f6487e88cab30282406c0b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 863, "license_type": "permissive", "max_line_length": 556, "num_lines": 7, "path": "/README.md", "repo_name": "sihsob/proof-visualizer", "src_encoding": "UTF-8", "text": "# proof-visualizer\nVisualize Fitch-style proofs in the form of argument diagrams with proof validation.\n\n### What are Argument Diagrams?\nTraditionally used to represent arguments in natural language in a tree-like structure. Argument diagrams show the structure of arguments and allow one to easily find the important parts of the diagram. These diagrams also help to show the structure and \"lines of reasoning\". The method of creating these diagrams also helps to remove redundancies and unnecessary information. Conveniently, argument diagrams transfer over fairly well for formal proofs. 
Proofs can now be visually represented while retaining their step-by-step process and subproofs.\n\nFor more information: https://en.wikipedia.org/wiki/Argument_map or http://www.cogsci.rpi.edu/~heuveb/teaching/CriticalThinking/Web/Presentations/ArgumentDiagrams.pdf\n" }, { "alpha_fraction": 0.6055582761764526, "alphanum_fraction": 0.615147054195404, "avg_line_length": 27.095890045166016, "blob_id": "2eaafce6087cfab77bfc454b563eeb28654cb0cb", "content_id": "1a54bf67e57a54071dbb48a25f315aaeb3073bc7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6153, "license_type": "permissive", "max_line_length": 79, "num_lines": 219, "path": "/rules.py", "repo_name": "sihsob/proof-visualizer", "src_encoding": "UTF-8", "text": "from parse_tree import parse_sentence\nfrom statement import *\nimport statement\n\ndef reit(logic_val, reference):\n if len(reference) > 1:\n return False\n\n logic_tree = parse_sentence(logic_val)\n ref_tree = parse_sentence(reference[0])\n\n return compareTree(logic_tree, ref_tree)\n\ndef orIntro(logic_val, reference):\n if len(reference) > 1:\n return False\n\n logic_tree = parse_sentence(logic_val)\n ref_tree = parse_sentence(reference[0])\n temp_tree = logic_tree\n\n # TODO: Make sure all top level are ors\n #while isinstance(temp_tree, BinaryStatement):\n # if not temp_tree.value == '|':\n # return False\n # temp_tree = temp_tree.left\n\n ors = getAllOrOperands(logic_tree, [])\n for i in ors:\n print_tree(i)\n if compareTree(i, ref_tree) == True:\n return True\n return False\n\ndef orElim(logic_val, reference):\n if len(reference) == 0:\n return False\n\n logic_tree = parse_sentence(logic_val)\n ref_tree = parse_sentence(reference[0])\n ors = getAllOrOperands(ref_tree, [])\n\n #TODO: Make sure all top level are ors\n\n for ref in reference[1:]:\n ref_tree2 = parse_sentence(ref)\n for i in ors:\n if compareTree(ref_tree2.left, i) == True:\n break\n else: return False\n\n for ref in reference[1:]:\n ref_tree2 = parse_sentence(ref)\n if compareTree(ref_tree2.right, logic_tree) == False:\n return False\n return True\n\ndef andIntro(logic_val, reference):\n if len(reference) == 0:\n return False\n\n logic_tree = parse_sentence(logic_val)\n refs = []\n for i in reference:\n refs.append(parse_sentence(i))\n\n # TODO: Make sure all top level are ands\n\n ands = getAllAndOperands(logic_tree, [])\n for i in ands:\n for j in refs:\n if compareTree(i, j) == True:\n break\n else:\n return False\n return True\n\ndef andElim(logic_val, reference):\n if len(reference) > 1:\n return False\n\n logic_tree = parse_sentence(logic_val)\n ref_tree = parse_sentence(reference[0])\n temp_tree = ref_tree\n\n # TODO: Make sure all top level are ands\n #while isinstance(temp_tree, BinaryStatement):\n # if not temp_tree.value == '|':\n # return False\n # temp_tree = temp_tree.left\n\n ands = getAllAndOperands(ref_tree, [])\n for i in ands:\n if compareTree(i, logic_tree) == True:\n return True\n return False\n\ndef notIntro(logic_val, reference):\n if len(reference) > 1:\n return False\n\n logic_tree = parse_sentence(logic_val)\n ref_tree = parse_sentence(reference[0]) # this is a subproof\n if not ref_tree.value == \"-\": # subproof condition\n return False\n if not isinstance(ref_tree.right, ContradictionStatement):\n return False\n\n neg = UnaryStatement(\"~\", ref_tree.left)\n return compareTree(neg, logic_tree)\n\ndef notElim(logic_val, reference):\n if len(reference) > 1:\n return False\n logic_tree = parse_sentence(logic_val)\n 
ref_tree = parse_sentence(reference[0])\n if not ref_tree.value == \"~\":\n return False\n if not ref_tree.child.value == \"~\":\n return False\n\n return compareTree(ref_tree.child.child, logic_tree)\n\ndef contraIntro(logic_val, reference):\n if len(reference) > 2:\n return False\n if not logic_val == \"!\":\n return False\n\n # Statements could have been selected out of order\n ref_tree1 = parse_sentence(reference[0])\n ref_tree2 = parse_sentence(reference[1])\n neg = UnaryStatement(\"~\", ref_tree1)\n if compareTree(ref_tree2, neg) == True:\n return True\n else:\n ref_tree1 = parse_sentence(reference[1])\n ref_tree2 = parse_sentence(reference[0])\n neg = UnaryStatement(\"~\", ref_tree1)\n if compareTree(ref_tree2, neg) == True:\n return True\n\n return False\n\ndef contraElim(logic_val, reference):\n if len(reference) > 1:\n return False\n\n ref_tree = parse_sentence(reference[0])\n return compareTree(ref_tree, ContradictionStatement())\n\ndef impIntro(logic_val, reference):\n if len(reference) > 1:\n return False\n\n logic_tree = parse_sentence(logic_val)\n ref_tree = parse_sentence(reference[0])\n\n return compareTree(logic_tree, ref_tree)\n\ndef impElim(logic_val, reference):\n if len(reference) > 2:\n return False\n\n ref_tree = parse_sentence(reference[0])\n logic_tree = parse_sentence(\"(\" + reference[1] + \") - (\" + logic_val + \")\")\n\n return compareTree(ref_tree, logic_tree)\n\ndef biIntro(logic_val, reference):\n if len(reference) > 2:\n return False\n\n logic_tree = parse_sentence(logic_val)\n if not logic_tree.value == \"=\":\n return False\n\n ref_tree1 = parse_sentence(reference[0])\n ref_tree2 = parse_sentence(reference[1])\n\n if compareTree(ref_tree1.left, ref_tree2.right) == False:\n return False\n if compareTree(ref_tree2.left, ref_tree1.right) == False:\n return False\n\n if compareTree(logic_tree.left, ref_tree1.left) == True and \\\n compareTree(logic_tree.right, ref_tree1.right) == True:\n return True\n elif compareTree(logic_tree.left, ref_tree2.left) == True and \\\n compareTree(logic_tree.right, ref_tree2.right) == True:\n return True\n else: return False\n\n\ndef biElim(logic_val, reference):\n if len(reference) > 2:\n return False\n\n ref_tree = parse_sentence(reference[0])\n logic_tree = parse_sentence(\"(\" + reference[1] + \") = (\" + logic_val + \")\")\n\n if compareTree(ref_tree, logic_tree) == True:\n return True\n\n logic_tree = parse_sentence(\"(\" + logic_val + \") = (\" + reference[1] + \")\")\n if compareTree(ref_tree, logic_tree) == True:\n return True\n\n ref_tree = parse_sentence(reference[1])\n logic_tree = parse_sentence(\"(\" + reference[0] + \") = (\" + logic_val + \")\")\n\n if compareTree(ref_tree, logic_tree) == True:\n return True\n\n logic_tree = parse_sentence(\"(\" + logic_val + \") = (\" + reference[0] + \")\")\n if compareTree(ref_tree, logic_tree) == True:\n return True\n\n return False\n" } ]
7
detonih/data_master
https://github.com/detonih/data_master
0897584be4f2d472a9daf4cf80c0439c2de86129
68075ed73b21a579e2f751bcca559da3e571bdf9
3d50e5ef3f25e2af3a77ded32f8fa9bdca7ad045
refs/heads/master
2023-07-27T21:25:07.914784
2021-09-06T18:40:28
2021-09-06T18:40:28
399,976,027
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6705426573753357, "alphanum_fraction": 0.748062014579773, "avg_line_length": 29.769229888916016, "blob_id": "c5d4ec7057dc186195b9f24f3820d24de574f9c8", "content_id": "4c407157813d7d12c26123effe579578ddaaab0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 774, "license_type": "no_license", "max_line_length": 326, "num_lines": 26, "path": "/Makefile", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "build:\n\tdocker build -t hadoop-cluster .\n\nrun:\n\tdocker run -itd --name hadoop-env --hostname localhost -v /home/detonih/Documents/data_master/project/data_master/raw-data:/raw-data -v /home/detonih/Documents/data_master/project/data_master/scripts:/scripts -p 8888:8888 -p 8998:8998 -p 4040:4040 -p 50070:50070 -p 50075:50075 -p 8088:8088 -p 8042:8042 hadoop-cluster:latest\n\nup:\n\tdocker-compose up\n\nbash:\n\tdocker exec -it hadoop-env /bin/bash\n\nmysql:\n\tdocker exec -it data_master_db /bin/bash\n\nmongo:\n\tdocker exec -it data_master_mongo_db /bin/bash\n\nprune:\n\tdocker stop hadoop-env \n\tdocker stop data_master_db\n\tdocker stop data_master_mongo_db\n\tdocker container prune\n\nddl:\n\tdocker exec -i data_master_db mysql -u root -p${MYSQL_ROOT_PASSWORD} < ./scripts/sql/enade_ddl.sql\n" }, { "alpha_fraction": 0.6849710941314697, "alphanum_fraction": 0.7225433588027954, "avg_line_length": 37.33333206176758, "blob_id": "2304fc1ea7f2cbf23664a07f62d8b4ac684f4d77", "content_id": "c92bc395a8c5e0b3991270cdcb3e9998d1e475ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 346, "license_type": "no_license", "max_line_length": 141, "num_lines": 9, "path": "/scripts/sh/pyspark_mongo.sh", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nspark-submit \\\n--master local[*] \\\n--deploy-mode client \\\n--jars /tmp/mongo-hadoop-spark-2.0.2.jar \\\n--driver-class-path /tmp/mongo-hadoop-spark-2.0.2.jar \\\n--py-files /tmp/mongo-hadoop/spark/src/main/python/pymongo_spark.py,/usr/local/lib/python3.6/dist-packages/pymongo_spark-0.1.dev0-py3.6.egg \\\n/scripts/python/pyspark_mongo.py\n\n" }, { "alpha_fraction": 0.5141552686691284, "alphanum_fraction": 0.5278539061546326, "avg_line_length": 22.826086044311523, "blob_id": "bc75aad5907dbb7bc3aa9615ec7318fefe653f6e", "content_id": "1ef874aebd670f33b27f20ca18c0ff9c214cc475", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1095, "license_type": "no_license", "max_line_length": 91, "num_lines": 46, "path": "/scripts/sh/get_tweets.sh", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nhashtag=$1\n# date_since format = yyyy-mm-dd\ndate_since=$2\nnumber_items=$3\nfile_path=$4\n\nif [ \"$#\" -ne 4 ]; then\n echo \"Wrong number of parameters\"\n exit 1\nfi\n\necho \"Getting tweets...\"\npython3 /scripts/python/get_tweets.py ${hashtag} ${date_since} ${number_items} ${file_path}\nif [ $? -eq 0 ]; then\n echo \"Tweets loaded into ${file_path}\"\n if [ -f \"$file_path\" ]; then\n echo \"Putting $file_path to hdfs...\"\n hdfs dfs -put -f ${file_path} /stage/tweets.csv\n\n if [ $? -eq 0 ]; then\n echo \"Put file done\"\n echo \"Deleting tweets raw data\"\n\n rm -rf ${file_path}\n if [ $? 
-eq 0 ]; then\n echo \"${file_path} deleted\"\n exit 0\n else\n echo \"problem trying to delete $file_path\"\n exit 1\n fi\n \n else\n echo \"We have a problem putting $EXTRACTED_FILE on hdfs\"\n exit 1\n fi\n else\n echo \"$file_path does not exist.\"\n exit 1\n fi\nelse\n echo \"We have a problem trying to get tweets\"\n exit 1\nfi" }, { "alpha_fraction": 0.7770800590515137, "alphanum_fraction": 0.7770800590515137, "avg_line_length": 28, "blob_id": "8b6956efd922bfa73b43f5e4d21cb0ad7091eb9b", "content_id": "9a44cc5734b8dcf0b52e1f97a0003becf76107aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 637, "license_type": "no_license", "max_line_length": 61, "num_lines": 22, "path": "/start.sh", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n/etc/init.d/ssh start\n\n$HADOOP_HOME/bin/hadoop namenode -format\n$HADOOP_HOME/sbin/start-dfs.sh\n$HADOOP_HOME/sbin/start-yarn.sh\n$HADOOP_HOME/sbin/hadoop-daemon.sh start namenode\n$HADOOP_HOME/sbin/hadoop-daemon.sh start datanode\n$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager\n$HADOOP_HOME/sbin/yarn-daemon.sh start nodemanager\n$HADOOP_HOME/sbin/mr-jobhistory-daemon.sh start historyserver\n\nhdfs dfs -mkdir -p /user/hive/warehouse\nhdfs dfs -chmod g+w /tmp\nhdfs dfs -chmod g+w /user/hive/warehouse\nhdfs dfs -mkdir /stage\nhdfs dfs -chmod g+w /stage\n\n$HIVE_HOME/bin/schematool -initSchema -dbType derby\n\ntail -f /dev/null" }, { "alpha_fraction": 0.5701438784599304, "alphanum_fraction": 0.6013189554214478, "avg_line_length": 22.18055534362793, "blob_id": "8fc798922deef4dc02263264a5a3ccdccd7f61a2", "content_id": "2cde2215440b215710948aab802e028e1e2acfaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1671, "license_type": "no_license", "max_line_length": 90, "num_lines": 72, "path": "/scripts/python/enade_pandas.py", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport sqlalchemy\nimport pymysql\nimport os\n\nenade = pd.read_csv(\n \"/raw-data/microdados_enade_2019.txt\",\n sep = ';',\n decimal = ',',\n low_memory=False\n)\n\nMYSQL_CONN_STRING = os.getenv('MYSQL_CONN_STRING')\nMYSQL_DATABASE = os.getenv('MYSQL_DATABASE')\n\n# TP_SEXO = sexo\n# NU_IDADE = idade\n# QE_I02 = raça\n# QE_I08 = renda familiar\n# QE_I17 = tipo de escola que cursou EM\n\nenade = enade[['TP_SEXO', 'NU_IDADE', 'QE_I02', 'QE_I08', 'QE_I17']]\n\n\nenade[\"SEXO\"] = enade.TP_SEXO.replace({\n \"M\": \"Masculino\",\n \"F\": \"Feminino\"\n})\n\nenade[\"RENDA_FAMILIAR\"] = enade.QE_I08.replace({\n \"A\": \"Até 1,5 salarios\",\n \"B\": \"De 1,5 a 3 salarios\",\n \"C\": \"De 3 a 4,5 salarios\",\n \"D\": \"De 4,5 a 6 salarios\",\n \"E\": \"De 6 a 10 salarios\",\n \"F\": \"De 10 a 30 salarios\",\n \"G\": \"Acima de 30 salarios\"\n})\n\nenade[\"COR\"] = enade.QE_I02.replace({\n \"A\": \"Branca\",\n \"B\": \"Preta\",\n \"C\": \"Amarela\",\n \"D\": \"Parda\",\n \"E\": \"Indígena\",\n \"F\": pd.NA,\n \" \": pd.NA\n})\n\nenade[\"TP_ESCOLA\"] = enade.QE_I17.replace({\n \"A\": \"Todo em escola publica.\",\n \"B\": \"Todo em escola privada (particular).\",\n \"C\": \"Todo no exterior.\",\n \"D\": \"A maior parte em escola publica.\",\n \"E\": \"A maior parte em escola privada (particular).\",\n \"F\": \"Parte no Brasil e parte no exterior.\",\n})\n\n\nenade = enade.drop(columns=['TP_SEXO','QE_I02', 'QE_I08', 'QE_I17'])\n\n\nconnection_string = MYSQL_CONN_STRING + \"/\" + MYSQL_DATABASE\nengine 
= sqlalchemy.create_engine(\n connection_string\n)\n\nenade.to_sql(\"enade_tratado\", con=engine, index=False, if_exists='append', chunksize=1000)\n\n# print(enade.head)\n# print(enade.values)\nprint(dict(enade.dtypes))" }, { "alpha_fraction": 0.6538548469543457, "alphanum_fraction": 0.6769161224365234, "avg_line_length": 31.29197120666504, "blob_id": "d4b70c377c3160a8ec8a0ed2048f858c0eaa4bea", "content_id": "843c3941cb68c5c23297ab8a0425696e2c34111e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 4423, "license_type": "no_license", "max_line_length": 165, "num_lines": 137, "path": "/Dockerfile", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "FROM ubuntu:18.04\n\nRUN apt-get update && apt-get install -y --no-install-recommends \\\n openjdk-8-jdk \\\n net-tools \\\n curl \\\n netcat \\\n gnupg \\\n libsnappy-dev \\\n openssh-server \\\n vim \\\n nano \\\n unzip \\\n wget \\\n rsync \\\n zip \\\n git \\\n && rm -rf /var/lib/apt/lists/*\n \nENV JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/\n\n##HADOOP\nENV HADOOP_VERSION=2.7.2\nENV HADOOP_URL https://archive.apache.org/dist/hadoop/common/hadoop-$HADOOP_VERSION/hadoop-$HADOOP_VERSION.tar.gz\n\nRUN set -x \\\n && curl -fSL \"$HADOOP_URL\" -o /tmp/hadoop.tar.gz \\\n && tar -xvf /tmp/hadoop.tar.gz -C /opt/ \\\n && rm /tmp/hadoop.tar.gz*\n\nENV HADOOP_HOME=/opt/hadoop-$HADOOP_VERSION\nENV HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop\nENV HADOOP_MAPRED_HOME=$HADOOP_HOME\nENV HADOOP_COMMON_HOME=$HADOOP_HOME\nENV HADOOP_HDFS_HOME=$HADOOP_HOME\nENV YARN_HOME=$HADOOP_HOME\nENV HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native\nENV HADOOP_OPTS=\"$HADOOP_OPTS -Djava.library.path=$HADOOP_HOME/lib/native\"\n\nCOPY config/core-site.xml $HADOOP_HOME/etc/hadoop/\nCOPY config/hdfs-site.xml $HADOOP_HOME/etc/hadoop/\nCOPY config/mapred-site.xml $HADOOP_HOME/etc/hadoop/\nCOPY config/yarn-site.xml $HADOOP_HOME/etc/hadoop/\n\n##HIVE\nENV HIVE_VERSION=2.1.0\n\nRUN set -x \\\n\t&& curl -fSL http://archive.apache.org/dist/hive/hive-$HIVE_VERSION/apache-hive-$HIVE_VERSION-bin.tar.gz -o /tmp/hive.tar.gz \\\n\t&& tar -xvf /tmp/hive.tar.gz -C /opt/ \\\n && rm /tmp/hive.tar.gz\n\nENV HIVE_HOME=/opt/apache-hive-$HIVE_VERSION-bin\nCOPY config/hive-site.xml $HIVE_HOME/conf/\nCOPY config/hive-env.sh $HIVE_HOME/conf/\n\n#SQOOP\nENV SQOOP_VERSION=1.4.7\nENV SQOOP_HOME=/opt/sqoop-${SQOOP_VERSION}.bin__hadoop-2.6.0\n\nRUN set -x \\ \n && curl -fSL https://archive.apache.org/dist/sqoop/${SQOOP_VERSION}/sqoop-${SQOOP_VERSION}.bin__hadoop-2.6.0.tar.gz -o /tmp/sqoop.tar.gz \\\n && tar -xvf /tmp/sqoop.tar.gz -C /opt/ \\\n && rm /tmp/sqoop.tar.gz \n\nRUN set -x \\ \n && curl -fSL https://downloads.mysql.com/archives/get/p/3/file/mysql-connector-java-8.0.21.tar.gz -o /tmp/mysql-connector-java-8.0.21.tar.gz \\\n && tar -xvf /tmp/mysql-connector-java-8.0.21.tar.gz -C /tmp/ \\\n && mv /tmp/mysql-connector-java-8.0.21/mysql-connector-java-8.0.21.jar ${SQOOP_HOME}/lib \\\n && rm -r /tmp/mysql-connector-java-8.0.21 \\\n && rm /tmp/mysql-connector-java-8.0.21.tar.gz\n\nRUN set -x \\\n && curl -fSL https://downloads.apache.org/commons/lang/binaries/commons-lang-2.6-bin.tar.gz -o /tmp/commons-lang-2.6-bin.tar.gz \\\n && tar -xvf /tmp/commons-lang-2.6-bin.tar.gz -C /tmp/ \\\n && mv /tmp/commons-lang-2.6/commons-lang-2.6.jar ${SQOOP_HOME}/lib \\\n && rm -r /tmp/commons-lang-2.6/ \\\n && rm /tmp/commons-lang-2.6-bin.tar.gz\n\nCOPY config/sqoop-env.sh $SQOOP_HOME/conf/\n\n##PYTHON\nRUN apt-get update && apt-get install 
-y --no-install-recommends \\\n\tpython3.6 \\\n\tpython3.6-dev \\\n\tpython3-pip \\\n\tpython3.6-venv\nRUN python3.6 -m pip install pip --upgrade\nRUN python3.6 -m pip install wheel\nRUN pip install pandas\nRUN pip install sqlalchemy\nRUN pip install pymysql\nRUN pip install pymongo\nRUN pip install setuptools\nRUN pip install tweepy\n\n##SPARK\nENV SPARK_VERSION=2.4.8\nENV SPARK_URL=https://www.apache.org/dist/spark/spark-$SPARK_VERSION/spark-$SPARK_VERSION-bin-hadoop2.7.tgz \n\nRUN set -x \\\n && curl -fSL \"$SPARK_URL\" -o /tmp/spark.tar.gz \\\n && tar -xvf /tmp/spark.tar.gz -C /opt/ \\\n && rm /tmp/spark.tar.gz*\n\nENV SPARK_HOME=/opt/spark-$SPARK_VERSION-bin-hadoop2.7\nENV PYSPARK_PYTHON=python3.6\n\n#MONGODB CONNECTOR WITH HADOOP\nWORKDIR /tmp/\nRUN set -x \\\n && curl -fSL https://repo1.maven.org/maven2/org/mongodb/mongo-hadoop/mongo-hadoop-spark/2.0.2/mongo-hadoop-spark-2.0.2.jar -o /tmp/mongo-hadoop-spark-2.0.2.jar \\\n && git clone https://github.com/mongodb/mongo-hadoop.git \\\n && cd mongo-hadoop/spark/src/main/python \\\n && python3 /tmp/mongo-hadoop/spark/src/main/python/setup.py install\nWORKDIR /\n\nENV PATH $PATH:$HADOOP_HOME/bin:$HIVE_HOME/bin:$SQOOP_HOME/bin:$SPARK_HOME/bin\nENV HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$HIVE_HOME/lib/*\n\nRUN \\\n ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa && \\\n cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys && \\\n chmod 0600 ~/.ssh/authorized_keys\n\nCOPY config/env.sh /tmp/env.sh\nRUN chmod a+x /tmp/env.sh\nRUN /tmp/env.sh\nRUN rm -f /tmp/env.sh\n\nRUN mkdir /scripts\nRUN mkdir /raw-data\n\nADD start.sh /start.sh\nRUN chmod a+x /start.sh\n\nCMD [\"sh\", \"-c\", \"/start.sh\"]" }, { "alpha_fraction": 0.5483871102333069, "alphanum_fraction": 0.5605599284172058, "avg_line_length": 29.370370864868164, "blob_id": "4ab0ff5274addac2b012155a2a90f2482f2358ad", "content_id": "8fa35025f4bcdc9104343f9091f81fe67f6093fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1643, "license_type": "no_license", "max_line_length": 81, "num_lines": 54, "path": "/scripts/sh/execute_flow_enade.sh", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho \"Getting data from enade...\"\nbash -x /scripts/sh/get_enade.sh\n\nif [ $? -eq 0 ]; then\n echo \"Get enade data successfully\"\n echo \"Loading data into MySQL database...\"\n\n python3 /scripts/python/enade_pandas.py\n\n if [ $? -eq 0 ]; then\n echo \"Data loaded into MySQL database successfully\"\n echo \"Creating database enade on Hive...\"\n hive -e \"CREATE DATABASE enade;\"\n\n if [ $? -eq 0 ]; then\n echo \"enade database created successfully on Hive\"\n echo \"Trying to import enade data from MySQL database to hive table...\"\n\n sqoop import --connect jdbc:mysql://172.21.0.1:3306/enade \\\n --driver com.mysql.cj.jdbc.Driver \\\n --username root \\\n --password ${MYSQL_ROOT_PASSWORD} \\\n --split-by id \\\n --columns id,NU_IDADE,SEXO,RENDA_FAMILIAR,COR,TP_ESCOLA \\\n --table enade_tratado \\\n --bindir /tmp/sqoop-root/compile \\\n --target-dir /user/root/enade_tratado \\\n --fields-terminated-by \",\" \\\n --hive-import \\\n --create-hive-table \\\n --hive-table enade.enade_tratado\n \n if [ $? 
-eq 0 ]; then\n echo \"Import data from MySQL successfully\"\n\n else\n echo \"We have a problem importing data from MySQL database\"\n exit 1\n fi\n \n else\n echo \"We have a problem creating enade database on Hive\"\n exit 1\n fi\n else\n echo \"We have a problem loading data into MySQL database\"\n exit 1\n fi\nelse\n echo \"We have a problem getting enade data\"\n exit 1\nfi\n\n\n\n" }, { "alpha_fraction": 0.6310272812843323, "alphanum_fraction": 0.6394129991531372, "avg_line_length": 19.782608032226562, "blob_id": "5f38e8368ef39bb36f249aa7cb2c9aba9e9941bc", "content_id": "a6f079ee908ea5a223f9025120bd458ec451674e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": "no_license", "max_line_length": 53, "num_lines": 23, "path": "/scripts/python/pyspark_mongo.py", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "import pymongo_spark\nfrom pyspark.sql import SparkSession\nimport os\n\npymongo_spark.activate()\n\nMONGO_CONN_STRING = os.getenv('MONGO_CONN_STRING')\n\nspark = SparkSession.builder \\\n .appName(\"pyspark-mongo\") \\\n .enableHiveSupport() \\\n .getOrCreate()\n\nsql = \"\"\"\nSELECT * FROM enade.enade_tratado limit 1000\n\"\"\"\ndf = spark.sql(sql)\n\nrdd = df.rdd.map(tuple)\n\nrdd.saveToMongoDB(MONGO_CONN_STRING + '/admin.enade')\n\nspark.stop()" }, { "alpha_fraction": 0.6216216087341309, "alphanum_fraction": 0.6936936974525452, "avg_line_length": 21.299999237060547, "blob_id": "8a5a74c02ca9fb94a1afe0aeffa9d33602e0bd7e", "content_id": "76634ef5a1ed861ee19631627631b75dcd9f1e55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 222, "license_type": "no_license", "max_line_length": 42, "num_lines": 10, "path": "/scripts/sql/enade_ddl.sql", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "USE enade;\nCREATE TABLE IF NOT EXISTS enade_tratado (\n id int(10) NOT NULL AUTO_INCREMENT,\n NU_IDADE int(10),\n SEXO varchar(100),\n RENDA_FAMILIAR varchar(100),\n COR varchar(100),\n TP_ESCOLA varchar(100),\n KEY(id)\n);" }, { "alpha_fraction": 0.6646126508712769, "alphanum_fraction": 0.6676936745643616, "avg_line_length": 33.43939208984375, "blob_id": "3f886e4bd7e9ad793a93580a629340f71190d199", "content_id": "6ed529fc3321ed84d03f2bbdc76245f4d63edfa6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2272, "license_type": "no_license", "max_line_length": 93, "num_lines": 66, "path": "/scripts/python/get_tweets.py", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "import json\nimport tweepy\nfrom datetime import datetime\nimport os\nimport pandas as pd\nimport sys\nimport hashlib\n\ndef create_user_locs(tweets):\n \n user_locs = []\n for tweet in tweets:\n username = str(tweet.user.screen_name.lower().encode('ascii',errors='ignore'))\n description = str(tweet.user.description.lower().encode('ascii',errors='ignore'))\n location = str(tweet.user.location.lower().encode('ascii',errors='ignore'))\n following = str(str(tweet.user.friends_count).lower().encode('ascii',errors='ignore'))\n followers = str(str(tweet.user.followers_count).lower().encode('ascii',errors='ignore'))\n totaltweets = str(str(tweet.user.statuses_count).lower().encode('ascii',errors='ignore'))\n retweetcount = str(str(tweet.retweet_count).lower().encode('ascii',errors='ignore'))\n text = str(tweet.text.lower().encode('ascii',errors='ignore'))\n \n th_tweet = [username, description, 
location, following,\n followers, totaltweets, retweetcount, text]\n \n user_locs.append(th_tweet)\n\n return user_locs\n\ndef hash_username(user_list):\n length = range(len(user_list))\n for i in length:\n user_list[i][0] = hashlib.md5(user_list[i][0].encode()).hexdigest()\n \n return user_list\n\ndef create_csv(data, file_name):\n df = pd.DataFrame(data=data,columns=['username', 'description', 'location', 'following',\n 'followers', 'totaltweets', 'retweetcount', 'text'])\n\n df.to_csv(file_name)\n\n\nif __name__ == \"__main__\":\n consumer_key = os.getenv('consumer_key')\n consumer_secret = os.getenv('consumer_secret')\n access_token = os.getenv('access_token')\n access_token_secret = os.getenv('access_token_secret')\n\n hashtag = sys.argv[1] + \" -filter:retweets\"\n # date_since format = yyyy-mm-dd\n date_since = sys.argv[2]\n number_items = int(sys.argv[3])\n file_name = sys.argv[4]\n\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True)\n\n tweets = tweepy.Cursor(api.search,\n q=hashtag,\n lang=\"pt\",\n since=date_since).items(number_items)\n\n data = create_user_locs(tweets)\n data_hash = hash_username(data)\n create_csv(data_hash, file_name)" }, { "alpha_fraction": 0.6310272812843323, "alphanum_fraction": 0.6394129991531372, "avg_line_length": 19.782608032226562, "blob_id": "5f38e8368ef39bb36f249aa7cb2c9aba9e9941bc", "content_id": "a6f079ee908ea5a223f9025120bd458ec451674e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 477, "license_type": "no_license", "max_line_length": 53, "num_lines": 23, "path": "/scripts/python/pyspark_mongo.py", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "import pymongo_spark\nfrom pyspark.sql import SparkSession\nimport os\n\npymongo_spark.activate()\n\nMONGO_CONN_STRING = os.getenv('MONGO_CONN_STRING')\n\nspark = SparkSession.builder \\\n .appName(\"pyspark-mongo\") \\\n .enableHiveSupport() \\\n .getOrCreate()\n\nsql = \"\"\"\nSELECT * FROM enade.enade_tratado limit 1000\n\"\"\"\ndf = spark.sql(sql)\n\nrdd = df.rdd.map(tuple)\n\nrdd.saveToMongoDB(MONGO_CONN_STRING + '/admin.enade')\n\nspark.stop()" }, { "alpha_fraction": 0.6216216087341309, "alphanum_fraction": 0.6936936974525452, "avg_line_length": 21.299999237060547, "blob_id": "8a9c74b88ac8d9ba12acb8aa78a6eee2cd05e733", "content_id": "76634ef5a1ed861ee19631627631b75dcd9f1e55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 222, "license_type": "no_license", "max_line_length": 42, "num_lines": 10, "path": "/scripts/sql/enade_ddl.sql", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "USE enade;\nCREATE TABLE IF NOT EXISTS enade_tratado (\n id int(10) NOT NULL AUTO_INCREMENT,\n NU_IDADE int(10),\n SEXO varchar(100),\n RENDA_FAMILIAR varchar(100),\n COR varchar(100),\n TP_ESCOLA varchar(100),\n KEY(id)\n);" }, { "alpha_fraction": 0.694716215133667, "alphanum_fraction": 0.694716215133667, "avg_line_length": 12.487179756164551, "blob_id": "7693945947277ad599428854a54cc7f8c9886570", "content_id": "3f407b47e9329e4dfce3e8cb3390ba7fbc0a4791", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 526, "license_type": "no_license", "max_line_length": 52, "num_lines": 39, "path": "/README.md", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "# Case - Data Engineer\n## Santander Data Master Certification\n\n## Download\n\n```\ngit clone https://github.com/detonih/data_master.git\n```\n\n## Building the image\n\n```\nmake build\n```\n\n## Creating the containerized services\n\n```\nmake up\n```\n\n## Removing the containerized services\n\n```\nmake prune\n```\n\n## Accessing the main service command line\n\n```\nmake bash\n```\n\n## Running the workflow scripts\n\nFirst access the main service command line\n```\nbash /scripts/sh/execute_flow_enade.sh\n```" }, { "alpha_fraction": 0.5612244606018066, "alphanum_fraction": 0.5981161594390869, "avg_line_length": 30.875, "blob_id": "7b15070f2fab16654c29875b73a6962caa2d7643", "content_id": "44d0bb9f806c994cefa79c47baf4e1a6b97a60ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 1274, "license_type": "no_license", "max_line_length": 126, "num_lines": 40, "path": "/scripts/sh/get_enade.sh", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ncurl https://download.inep.gov.br/microdados/Enade_Microdados/microdados_enade_2019.zip -o /raw-data/microdados_enade_2019.zip\n\nFILE=/raw-data/microdados_enade_2019.zip\nif [ -f \"$FILE\" ]; then\n\n echo \"Unzipping file...\"\n unzip /raw-data/microdados_enade_2019.zip -d /raw-data/\n if [ $? 
-eq 0 ]; then\n echo \"File unzipped successfully\"\n EXTRACTED_FILE=/raw-data/3.DADOS/microdados_enade_2019.txt\n rm /raw-data/microdados_enade_2019.zip\n else\n echo \"We have a problem unzipping file\"\n fi\n\n if [ -f \"$EXTRACTED_FILE\" ]; then\n echo \"Putting $EXTRACTED_FILE to hdfs...\"\n hdfs dfs -put -f /raw-data/3.DADOS/microdados_enade_2019.txt /stage/microdados_enade_2019.txt\n mv /raw-data/3.DADOS/microdados_enade_2019.txt /raw-data/microdados_enade_2019.txt\n if [ $? -eq 0 ]; then\n echo \"Put file done\"\n echo \"Deleting raw data for enade\"\n for i in `seq 1 3`;\n do\n rm -rf /raw-data/$i.*\n echo \"/raw-data/$i.* deleted\"\n done\n \n else\n echo \"We have a problem putting $EXTRACTED_FILE on hdfs\"\n fi\n else\n echo \"$EXTRACTED_FILE does not exist.\"\n fi\n\nelse \n echo \"$FILE does not exist.\"\nfi" }, { "alpha_fraction": 0.7290322780609131, "alphanum_fraction": 0.7451612949371338, "avg_line_length": 33.44444274902344, "blob_id": "898887f507675741e0f01b8b0e8eba01eb70a6b2", "content_id": "9d330c21c01f6e108cf357e710078e6e3864879b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 310, "license_type": "no_license", "max_line_length": 137, "num_lines": 9, "path": "/config/env.sh", "repo_name": "detonih/data_master", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\necho \"\nexport JAVA_HOME=${JAVA_HOME}\nexport HADOOP_HOME=${HADOOP_HOME}\nexport HADOOP_CONF_DIR=${HADOOP_CONF_DIR}\n\" >> ${HADOOP_HOME}/etc/hadoop/hadoop-env.sh\n\nsed -i '99 i permission javax.management.MBeanTrustPermission \"register\";' /usr/lib/jvm/java-8-openjdk-amd64/jre/lib/security/java.policy\n" } ]
13
maxarndt/sdr-hello-world
https://github.com/maxarndt/sdr-hello-world
0ee41a622361b289bb19625577c998cdc735a415
4d204d7e032743195574a41a94b401aeda905fa4
7b74ab4eaff3f4dda0d2fbf11bc92e6923361b4e
refs/heads/master
2020-04-20T02:41:13.068624
2019-05-17T08:58:58
2019-05-17T08:58:58
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.523377537727356, "alphanum_fraction": 0.5473365783691406, "avg_line_length": 31.32330894470215, "blob_id": "7efd2298fa7d9325a42638350334b5f0ee0f187b", "content_id": "ebc30b53d7e75f29a711b96cb56321ba0724a3f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4299, "license_type": "no_license", "max_line_length": 85, "num_lines": 133, "path": "/lesson11/top_block.py", "repo_name": "maxarndt/sdr-hello-world", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n##################################################\n# GNU Radio Python Flow Graph\n# Title: Top Block\n# Generated: Thu May 16 15:36:23 2019\n##################################################\n\nif __name__ == '__main__':\n import ctypes\n import sys\n if sys.platform.startswith('linux'):\n try:\n x11 = ctypes.cdll.LoadLibrary('libX11.so')\n x11.XInitThreads()\n except:\n print \"Warning: failed to XInitThreads()\"\n\nfrom gnuradio import eng_notation\nfrom gnuradio import gr\nfrom gnuradio import wxgui\nfrom gnuradio.eng_option import eng_option\nfrom gnuradio.fft import window\nfrom gnuradio.filter import firdes\nfrom gnuradio.wxgui import fftsink2\nfrom gnuradio.wxgui import forms\nfrom grc_gnuradio import wxgui as grc_wxgui\nfrom optparse import OptionParser\nimport osmosdr\nimport time\nimport wx\n\n\nclass top_block(grc_wxgui.top_block_gui):\n\n def __init__(self):\n grc_wxgui.top_block_gui.__init__(self, title=\"Top Block\")\n\n ##################################################\n # Variables\n ##################################################\n self.samp_rate = samp_rate = 20e6\n self.channel_freq = channel_freq = 2.4e9\n\n ##################################################\n # Blocks\n ##################################################\n _channel_freq_sizer = wx.BoxSizer(wx.VERTICAL)\n self._channel_freq_text_box = forms.text_box(\n \tparent=self.GetWin(),\n \tsizer=_channel_freq_sizer,\n \tvalue=self.channel_freq,\n \tcallback=self.set_channel_freq,\n \tlabel='channel_freq',\n \tconverter=forms.float_converter(),\n \tproportion=0,\n )\n self._channel_freq_slider = forms.slider(\n \tparent=self.GetWin(),\n \tsizer=_channel_freq_sizer,\n \tvalue=self.channel_freq,\n \tcallback=self.set_channel_freq,\n \tminimum=2.4e9,\n \tmaximum=2.4835e9,\n \tnum_steps=835,\n \tstyle=wx.SL_HORIZONTAL,\n \tcast=float,\n \tproportion=1,\n )\n self.Add(_channel_freq_sizer)\n self.wxgui_fftsink2_0 = fftsink2.fft_sink_c(\n \tself.GetWin(),\n \tbaseband_freq=channel_freq,\n \ty_per_div=10,\n \ty_divs=10,\n \tref_level=0,\n \tref_scale=2.0,\n \tsample_rate=samp_rate,\n \tfft_size=1024,\n \tfft_rate=15,\n \taverage=False,\n \tavg_alpha=None,\n \ttitle='FFT Plot',\n \tpeak_hold=False,\n )\n self.Add(self.wxgui_fftsink2_0.win)\n self.osmosdr_source_0 = osmosdr.source( args=\"numchan=\" + str(1) + \" \" + '' )\n self.osmosdr_source_0.set_sample_rate(samp_rate)\n self.osmosdr_source_0.set_center_freq(channel_freq, 0)\n self.osmosdr_source_0.set_freq_corr(0, 0)\n self.osmosdr_source_0.set_dc_offset_mode(0, 0)\n self.osmosdr_source_0.set_iq_balance_mode(0, 0)\n self.osmosdr_source_0.set_gain_mode(False, 0)\n self.osmosdr_source_0.set_gain(0, 0)\n self.osmosdr_source_0.set_if_gain(16, 0)\n self.osmosdr_source_0.set_bb_gain(16, 0)\n self.osmosdr_source_0.set_antenna('', 0)\n self.osmosdr_source_0.set_bandwidth(0, 0)\n \n\n ##################################################\n # Connections\n ##################################################\n 
self.connect((self.osmosdr_source_0, 0), (self.wxgui_fftsink2_0, 0)) \n\n def get_samp_rate(self):\n return self.samp_rate\n\n def set_samp_rate(self, samp_rate):\n self.samp_rate = samp_rate\n self.wxgui_fftsink2_0.set_sample_rate(self.samp_rate)\n self.osmosdr_source_0.set_sample_rate(self.samp_rate)\n\n def get_channel_freq(self):\n return self.channel_freq\n\n def set_channel_freq(self, channel_freq):\n self.channel_freq = channel_freq\n self._channel_freq_slider.set_value(self.channel_freq)\n self._channel_freq_text_box.set_value(self.channel_freq)\n self.wxgui_fftsink2_0.set_baseband_freq(self.channel_freq)\n self.osmosdr_source_0.set_center_freq(self.channel_freq, 0)\n\n\ndef main(top_block_cls=top_block, options=None):\n\n tb = top_block_cls()\n tb.Start(True)\n tb.Wait()\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7184594869613647, "alphanum_fraction": 0.7503319978713989, "avg_line_length": 40.88888931274414, "blob_id": "78491ce05ebbafa77dfaf4e903f72417250560ff", "content_id": "01d73fd45e5339c69548b6dab0b379ac0499e042", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 753, "license_type": "no_license", "max_line_length": 85, "num_lines": 18, "path": "/README.md", "repo_name": "maxarndt/sdr-hello-world", "src_encoding": "UTF-8", "text": "# Software Defined Radio Hello World Application\n\nJust followed [Great Scott Gadgets SDR Tutorials](https://greatscottgadgets.com/sdr).\n\n## Lesson 1 - FM Radio\n[Link to Tutorial (Part 1)](https://greatscottgadgets.com/sdr/1/)\n* Set `channel_freq` to `96.3e6` which is the frequency of Bayern 3 in my hood :-)\n* Implement channel frequency slider.\n\n## Lesson 2 - Digital Signal Processing\n[Link to Tutorial (Part 2)](https://greatscottgadgets.com/sdr/2/)\n* generate waves\n\n## Lesson 11 - Replay Attack\n[Link to Tutorial (Part 11)](https://greatscottgadgets.com/sdr/11/)\n* Used a wireless power outlet control unit (working on 433,92MHz)\n* Used `inspectrum -r 2e6 FILENAME` to visualize the captured wave\n* Replayed the captured form of the on/off command" } ]
2
reactor-feng/engine
https://github.com/reactor-feng/engine
4f031b2ca073509151e62dfb43444f9f52228878
ae2ac58938f4cdeccb38fe34b73f859f0ab416a2
705a6857fa2350d0a490fffef854b926885387e1
refs/heads/master
2016-09-14T19:10:28.247934
2016-05-23T11:30:05
2016-05-23T11:30:05
56,138,532
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6650000214576721, "alphanum_fraction": 0.6800000071525574, "avg_line_length": 15.384614944458008, "blob_id": "cdb219ccc24e39815c1623a0cce5e27b8cb469fc", "content_id": "8cde91e074a1b2989f4803b9ca13e7eb88b8385c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 200, "license_type": "no_license", "max_line_length": 33, "num_lines": 13, "path": "/engine_/service.h", "repo_name": "reactor-feng/engine", "src_encoding": "UTF-8", "text": "#ifndef _SERVICE_H_\n#define _SERVICE_H_\n\nclass ServiceBase\n{\npublic:\n\tvirtual bool Init() = 0;\n\tvirtual bool ServiceStart() = 0;\n\tvirtual bool ServiceStop() = 0;\n\tvirtual ~ServiceBase() {}\n};\n\n#endif\n" }, { "alpha_fraction": 0.720588207244873, "alphanum_fraction": 0.720588207244873, "avg_line_length": 21.66666603088379, "blob_id": "9bfdcc7f300784c174c63739a9b8f0dc607ade9f", "content_id": "a3b6de594a80051e3827813737a3c182a26384ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 68, "license_type": "no_license", "max_line_length": 33, "num_lines": 3, "path": "/main.py", "repo_name": "reactor-feng/engine", "src_encoding": "UTF-8", "text": "import engine\nrst = engine.loop()\nprint(\"get rst from engine\", rst)\n" }, { "alpha_fraction": 0.6674008965492249, "alphanum_fraction": 0.6784141063690186, "avg_line_length": 16.461538314819336, "blob_id": "a73907328877c4f7de07504995eaf991260ff03c", "content_id": "c8ef3fe7190db80d7cb60433b79ae18b045984ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 454, "license_type": "no_license", "max_line_length": 52, "num_lines": 26, "path": "/engine_/service_timer.h", "repo_name": "reactor-feng/engine", "src_encoding": "UTF-8", "text": "#ifndef _SERVICE_TIMER_H_\n#define _SERVICE_TIMER_H_\n\n#include <common_def.h>\n#include <function_common.h>\n\nusing namespace boost;\n\nclass ServiceTimer\n{\nprivate:\n\tstd::set<int> m_setOfTick;\n\npublic:\n\ttemplate<class T1, class T2>\n\tint RegisterTick(\\\n\t\t\t\tshared_ptr<std::string> pMethodName,\\\n\t\t\t\tint nTickTime,\\\n\t\t\t\tboost::shared_ptr<FunctionObj<T1, T2> > pFunObj)\n\t{\n\t\tT2 pRsp = pFunObj->Execute();\n\t\tprintf(\"Cal result %d\\n\", *pRsp.get());\n\t\t// assumed success code: the original non-void function was missing a return\n\t\treturn 0;\n\t}\n};\n\n#endif\n" }, { "alpha_fraction": 0.7168367505073547, "alphanum_fraction": 0.7270408272743225, "avg_line_length": 16.81818199157715, "blob_id": "7db7b94def78773527c812a81a7e83f4c2b8c8ee", "content_id": "bae2fe5de3c52b93a0b6d119d5ca743feb24b4f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 392, "license_type": "no_license", "max_line_length": 31, "num_lines": 22, "path": "/engine_/common_def.h", "repo_name": "reactor-feng/engine", "src_encoding": "UTF-8", "text": "#ifndef _ENGINE_COMMON_DEF_H_\n#define _ENGINE_COMMON_DEF_H_\n\n#include <cstdio>\n#include <boost/python.hpp>\n#include <Python.h>\n#include <boost/asio.hpp>\n#include <boost/thread.hpp>\n#include <boost/shared_ptr.hpp>\n\n#include <iostream>\n#include <set>\n#include <map>\n#include <cassert>\n\n\n#define E_SERVICE_MAIN 0\n#define E_SERVICE_IO 1\n#define E_SERVICE_TIMER 2\n#define E_SERVICE_LAST 3\n\n#endif\n" }, { "alpha_fraction": 0.6839080452919006, "alphanum_fraction": 0.6839080452919006, "avg_line_length": 9.235294342041016, "blob_id": "ee38f2fcde154d29ac00e1a50b5f7f3c142ad24c", "content_id": "c668b13d7b964f3a6aa5b3eb38f7f9914f1f798c", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "C++", "length_bytes": 174, "license_type": "no_license", "max_line_length": 28, "num_lines": 17, "path": "/engine_/service_io.h", "repo_name": "reactor-feng/engine", "src_encoding": "UTF-8", "text": "#ifndef _SERVICE_IO_H_\n#define _SERVICE_IO_H_\n\n#include <boost/asio.hpp>\n\nusing namespace boost::asio;\n\nclass ServiceIO\n{\nprivate:\n\t// assumed member declaration: the original bare statement 'ip::tcp::endpoint()' did not compile\n\tip::tcp::endpoint m_endpoint;\n\npublic:\n};\n\n#endif\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6000000238418579, "avg_line_length": 21.5, "blob_id": "48a0fca35835236f27b799948c445020db72e87b", "content_id": "6c844ffd641f2fa4ce2857f2f9a238eff64c1c6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 45, "license_type": "no_license", "max_line_length": 32, "num_lines": 2, "path": "/cal.py", "repo_name": "reactor-feng/engine", "src_encoding": "UTF-8", "text": "def show():\n print(\"this is in the cal module.\")\n" }, { "alpha_fraction": 0.5656962990760803, "alphanum_fraction": 0.5786781907081604, "avg_line_length": 20.542373657226562, "blob_id": "2c2e4225208dfc7564afab00e3ee2c655510fbeb", "content_id": "b00a90c4b72506502fe26f3c6577281c96a7f3f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 555, "license_type": "no_license", "max_line_length": 92, "num_lines": 25, "path": "/engine_/makefile", "repo_name": "reactor-feng/engine", "src_encoding": "UTF-8", "text": "GXX = g++\r\nGXX_FLAG = -shared -fPIC\r\nPROJECT_NAME = ../engine.so\r\nINCLUDE_PATH = -I ~/trans/boost -I/usr/include/python2.7 -I./\r\nLIB_PATH = -L/usr/share/lib -L/home/reactor/trans/boost/stage/lib/\r\nLIBS = -lboost_python -lboost_system -lboost_thread\r\nEXTRAS =\r\n\r\nvpath =\r\nVPATH =\r\n\r\nOBJS = engine.o\r\n\r\nall: $(OBJS)\r\n\t$(GXX) $(GXX_FLAG) $(OBJS) $(INCLUDE_PATH) $(LIB_PATH) $(LIBS) -o $(PROJECT_NAME) $(EXTRAS)\r\n\r\n%.o: %.cc\r\n\t$(GXX) $(INCLUDE_PATH) -c $^ -o $@\r\n\r\n%.o: %.cpp\r\n\t$(GXX) $(INCLUDE_PATH) $(GXX_FLAG) -c $^ -o $@\r\n\r\nclean:\r\n\trm ./*.o\r\n\trm ./*.so\r\n" }, { "alpha_fraction": 0.6989644765853882, "alphanum_fraction": 0.7041420340538025, "avg_line_length": 23.125, "blob_id": "ac8e546e3056dc88bf11d8a97863da7c037fc3bf", "content_id": "86f843609e5538b854c09b806ebaddb0bd07bfa5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1352, "license_type": "no_license", "max_line_length": 133, "num_lines": 56, "path": "/engine_/engine.cpp", "repo_name": "reactor-feng/engine", "src_encoding": "UTF-8", "text": "#include <common_def.h>\n#include <service_timer.h>\n\nusing namespace boost;\n\ntypedef boost::shared_ptr<boost::asio::io_service> IO_SERVICE_PTR;\nstd::vector<IO_SERVICE_PTR> g_VecAllServicePtr;\n\nshared_ptr<int> Square(shared_ptr<int> pArg)\n{\n\tint nVal = *pArg.get();\n\tshared_ptr<int> pRst(new int(nVal * nVal));\n\treturn pRst;\n}\n\nvoid ServiceOfTimer()\n{\n\tstatic int s_nCounter = 0;\n\tprintf(\"run time service, counter is %d\\n\", s_nCounter);\n\ts_nCounter += 1;\n\t\n\tServiceTimer objTimerService = ServiceTimer();\n\n\tshared_ptr<std::string> pMethodName(new std::string(\"test_fun\"));\n\tshared_ptr<int> pReq(new int(10));\n\tshared_ptr< FunctionObj<shared_ptr<int>, shared_ptr<int> > > pFun(new FunctionObj<shared_ptr<int>, shared_ptr<int> >(Square, pReq));\n\tobjTimerService.RegisterTick(pMethodName, 100, pFun);\n}\n\nvoid ServiceOfIO()\n{\n\tprintf(\"start of io service\\n\");\n\tprintf(\"end of io 
service\\n\");\n}\n\nPyObject * Loop()\n{\n\tg_VecAllServicePtr.clear();\n\tfor (int i = E_SERVICE_IO; i < E_SERVICE_LAST; ++ i)\n\t{\n\t\tIO_SERVICE_PTR pIOService(new boost::asio::io_service);\n\t\tg_VecAllServicePtr.push_back(pIOService);\n\t}\n\tboost::thread threadOfIO(ServiceOfIO);\n\tboost::thread threadOfTimer(ServiceOfTimer);\n\n\tthreadOfIO.join();\n\tthreadOfTimer.join();\n\n\treturn Py_BuildValue(\"s\", \"oops all is done.\");\n}\n\nBOOST_PYTHON_MODULE(engine)\n{\n\tboost::python::def(\"loop\", Loop);\n}\n\n" }, { "alpha_fraction": 0.6936089992523193, "alphanum_fraction": 0.6936089992523193, "avg_line_length": 15.625, "blob_id": "30e4deec95c83e3f319d5f4d83f35246fd99be56", "content_id": "1d63b9387ca3b8b9d9cc17386194d52224aacbc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 532, "license_type": "no_license", "max_line_length": 62, "num_lines": 32, "path": "/engine_/function_common.h", "repo_name": "reactor-feng/engine", "src_encoding": "UTF-8", "text": "#ifndef _FUNCTION_COMMON_H_\n#define _FUNCTION_COMMON_H_\n\n#include <common_def.h>\n\n/*\n * TFunPtr, TReqPtr, TRspPtr all is type of boost::shared_ptr\n * */\ntemplate<class TReqPtr, class TRspPtr>\nclass FunctionObj\n{\nprivate:\n\ttypedef TRspPtr (*TFunPtr)(TReqPtr);\n\tTFunPtr m_pFunPtr;\n\tTReqPtr m_pReqPtr;\n\npublic:\n\texplicit FunctionObj(TFunPtr pFunObj, TReqPtr pReqObj)\n\t{\n\t\tthis->m_pFunPtr = pFunObj;\n\t\tthis->m_pReqPtr = pReqObj;\n\t}\n\n\t~FunctionObj() {}\n\n\tTRspPtr Execute()\n\t{\n\t\treturn (*this->m_pFunPtr)(this->m_pReqPtr);\n\t}\n};\n\n#endif\n" } ]
9
sudhakarmlal/EVA4Phase2CapStone
https://github.com/sudhakarmlal/EVA4Phase2CapStone
5d5fab72817f4b1201565e69f10cc07c908dc3ce
c81bd4ff75f95933e3fc2bd0acb4f95482939c69
af732226cf109fc09fc08853d6d9abe8a583926b
refs/heads/main
2023-02-13T20:36:36.342457
2021-01-10T17:33:46
2021-01-10T17:33:46
328,438,827
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6403405666351318, "alphanum_fraction": 0.6467997431755066, "avg_line_length": 29.693693161010742, "blob_id": "80fc836984f39405ee692f52a5d92e75afb61197", "content_id": "00f407e132641dff2c5b6a949cbdd9e963f0d038", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3406, "license_type": "no_license", "max_line_length": 115, "num_lines": 111, "path": "/tweetsa.py", "repo_name": "sudhakarmlal/EVA4Phase2CapStone", "src_encoding": "UTF-8", "text": "import spacy\nimport torch, torchtext\nnlp = spacy.load('en')\nfrom flask import Flask, jsonify, request, redirect, render_template\nimport os, pickle\nimport torch.nn as nn\nimport torch.nn.functional as F\n\napp = Flask(__name__)\napp.secret_key = \"secret key\"\n\n# define model\nclass classifier(nn.Module):\n\n # Define all the layers used in model\n def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, dropout):\n super().__init__()\n\n # Embedding layer\n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n\n # LSTM layer\n self.encoder = nn.LSTM(embedding_dim,\n hidden_dim,\n num_layers=n_layers,\n dropout=dropout,\n batch_first=True)\n # try using nn.GRU or nn.RNN here and compare their performances\n # try bidirectional and compare their performances\n\n # Dense layer\n self.fc = nn.Linear(hidden_dim, output_dim)\n\n def forward(self, text, text_lengths):\n # text = [batch size, sent_length]\n embedded = self.embedding(text)\n # embedded = [batch size, sent_len, emb dim]\n\n # packed sequence\n packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths.cpu(), batch_first=True)\n\n packed_output, (hidden, cell) = self.encoder(packed_embedded)\n # hidden = [batch size, num layers * num directions,hid dim]\n # cell = [batch size, num layers * num directions,hid dim]\n\n # Hidden = [batch size, hid dim * num directions]\n dense_outputs = self.fc(hidden)\n\n # Final activation function softmax\n output = F.softmax(dense_outputs[0], dim=1)\n\n return output\n\n# Define hyperparameters\nsize_of_vocab = 4651\nembedding_dim = 300\nnum_hidden_nodes = 100\nnum_output_nodes = 3\nnum_layers = 2\ndropout = 0.2\n\n# Instantiate the model\nmodel = classifier(size_of_vocab, embedding_dim, num_hidden_nodes, num_output_nodes, num_layers, dropout = dropout)\n\n# load weights and tokenizer\n\npath = '/home/ubuntu/tweetsa/models/saved_weights.pt'\nmodel.load_state_dict(torch.load(path));\nmodel.eval();\ntokenizer_file = open('/home/ubuntu/tweetsa/models/tokenizer.pkl', 'rb')\ntokenizer = pickle.load(tokenizer_file)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# inference\n\ndef classify_tweet(tweet):\n\n # tokenize the tweet\n tokenized = [tok.text for tok in nlp.tokenizer(tweet)]\n # convert to integer sequence using predefined tokenizer dictionary\n indexed = [tokenizer[t] for t in tokenized]\n # compute no. of words\n length = [len(indexed)]\n # convert to tensor\n tensor = torch.LongTensor(indexed).to(device)\n # reshape in form of batch, no. 
of words\n tensor = tensor.unsqueeze(1).T\n # convert to tensor\n length_tensor = torch.LongTensor(length)\n # Get the model prediction\n prediction = model(tensor, length_tensor)\n\n _, pred = torch.max(prediction, 1)\n\n return pred.item()\n\n# URL Routes\[email protected]('/')\ndef index():\n return render_template('home.html')\n\[email protected]('/', methods=['POST'])\ndef predict():\n if request.method == 'POST':\n tweet = request.form['tweet']\n my_prediction = classify_tweet(tweet)\n return render_template('result.html', prediction=my_prediction)\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')" }, { "alpha_fraction": 0.6325041055679321, "alphanum_fraction": 0.640697181224823, "avg_line_length": 29.513635635375977, "blob_id": "4853512c0f80a89eb8e9cd028ee60e9b0d827cdc", "content_id": "750bdaefbc985a6113ed30b9344a5981ed726a94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6713, "license_type": "no_license", "max_line_length": 115, "num_lines": 220, "path": "/texttranslation/training/train_lstm.py", "repo_name": "sudhakarmlal/EVA4Phase2CapStone", "src_encoding": "UTF-8", "text": "# Import Libraries\nimport boto3\nimport io\nimport pandas as pd\nimport random\nimport os, pickle\nimport torch, torchtext\nfrom torchtext import data\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# Manual Seed\nSEED = 43\ntorch.manual_seed(SEED)\n\n# Download dataset\ns3 = boto3.client('s3', aws_access_key_id= 'AKIAJQ4G7Y5I3HD33SMA',\n aws_secret_access_key= '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')\nobj = s3.get_object(Bucket='tsaibucket', Key='tweets.csv')\ndf = pd.read_csv(io.BytesIO(obj['Body'].read()))\n\nprint(df.shape)\nprint(df.labels.value_counts())\n\n# Defining Fields\n\nTweet = data.Field(sequential = True, tokenize = 'spacy', batch_first =True, include_lengths=True)\nLabel = data.LabelField(tokenize ='spacy', is_target=True, batch_first =True, sequential =False)\nfields = [('tweets', Tweet),('labels',Label)]\nexample = [data.Example.fromlist([df.tweets[i],df.labels[i]], fields) for i in range(df.shape[0])]\ntwitterDataset = data.Dataset(example, fields)\n(train, valid) = twitterDataset.split(split_ratio=[0.85, 0.15], random_state=random.seed(SEED))\nprint((len(train), len(valid)))\n\nprint(vars(train.examples[10]))\n\nTweet.build_vocab(train)\nLabel.build_vocab(train)\n\nprint('Size of input vocab : ', len(Tweet.vocab))\nprint('Size of label vocab : ', len(Label.vocab))\nprint('Top 10 words appreared repeatedly :', list(Tweet.vocab.freqs.most_common(10)))\nprint('Labels : ', Label.vocab.stoi)\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\ntrain_iterator, valid_iterator = data.BucketIterator.splits((train, valid), batch_size = 32,\n sort_key = lambda x: len(x.tweets),\n sort_within_batch=True, device = device)\n\nwith open('/home/ubuntu/tweetsa/models/tokenizer.pkl', 'wb') as tokens:\n pickle.dump(Tweet.vocab.stoi, tokens)\n\n# define model\n\nclass classifier(nn.Module):\n\n # Define all the layers used in model\n def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, dropout):\n super().__init__()\n\n # Embedding layer\n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n\n # LSTM layer\n self.encoder = nn.LSTM(embedding_dim,\n hidden_dim,\n num_layers=n_layers,\n dropout=dropout,\n batch_first=True)\n # try using nn.GRU or nn.RNN here and compare their performances\n # try bidirectional and compare their performances\n\n # Dense layer\n self.fc 
= nn.Linear(hidden_dim, output_dim)\n\n def forward(self, text, text_lengths):\n # text = [batch size, sent_length]\n embedded = self.embedding(text)\n # embedded = [batch size, sent_len, emb dim]\n\n # packed sequence\n packed_embedded = nn.utils.rnn.pack_padded_sequence(embedded, text_lengths.cpu(), batch_first=True)\n\n packed_output, (hidden, cell) = self.encoder(packed_embedded)\n # hidden = [batch size, num layers * num directions,hid dim]\n # cell = [batch size, num layers * num directions,hid dim]\n\n # Hidden = [batch size, hid dim * num directions]\n dense_outputs = self.fc(hidden)\n\n # Final activation function softmax\n output = F.softmax(dense_outputs[0], dim=1)\n\n return output\n\n# Define hyperparameters\nsize_of_vocab = len(Tweet.vocab)\nembedding_dim = 300\nnum_hidden_nodes = 100\nnum_output_nodes = 3\nnum_layers = 2\ndropout = 0.2\n\n# Instantiate the model\nmodel = classifier(size_of_vocab, embedding_dim, num_hidden_nodes, num_output_nodes, num_layers, dropout = dropout)\nprint(model)\n\n\n# No. of trianable parameters\ndef count_parameters(model):\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\nprint(f'The model has {count_parameters(model):,} trainable parameters')\n\nimport torch.optim as optim\n\n# define optimizer and loss\noptimizer = optim.Adam(model.parameters(), lr=2e-4)\ncriterion = nn.CrossEntropyLoss()\n\n\n# define metric\ndef binary_accuracy(preds, y):\n # round predictions to the closest integer\n _, predictions = torch.max(preds, 1)\n\n correct = (predictions == y).float()\n acc = correct.sum() / len(correct)\n return acc\n\n\n# push to cuda if available\nmodel = model.to(device)\ncriterion = criterion.to(device)\n\n# train loop\ndef train(model, iterator, optimizer, criterion):\n # initialize every epoch\n epoch_loss = 0\n epoch_acc = 0\n\n # set the model in training phase\n model.train()\n\n for batch in iterator:\n # resets the gradients after every batch\n optimizer.zero_grad()\n\n # retrieve text and no. of words\n tweet, tweet_lengths = batch.tweets\n\n # convert to 1D tensor\n predictions = model(tweet, tweet_lengths).squeeze()\n\n # compute the loss\n loss = criterion(predictions, batch.labels)\n\n # compute the binary accuracy\n acc = binary_accuracy(predictions, batch.labels)\n\n # backpropage the loss and compute the gradients\n loss.backward()\n\n # update the weights\n optimizer.step()\n\n # loss and accuracy\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n#evaluate loop\ndef evaluate(model, iterator, criterion):\n # initialize every epoch\n epoch_loss = 0\n epoch_acc = 0\n\n # deactivating dropout layers\n model.eval()\n\n # deactivates autograd\n with torch.no_grad():\n for batch in iterator:\n # retrieve text and no. 
of words\n tweet, tweet_lengths = batch.tweets\n\n # convert to 1d tensor\n predictions = model(tweet, tweet_lengths).squeeze()\n\n # compute loss and accuracy\n loss = criterion(predictions, batch.labels)\n acc = binary_accuracy(predictions, batch.labels)\n\n # keep track of loss and accuracy\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n\n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\n\nN_EPOCHS = 10\nbest_valid_loss = float('inf')\n\nfor epoch in range(N_EPOCHS):\n\n # train the model\n train_loss, train_acc = train(model, train_iterator, optimizer, criterion)\n\n # evaluate the model\n valid_loss, valid_acc = evaluate(model, valid_iterator, criterion)\n\n # save the best model\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(model.state_dict(), '/home/ubuntu/tweetsa/models/saved_weights.pt')\n\n print(f'\\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc * 100:.2f}%')\n print(f'\\t Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc * 100:.2f}% \\n')\n" } ]
2
dej-e/instagram_competition
https://github.com/dej-e/instagram_competition
c9c62dbb9a7c2ff5b38672a7edb0c4e28b10143d
19d06ed7d3e3c7f272c8206ac30c7cf9050a347b
d28e04634189ce0f7e03bd49e1e3ccba81884e96
refs/heads/master
2020-09-05T04:34:27.524995
2019-11-28T05:06:01
2019-11-28T05:06:01
204,286,342
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5128205418586731, "alphanum_fraction": 0.7179487347602844, "avg_line_length": 18.5, "blob_id": "383f4d2f2746bc4ef1eac4f47ab77162a536e3f6", "content_id": "0e79e70865b4312aaf247d302a632d9b6cfcfd25", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 39, "license_type": "permissive", "max_line_length": 21, "num_lines": 2, "path": "/requirements.txt", "repo_name": "dej-e/instagram_competition", "src_encoding": "UTF-8", "text": "instabot==0.73.0\npython-dotenv==0.10.3\n" }, { "alpha_fraction": 0.6399462819099426, "alphanum_fraction": 0.644424557685852, "avg_line_length": 24.965116500854492, "blob_id": "51792082216200435cf6b75bbd6026f7a4d6ca36", "content_id": "e9821032b38da157e8104cee80b35a919a9b5e77", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2273, "license_type": "permissive", "max_line_length": 69, "num_lines": 86, "path": "/competition.py", "repo_name": "dej-e/instagram_competition", "src_encoding": "UTF-8", "text": "import os\nfrom instabot import Bot\nfrom dotenv import load_dotenv\nimport re\nimport argparse\n\n\ndef get_usernames_from_comment(comment):\n pattern = re.compile(\n r'(?:@)([A-Za-z0-9_]'\n r'(?:(?:[A-Za-z0-9_]|(?:\\.(?!\\.))){0,28}(?:[A-Za-z0-9_]))?)')\n return pattern.findall(comment)\n\n\ndef is_user_exists(bot, username):\n return bot.get_user_id_from_username(username) is not None\n\n\ndef is_user_tag_friends(bot, comment):\n text_comment = comment['text']\n tagged_friends = get_usernames_from_comment(text_comment)\n\n for friend in tagged_friends:\n if is_user_exists(bot, friend):\n return True\n return False\n\n\ndef is_users_followed(comment, followed_users):\n followed_user_id = str(comment['user_id'])\n return followed_user_id in followed_users\n\n\ndef is_user_liked(comment):\n liked = 'comment_like_count'\n return liked in comment and comment[liked] > 0\n\n\ndef main():\n load_dotenv()\n\n parser = argparse.ArgumentParser(\n description='Программа для конкурсов в Instagram'\n )\n parser.add_argument(\"url\", help=\"Адрес ссылки (URL) на акцию\")\n args = parser.parse_args()\n instagram_url = args.url\n\n login = os.getenv('INSTAGRAM_LOGIN')\n password = os.getenv('INSTAGRAM_PASSWORD')\n instagram_dir = os.getenv('INSTAGRAM_DIR')\n\n if not os.path.exists(instagram_dir):\n os.makedirs(instagram_dir)\n\n bot = Bot(base_path=instagram_dir)\n bot.login(username=login, password=password, force=True)\n\n media_id = bot.get_media_id_from_link(instagram_url)\n media_owner_id = bot.get_media_owner(media=media_id)\n media_owner = bot.get_username_from_user_id(media_owner_id)\n author_followers = bot.get_user_followers(media_owner)\n comments = bot.get_media_comments_all(media_id=media_id)\n\n finalists = []\n\n for comment in comments:\n if not is_user_liked(comment):\n continue\n\n if not is_users_followed(comment, author_followers):\n continue\n\n if not is_user_tag_friends(bot, comment):\n continue\n\n username = comment['user']['username']\n finalists.append(username)\n\n winners = set(finalists)\n bot.logout()\n print(winners)\n\n\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.7597087621688843, "alphanum_fraction": 0.7633495330810547, "avg_line_length": 25.612903594970703, "blob_id": "3b3dfbd01c7ec360dc096cb6edcc8b78c2fb2e64", "content_id": "d2d261afb62be2d66a1c2561d1c586fd3fe2d1c2", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 
1222, "license_type": "permissive", "max_line_length": 126, "num_lines": 31, "path": "/README.md", "repo_name": "dej-e/instagram_competition", "src_encoding": "UTF-8", "text": "# Инструмент для конкурсов в Instagram\n\nСкрипт ищет победителей конкурса в [Instagram](https://www.instagram.com/)\n\n\n### Как установить\n\nPython3 должен быть уже установлен. Затем используйте pip (или pip3, есть есть конфликт с Python2) для установки зависимостей:\n```\npip install -r requirements.txt\n```\n\nДля работы скрипта необходимо зарегистрироваться в Instagram.\n \nЕсли у вас нет аккаута в Instagram, создайте его.\n\nПосле клонирования проекта создайте в корень файл ```.env``` с таким содержимым:\n```\nINSTAGRAM_LOGIN=_ваш логин от Instagram_\nINSTAGRAM_PASSWORD=_ваш пароль от Instagram_\nINSTAGRAM_DIR=instagram\n```\n\n\n### Пример запуска\n\n```python competition.py _ссылка на конкурс в Instagram_```\n\n### Цель проекта\n\nКод написан в образовательных целях на онлайн-курсе для веб-разработчиков [dvmn.org](https://dvmn.org/)." } ]
3
dfaijlan/CST205_TeamProject
https://github.com/dfaijlan/CST205_TeamProject
d1720a79733010f73f95545d21765e4375617b23
ba39f244b7da0c09a7329ed36ee37eed1d910b87
f9d83cd24fe38f74accdb34180ce72e6ed58a0d9
refs/heads/master
2021-08-28T04:10:46.988525
2017-12-11T06:25:23
2017-12-11T06:25:23
110,587,164
0
0
null
2017-11-13T18:44:41
2017-11-13T18:48:01
2017-11-15T20:40:04
Python
[ { "alpha_fraction": 0.7016128897666931, "alphanum_fraction": 0.7197580933570862, "avg_line_length": 24.435897827148438, "blob_id": "2773a8404738a75a7ca6df49ff889e979bb6c935", "content_id": "faff027aa3d4d7604b31c6b1da6df14c64c7834f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 992, "license_type": "no_license", "max_line_length": 75, "num_lines": 39, "path": "/README.md", "repo_name": "dfaijlan/CST205_TeamProject", "src_encoding": "UTF-8", "text": "# MPy3 Player\n\n* Team Members: Gerardo Alcaraz, Dominic Fajilan, Yashkaran Singh\n* Class: CST205 - Multimedia Design & Programming\n* Date: 12/11/17\n\n## How to Run program\n\n1. Run 'py MPy3_Player' in your terminal.\n2. Once the window opens, use the 'Pick a song' drop box to pick one of the\n available songs to play.\n3. Once the song is playing, you can use any of the features available to\n control the song:\n\n```\nBack button: Rewinds the song if the length of the song is greater than 3,\n otherwise, go back a song in the list.\n\nPlay button: Resumes the song if the song is paused.\n\nNext: Chooses the next song in the list and plays it.\n\nStop: Completely stop the song, reverts back to default screen.\n\nVolume slider: Can either make the song louder or quieter.\n\nMute radio button: Mutes the song\n```\n\n## Github link\n\nhttps://github.com/dfaijlan/CST205_TeamProject\n\n## Future work\n\n* Add more songs\n* Add an equalizer\n* Make the GUI cleaner\n* Add the option to choose a song based on artist and album\n" }, { "alpha_fraction": 0.5796566605567932, "alphanum_fraction": 0.5902454853057861, "avg_line_length": 37.47530746459961, "blob_id": "89dff8acb4e50ee6a4bed3f16ca05d33ac709b54", "content_id": "f67427a8cbfa6b7d3be6aa4b57e412d8c6e35fc9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12466, "license_type": "no_license", "max_line_length": 128, "num_lines": 324, "path": "/MPy3_Player.py", "repo_name": "dfaijlan/CST205_TeamProject", "src_encoding": "UTF-8", "text": "########## HEADER COMMENTS\n# Course: CST205 - Multimedia Programming and Design\n# Title: MPy3 Player\n# Abstract: MP3 player created with python to play songs. 
User can pick a song,\n# skip to the next song, stop the song, etc.\n# Authors: Gerardo Alcaraz, Dominic Fajilan, Yashkaran Singh\n# Date: 12/11/17\n#\n# Github link: https://github.com/dfaijlan/CST205_TeamProject\n##########\n\nimport sys\nimport random\nfrom PIL import Image\nimport pygame\nimport datetime\nfrom pygame import mixer\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtCore import (pyqtSlot, Qt)\nfrom PyQt5.QtGui import *\n\n##### All three of us created the dictionaries\nmy_new_dict = {\n \"Pick a song\":{\n \"artist_name\" : \"\",\n \"song_path\" : \"\",\n \"img_path\" : \"\",\n \"song_length\" : \"\"\n },\n \"Still Feelin It\":{\n \"artist_name\" : \"Mistah F.A.B.\",\n \"album name\" : \"Son of a Pimp\",\n \"song_path\":\"songs/Still-Feelin-It_Mix.mp3\",\n \"img_path\":\"images/stf_img.jpg\",\n \"song_length\" : 86\n },\n \"Hey Jude\":{\n \"artist_name\" : \"The Beatles\",\n \"album name\" : \"Hey Jude\",\n \"song_path\":\"songs/HeyJude.mp3\",\n \"img_path\":\"images/beatles_img.jpg\",\n \"song_length\" : 377\n },\n \"2\" : {\n \"artist_name\" : \"H.E.R.\",\n \"album name\" : \"H.E.R.\",\n \"song_path\" : \"songs/HER-2.mp3\",\n \"img_path\":\"images/her_img.jpg\",\n \"song_length\" : 190\n },\n \"Frozen\" : {\n \"artist_name\" : \"Sabrina Claudio\",\n \"album name\" : \"About Time\",\n \"song_path\" : \"songs/Frozen.mp3\",\n \"img_path\":\"images/frozen_img.jpg\",\n \"song_length\" : 245\n },\n \"Psychotic Girl\" : {\n \"artist_name\" : \"The Black Keys\",\n \"album name\" : \"Attack & Release\",\n \"song_path\" : \"songs/PsychoticGirl.mp3\",\n \"img_path\" : \"images/keys_img.jpg\",\n \"song_length\" : 251\n }\n}\n\nbutton_list =[\"Back\",\"Play\",\"Pause\",\"Next\",\"Stop\"]\n#####\n\nclass Window(QWidget):\n ########## Yashkaran created most of the GUI, Dominic added song_progress /\n ########## song_max and the timer loop\n def __init__(self):\n super().__init__()\n\n #inner vlayout\n\n #self.myFont = QtGui\n\n #self.myFont = QtGui.QFont()\n\n # add QComboBox for the song list\n self.song_list = QComboBox()\n self.song_list.addItems(my_new_dict.keys())\n self.song_name = QLabel(\"Select a Song to Play!!!\")\n\n # label to display artist name\n self.artist_name = QLabel()\n\n # Dislay image for the song if any\n self.cover_image = QLabel()\n\n # Display album name\n self.album_name = QLabel()\n\n inner_v_layout_song_info = QVBoxLayout()\n inner_v_layout_song_info.addWidget(self.song_name)\n inner_v_layout_song_info.addWidget(self.artist_name)\n\n inner_v_layout_song_info.addWidget(self.album_name)\n inner_v_layout_disp_image = QVBoxLayout()\n\n # Music Image\n self.music_image = QLabel()\n music_pic = QPixmap(\"images/music.png\")\n # music_pic = music_pic.scaledToWidth(600)\n self.music_image.setPixmap(music_pic)\n\n #Volume Controls\n self.volume_label = QLabel(\"<h5>Volume: </h5>\")\n self.vol_slider = QSlider()\n self.vol_slider.setOrientation(Qt.Horizontal)\n self.vol_slider.setRange(0,100)\n self.vol_slider.setValue(5)\n self.vol_slider.valueChanged.connect(self.vol_change)\n self.mute_label = QLabel(\"<h5>Mute: </h5>\")\n self.mute_button = QRadioButton()\n self.mute_button.toggled.connect(self.mute_me)\n\n #layout for volume Controls\n inner_h_layout_volume_controls = QHBoxLayout()\n inner_h_layout_volume_controls.addSpacing(50)\n inner_h_layout_volume_controls.addWidget(self.volume_label)\n inner_h_layout_volume_controls.addSpacing(10)\n inner_h_layout_volume_controls.addWidget(self.vol_slider)\n 
inner_h_layout_volume_controls.addSpacing(50)\n inner_h_layout_volume_controls.addWidget(self.mute_label)\n inner_h_layout_volume_controls.addSpacing(10)\n inner_h_layout_volume_controls.addWidget(self.mute_button)\n inner_h_layout_volume_controls.addSpacing(50)\n\n\n\n\n # layout for song info and image\n outer_h_layout_contain_inner = QHBoxLayout()\n outer_h_layout_contain_inner.addWidget(self.music_image)\n outer_h_layout_contain_inner.addLayout(inner_v_layout_song_info)\n outer_h_layout_contain_inner.addLayout(inner_v_layout_disp_image)\n\n # layout for Buttons\n self.outer_h_layout_contain_buttons = QHBoxLayout()\n\n # Buttons\n self.button_map = {}\n for i in button_list:\n my_button = QPushButton(i)\n my_button.setStyleSheet(\"background-color: #FFFFFF\")\n my_button.clicked.connect(self.on_click)\n # self.saveButton(my_button)\n self.button_map[my_button.text()] = my_button\n self.outer_h_layout_contain_buttons.addWidget(my_button)\n\n # Layout for song song_progress\n outer_h_layout_contain_progress = QHBoxLayout()\n\n #Add Stretch to move the progress bar/text to the far right\n outer_h_layout_contain_progress.addStretch(1)\n\n # Song progress tracker\n self.song_progress = QLabel()\n self.song_max = QLabel(\"/ 0:00:00\")\n\n outer_h_layout_contain_progress.addWidget(self.song_progress)\n outer_h_layout_contain_progress.addWidget(self.song_max)\n\n #Add Stretch on the right to move the progress bar/text to the center by stretching it from the right side. Aligning it.\n outer_h_layout_contain_progress.addStretch(1)\n\n #main v layout\n main_v_layout = QVBoxLayout()\n main_v_layout.addLayout(outer_h_layout_contain_inner)\n main_v_layout.addWidget(self.song_list)\n main_v_layout.addLayout(self.outer_h_layout_contain_buttons)\n\n # add volume slider here\n main_v_layout.addLayout(inner_h_layout_volume_controls)\n\n # main_v_layout.addWidget(self.vol_slider)\n main_v_layout.addLayout(outer_h_layout_contain_progress)\n\n # outer_v_layout.addLayout(inner_h_layout)\n self.setLayout(main_v_layout)\n\n self.song_list.currentIndexChanged.connect(self.update_ui)\n\n # first two arguments for position on screen\n # second two arguments for dimensions of window (width, height)\n\n self.setWindowTitle(\"My Player\")\n self.progress_of_song()\n self.my_timer = QtCore.QTimer()\n self.my_timer.timeout.connect(self.progress_of_song)\n self.my_timer.start(60)\n\n\n @pyqtSlot()\n ########## This updates the GUI whenever the user changes the song\n ##### Yashkaran made the buttons and general layout\n ##### Gerardo was able to make the songs play and made it display alb\n ##### Dominic set the progress of the song\n def update_ui(self):\n\n my_text = self.song_list.currentText()\n\n #Reset button colors\n self.reset_button_color()\n pygame.mixer.quit()\n if (my_text != \"Pick a song\"):\n # starts the song, displays all information of song, collaborated on by all three members\n self.song_name.setText(\"<h5>Song:</h5> <br>\" + f\"<h4>{my_text}</h4>\")\n self.artist_name.setText(\"<h5>Artist:</h5> <br>\" + f'<h4>{my_new_dict[my_text][\"artist_name\"]}</h4>')\n self.album_name.setText(\"<h5>Album:</h5> <br>\" + f'<h4>{my_new_dict[my_text][\"album name\"]}</h4>')\n pixmap = QPixmap(my_new_dict[my_text][\"img_path\"])\n pixmap = pixmap.scaledToWidth(600)\n self.music_image.setPixmap(pixmap)\n pygame.mixer.init()\n pygame.init()\n pygame.mixer.music.load(my_new_dict[my_text][\"song_path\"])\n self.song_max.setText(\"/\" + str(datetime.timedelta(seconds=my_new_dict[my_text][\"song_length\"])))\n 
pygame.mixer.music.set_endevent(pygame.USEREVENT)\n pygame.event.set_allowed(pygame.USEREVENT)\n pygame.mixer.music.play()\n if(self.mute_button.isChecked()):\n pygame.mixer.music.set_volume(0.0)\n else:\n pygame.mixer.music.set_volume(self.vol_slider.value()/100)\n else:\n self.song_name.setText(\"Select a song to Play!!!\")\n index = self.song_list.findText(\"Pick a song\")\n self.artist_name.setText(\"\")\n self.album_name.setText(\"\")\n self.song_list.setCurrentIndex(index)\n self.song_max.setText(\"/0:00:00\")\n music_pic = QPixmap(\"images/music.png\")\n music_pic = music_pic.scaledToWidth(600)\n self.music_image.setPixmap(music_pic)\n\n\n @pyqtSlot()\n ########## Whenever the user clicks a button, such as stop or play, an event will happen\n ##### Yashkaran made the colors of the buttons change, the play, pause, and stop Buttons\n ##### Dominic made the back, next, and also worked on the stop button\n def on_click(self):\n button = self.sender()\n if(pygame.init()):\n # Revert back color to default\n self.reset_button_color()\n # Change color of the pressed button\n self.button_map[button.text()].setStyleSheet(\"background-color: #A6C6D1\")\n # pauses the music\n if(button.text()==\"Pause\"):\n pygame.mixer.music.pause()\n # either starts the music from the beginning or go back a song\n elif(button.text()==\"Back\"):\n current_time = int(pygame.mixer.music.get_pos() / 1000)\n # if the time of the song is longer than 3 seconds, go back tothe beginning of the song\n if (current_time <=3):\n index = int(self.song_list.currentIndex()) - 1\n if (index > 0):\n self.song_list.setCurrentIndex(index)\n else:\n self.song_list.setCurrentIndex(len(my_new_dict) -1)\n # else go back a song\n else:\n my_text = self.song_list.currentText()\n pygame.mixer.music.stop()\n pygame.mixer.music.load(my_new_dict[my_text][\"song_path\"])\n pygame.mixer.music.play()\n elif(button.text()==\"Play\"):\n pygame.mixer.music.unpause()\n # stops the song, and reverts the screen back to default\n elif(button.text()==\"Stop\"):\n pygame.mixer.music.stop()\n self.song_name.setText(\"Select a song to Play!!!\")\n index = self.song_list.findText(\"Pick a song\")\n self.artist_name.setText(\"\")\n self.album_name.setText(\"\")\n self.song_list.setCurrentIndex(index)\n self.song_max.setText(\"/0:00:00\")\n music_pic = QPixmap(\"images/music.png\")\n music_pic = music_pic.scaledToWidth(600)\n self.music_image.setPixmap(music_pic)\n # picks next song in list\n elif(button.text()==\"Next\"):\n index = int(self.song_list.currentIndex()) + 1\n if (index < len(my_new_dict)):\n self.song_list.setCurrentIndex(index)\n else:\n self.song_list.setCurrentIndex(1)\n\n # changes the volume, created by Yashkaran\n def vol_change(self):\n if(pygame.init()):\n my_vol = self.vol_slider.value()/100\n if(self.mute_button.isChecked() == False):\n pygame.mixer.music.set_volume(my_vol)\n\n # mutes the song, created by Yashkaran\n def mute_me(self):\n if(pygame.init()):\n if(self.mute_button.isChecked()):\n pygame.mixer.music.set_volume(0.0)\n else:\n pygame.mixer.music.set_volume(self.vol_slider.value()/100)\n\n # displays the progess of the song, created by Dominic\n def progress_of_song(self):\n if(pygame.init()):\n current_time = int(pygame.mixer.music.get_pos() / 1000)\n self.song_progress.setText(str(datetime.timedelta(seconds=current_time)))\n self.update()\n\n # resets the button color, created by Yashkaran\n def reset_button_color(self):\n for widget in self.button_map:\n self.button_map[widget].setStyleSheet(\"background-color: 
#FFFFFF\")\n\napp = QApplication(sys.argv)\nmain = Window()\nmain.show()\nsys.exit(app.exec_())\npygame.mixer.music.quit()\n" }, { "alpha_fraction": 0.4753086566925049, "alphanum_fraction": 0.5092592835426331, "avg_line_length": 80, "blob_id": "a81c68f4d816ece3452510b6662e053bda08af16", "content_id": "6ba9082dc72330a1ebfdf5fac88c1acc1bf34b9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1944, "license_type": "no_license", "max_line_length": 117, "num_lines": 24, "path": "/music.py", "repo_name": "dfaijlan/CST205_TeamProject", "src_encoding": "UTF-8", "text": "highway_61_revisited = [\"Select Song\", \"1. Like a Rolling Stone\", \"2. Tombstone Blues\",\n \"3. It Takes a Lot to Laugh, It Takes a Train to Cry\",\n \"4. From a Buick 6\", \"5. Ballad of a Thin Man\",\n \"6. Queen Jane Approximately\", \"7. Highway 61 Revisited\",\n \"8. Just Like Tom Thumb's Blues\", \"9. Desolation Row\"]\nblonde_on_blonde = [\"Select Song\",\"1. Rainy Day Women #12 & 35\", \"2. Pledging My Time\",\n \"3. Visions of Johanna\", \"4. One of Us Must Know(Sooner or Later)\",\n \"5. I Want You\", \"6. Stuck Inside of Mobile with the Memphis Blues Again\",\n \"7. Leopard-Skin Pill-Box Hat\", \"8. Just Like a Woman\",\n \"9. Most Likely You Go Your Way and I'll Go Mine\",\n \"10. Temporary Like Achilles\", \"11. Absolutely Sweet Marie\",\n \"12. 4th Time Around\", \"13. Obviously 5 Believers\",\n \"14. Sad Eyed Lady of the Lowlands\"]\nbringing_it_all_back_home = [\"Select Song\", \"1. Subterranean Homesick Blues\",\t\"2. She Belongs to Me\",\n \"3. Maggie's Farm\",\t\"4. Love Minus Zero/No Limit\",\t\"5. Outlaw Blues\",\n \"6. On the Road Again\",\t\"7. Bob Dylan's 115th Dream\",\n \"8. Mr. Tambourine Man\",\t\"9. Gates of Eden\",\t\"10. It's Alright, Ma (I'm Only Bleeding)\",\n \"11. It's All Over Now, Baby Blue\"]\nblood_on_the_tracks = [\"Select Song\", \"1. Tangled Up in Blue\",\t\"2. Simple Twist of Fate\",\n \"3. You're a Big Girl Now\", \"4. Idiot Wind\",\n \"5. You're Gonna Make Me Lonesome When You Go\",\n \"6. Meet Me in the Morning\",\t\"7. Lily, Rosemary and the Jack of Hearts\",\n \"8. If You See Her, Say Hello\", \"9. Shelter from the Storm\",\n \"10. Buckets of Rain\"]\t" } ]
3
XianYX/Python-
https://github.com/XianYX/Python-
0a5fe645fe20b5d480d8f13d9d5c7e3e79d23016
ae79ce935b84de59caaa82acf535e0c89f130e79
8cecbbad4062a87b8a9ef68339f2228bcd8d053f
refs/heads/master
2020-12-30T23:23:21.526941
2014-10-17T17:38:48
2014-10-17T17:38:48
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5192307829856873, "alphanum_fraction": 0.5688585638999939, "avg_line_length": 33.319149017333984, "blob_id": "a7e042762d1f30f071e0c581563cca02b0e999ee", "content_id": "801092c5617ad618526de5d6b8f2b7347f6129be", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1618, "license_type": "no_license", "max_line_length": 106, "num_lines": 47, "path": "/2013-10-9/三角.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for computing the angle of the triangle\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013年10月11日 21:22:04\n\"\"\"\n#import the necessary function\nfrom math import sqrt\nfrom math import acos\nfrom math import degrees\n#input the coordinate of the points\ntry:\n x1,y1=eval(raw_input(\"please enter the coordinate for the first point:\"))\n x2,y2=eval(raw_input(\"please enter the coordinate for the second point:\"))\n x3,y3=eval(raw_input(\"please enter the coordinate for the third point:\"))\nexcept:\n print(\"Error\")\nelse:\n #jurdge if the point is the same point\n if (x1==x2 and y1==y2)or(x1==x3 and y1==y3)or(x2==x3 and y2==y3):\n print(\"The same print!\")\n else:\n try:\n #compute the length of three sides\n a=sqrt((x1-x2)**2+(y1-y2)**2)\n b=sqrt((x1-x3)**2+(y1-y3)**2)\n c=sqrt((x3-x2)**2+(y3-y2)**2)\n #compute the radians of the triangle\n A=acos((a*a-b*b-c*c)/(-2*b*c))\n B=acos((b*b-a*a-c*c)/(-2*a*c))\n C=acos((c*c-a*a-b*b)/(-2*a*b))\n except:\n print(\"Error\")\n else:\n #output the result in radians\n print(\"the radians of A,B,C is:\"+format(A,\".2f\")+\" \"+format(B,\".2f\")+\" \"+format(C,\".2f\"))\n #switch radians into degrees\n A1=degrees(A)\n B1=degrees(B)\n C1=degrees(C)\n #output the result in degrees\n print(\"the degrees of A,B,C is:\"+format(A1,\".2f\")+\" \"+format(B1,\".2f\")+\" \"+format(C1,\".2f\"))" }, { "alpha_fraction": 0.4956521689891815, "alphanum_fraction": 0.5130434632301331, "avg_line_length": 14.266666412353516, "blob_id": "e83132767d6c71455e643777b5e6d98d61efa39e", "content_id": "8fe6b9bb001b8e657384e50b26bc7a7a632bf826", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "no_license", "max_line_length": 42, "num_lines": 15, "path": "/mould/jiecheng.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "def f(n):\n\"\"\"\nthis fucation is to computer the factorial\n\"\"\"\ntry:\n n=int(n)\n if n<0:\n print(\"error\")\n elif n==0:\n return 1\n else:\n result=n*f(n-1)\n return result\nexcept:\n print(\"error\")\n\n" }, { "alpha_fraction": 0.5539358854293823, "alphanum_fraction": 0.6209912300109863, "avg_line_length": 21.866666793823242, "blob_id": "41e3f49d449bfc73bc9ba2aa806eb5d002deb191", "content_id": "023523c1506e8b5507a3c6afff5e47fc0021991c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 343, "license_type": "no_license", "max_line_length": 54, "num_lines": 15, "path": "/2013-11-20/贪吃蛇神马的都去死/rank.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 19 16:48:35 2013\n\n@author: Administrator\n\"\"\"\nf = open('middle2104.txt','r')\nlst = [line.strip().lower() for line in f.readlines()]\nf.close()\n#lst.sort(key = lambda x:len(x),reverse=False)\nfor i in range(0,len(lst)-1):\n lst[i] += '\\n'\nf = 
open('middle2104.txt','w')\nf.writelines(lst)\nf.close()\n" }, { "alpha_fraction": 0.5690072774887085, "alphanum_fraction": 0.6803874373435974, "avg_line_length": 20.763158798217773, "blob_id": "14e6310bc7a91a9df617c9d39022f8dd12a11daf", "content_id": "982acc12a73f83a88bf0610da7e40080add321c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 826, "license_type": "no_license", "max_line_length": 70, "num_lines": 38, "path": "/2013-09-27/2.4.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for calculatoring some questions.\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-10-12 12:19:06\n\"\"\"\nfrom math import pi\n#question one\nprint(\"Question one\")\ndef v(r):\n volume=4*pi*r**3/3\n return volume\n#output the result\nprint(\"the volume of a sphere with radius is :\"+format(v(5),\".2f\"))\n#question two\nprint(\"Question two\")\ndef total_cost(n):\n sum=24.95*0.6*n+33+0.75*(n-1)\n return sum\n#print the result \nprint(\"the total cost for 60 copies is:\"+format(total_cost(60),\".2f\"))\n#question three\nprint(\"Question three\")\nsth=6\nstm=52\nsts=sth*3600+stm*60\nt1=8*60+15\nt2=7*60+12\neds=sts+2*t1+3*t2\nedh=eds//3600\nedm=(eds%3600)//60\neds=(eds%3600)%60\nprint(\"time for breakfastis:\"+str(edh)+\":\"+str(edm)+\":\"+str(eds))" }, { "alpha_fraction": 0.6178217530250549, "alphanum_fraction": 0.7168316841125488, "avg_line_length": 18.461538314819336, "blob_id": "5150ecdb0b2aff05d07d54766575abbfcf482b8d", "content_id": "085867573c3188f411c2afb0509bcd428f757010", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 505, "license_type": "no_license", "max_line_length": 46, "num_lines": 26, "path": "/2013-09-27/2.3.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for test the type of variable.\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-10-12 12:07:55\n\"\"\"\n#give the value to the variables\nwidth=17\nheight=12.0\ndelimiter=\".\"\n#print the result ang the type of the result\nprint(width/2)\nprint(type(width/2))\nprint(width/2.0)\nprint(type(width/2.0))\nprint(height/3)\nprint(type(height/3))\nprint(1+2*5)\nprint(type(1+2*5))\nprint(delimiter*5)\nprint(type(delimiter*5))" }, { "alpha_fraction": 0.4649493098258972, "alphanum_fraction": 0.47888514399528503, "avg_line_length": 23.163265228271484, "blob_id": "77d77de31824605f5fad5e4b756696d84bd4a31d", "content_id": "2c3400f5c7b24cd2750daa375a9ec97fb12a8134", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2368, "license_type": "no_license", "max_line_length": 82, "num_lines": 98, "path": "/Snake game with frame.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "from Tkinter import *\nimport random \nclass SnakeGame:\n \n\n def __init__(self):\n # moving step for snake and food\n self.step=15\n # game score\n self.gamescore=0\n \n # to initialize the snake in the range of (x1,y1,x2,y1) \n r=random.randrange()\n self.snakeX=[]\n self.snakeY=[]\n \n # to initialize the moving direction\n self.snakeDirection = ' ' \n self.snakeMove = []\n # to draw the game frame \n window = Tk()\n window.geometry(\"600x400+10+10\")\n window.maxsize(600,400)\n 
window.minsize(600,400)\n window.title(\"Snake game\")\n \n self.frame1=Frame(... ...)\n self.frame2=Frame(... ...)\n self.canvas=Canvas(... ...)\n self.score_label=Label(... ...)\n \n self.frame1.pack()\n self.frame2.pack(fill=BOTH)\n self.score_label.pack(side=LEFT)\n self.canvas.pack(fill=BOTH)\n \n self.draw_wall()\n self.draw_score()\n self.draw_food()\n self.draw_snake()\n \n self.play()\n \n window.mainloop()\n\n \"=== View Part ===\" \n def draw_wall(self):\n pass\n \n def draw_score(self):\n self.score() # score model\n self.score_label.config(... ...) # score view\n \n def draw_food(self):\n self.canvas.delete(\"food\")\n self.foodx,self.foody=self.random_food() #food model\n self.canvas.create_rectangle(... ...,fill=... ,tags=\"food\") #food view\n\n def draw_snake(self):\n self.canvas.delete(\"snake\")\n x,y=self.snake() # snake model\n for i in range(len(x)): # snake view\n self.canvas.create_rectangle(... ..., fill=...,tags='snake') \n \n \"=== Model Part ===\"\n # food model\n def random_food(self): \n pass\n \n # snake model\n def snake(self):\n pass\n \n #score model \n def score(self):\n pass\n \n \n \"=== Control Part ===\" \n def iseated(self):\n pass\n \n def isdead(self):\n pass\n \n def move(self,event):\n pass\n \n def play(self):\n pass \n \n def gameover(self):\n pass\n\n def restart(self,event):\n pass\n \nSnakeGame()\n" }, { "alpha_fraction": 0.509560227394104, "alphanum_fraction": 0.581261932849884, "avg_line_length": 19.134614944458008, "blob_id": "155a8e03f14d17b1f1bbd38d1573939df1663122", "content_id": "87ca3d47d744ad8569d02066d4999a49f9173f9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1046, "license_type": "no_license", "max_line_length": 46, "num_lines": 52, "path": "/2013-10-13/1130310226_段艺_2.1.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for three modle.\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-10-13 19:25:46\n\"\"\"\n#modle one\ni=0\n#define the range of number to add up\nfor n in range(1,1000):\n#add up the number use the loop struction \n if n%3==0 or n%5==0:\n i += n\nprint(i)\n#modle two\nsum=0\n#set the loop range\nfor i in range(3,2000000):\n if i%2==0:\n#reject the obb number\n continue\n else:\n#select the prime to add up\n j=int(i**0.5)+1\n m=0 \n for n in range(2,j):\n if i%n==0:\n m+=1\n break\n if m==0:\n sum+=i\n#add 2 to sum\nprint(2+sum)\n#modle three\n#import datetime modle to solve the problem^_^\nfrom datetime import date\nsum=0\n#set the range of year and month\nfor year in range(1901,2001):\n for month in range(1,13):\n#set the date\n d=date(year,month,1)\n#compute the name of a day \n c=d.isoweekday()\n if c==7:\n sum += 1\nprint(sum)" }, { "alpha_fraction": 0.4190434515476227, "alphanum_fraction": 0.4269416332244873, "avg_line_length": 42.846153259277344, "blob_id": "23dcffa7e0b75454ba872d64a6272a9dfe5ad2f3", "content_id": "255c43e81561d1e8165dab1e55fb599ad94ce414", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2279, "license_type": "no_license", "max_line_length": 159, "num_lines": 52, "path": "/1130310224_董頔_5.1.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "alli=[] #Make empty lists \nrepeat=[] #for later use.\nraw={} #To store every word user input.\nwords=[] #To convenience's sake, use this to help to print.\nfor i in 
range(100):\n x=raw_input()\n y=x.split() #Get every word from what user has input with every raw. #Make those words in every raw into a list\n for v in y:\n if v in raw:\n if i + 1 not in raw[v]:\n raw[v].append(i+1)\n else:\n raw[v] = [i + 1]\n\nordered_list=sorted(raw.iteritems(),key=lambda asd:asd[0]) #Make word to be display into dictionary order\n\nfor items in ordered_list:\n temp = list(items[1])\n temp.sort()\n print items[0]+':',str(temp)[1:-1] #Print Inverted Index.\n\nwhile True: #For user to search\n tuplei=[]\n AND=[] #For the position-set list.\n e=raw_input()\n if e == '':\n break #If input return, end loop and end program.\n else: #Make input into lower case.\n d=e.split() #Get word user want to search.\n for el in d:\n if not raw.has_key(el):\n print 'None'\n conti=False\n break\n else:\n tuplei.append(el)\n conti=True #If there is no such word user input in the former list, print None, else, make them into a list.\n if conti==True:\n for needs_done in tuplei:\n a = raw[needs_done]\n AND.append(a)\n #Get the position of word showed up.\n all_lst = [i for i in range(1,101)]\n allset=set(all_lst)\n for n in AND:\n allset=allset & set(n)\n if len(allset)==0:\n print 'None' #Get the public position.\n else:\n out =list(allset)\n out.sort()\n print str(out)[1:-1] #Display the result." }, { "alpha_fraction": 0.6690518856048584, "alphanum_fraction": 0.7245080471038818, "avg_line_length": 30, "blob_id": "8a0a6fd05ce3279eba791ada6d70f690b28dd103", "content_id": "d58794481233c9c0862d38193e0ae74221d1aa67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 559, "license_type": "no_license", "max_line_length": 107, "num_lines": 18, "path": "/2013-9-29/飞机跑道长度.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for calcuate the minimum runway length for the airplane . \nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-9-29 14:11:13\n\"\"\"\n\n#enter the average acceleratio and the take-off speed . 
\nv,a=eval(raw_input(\"Please enter the take-off speed v and the average acceleration a (separed by comma):\"))\n#calcuate the minimum runway length for the airplane\nl=format(v**2/(2*a),\".3f\")\n#print the result\nprint(\"The minimum runway length for the airplane is :\"+str(l))\n\n" }, { "alpha_fraction": 0.7037914395332336, "alphanum_fraction": 0.7298578023910522, "avg_line_length": 34.16666793823242, "blob_id": "1740c4e27e76253b075277d8a943932d93a62a31", "content_id": "6d21ae5152eb91de842ddf8087dd3847b16b6507", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 422, "license_type": "no_license", "max_line_length": 48, "num_lines": 12, "path": "/2013-11-27/6.1/1130310226_段艺_6.1/1130310226_段艺_6.1/mysite/addr_book/models.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "from django.db import models\n\nclass People(models.Model):\n\t\"\"\"docstring for People\"\"\"\n\tstudent_num = models.CharField(max_length = 15)\n\tname = models.CharField(max_length = 30)\n\tsex = models.BooleanField(default = True)\n\tphone = models.CharField(max_length = 15)\n\temail = models.EmailField()\n\tQQ = models.CharField(max_length = 11)\n\taddress = models.CharField(max_length = 50)\n\tbirthday = models.CharField(max_length = 8)\n" }, { "alpha_fraction": 0.527275562286377, "alphanum_fraction": 0.5693578720092773, "avg_line_length": 38.349693298339844, "blob_id": "1e8ad495c634421dac47664a9cbcfff08779f279", "content_id": "55068ba19b05a4cb3441264f7c1ebfa362326482", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6416, "license_type": "no_license", "max_line_length": 178, "num_lines": 163, "path": "/2013-10-29/csnake.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 02 17:00:05 2013\n\n@author: lenovo\n\"\"\"\n\nfrom Tkinter import * #import random class SnakeGame:\nimport random\nclass snakeGame:\n def __init__(self): \n # game score\n self.gamescore=0\n self.gamelevel=1\n # moving step for snake and food self.step=15 \n self.step=15\n # to initialize the snake in the range of (x1,y1,x2,y1) r=random.randrange() \n self.snakeX=[150,150+self.step,150+self.step*2]\n self.snakeY=[240,240,240]\n self.random_foodx=random.randrange(15,573,self.step)\n self.random_foody=random.randrange(15,358,self.step)\n self.foodX=[self.random_foodx]\n self.foodY=[self.random_foody]\n self.gamespeed=0\n # to initialize the moving direction\n self.snakeDirection = 'left'\n # to draw the game frame \n window = Tk()\n window.geometry(\"600x400+10+10\")\n window.maxsize(600,400)\n window.minsize(600,400)\n window.title(\"Snake game\")\n self.frame1=Frame(window,width=600,height=380)\n self.frame2=Frame(window,width=600,height=20)\n self.canvas=Canvas(self.frame1,bg='yellow',width=600,height=380)\n self.score_label=Label(self.frame2,text='score=0,level=1')\n self.frame1.pack()\n self.frame2.pack(fill=BOTH)\n self.score_label.pack(side=LEFT)\n self.canvas.pack(fill=BOTH)\n self.draw_wall()\n self.draw_score()\n self.draw_food()\n self.draw_snake()\n self.play()\n window.mainloop()\n \"=== View Part ===\"\n #draw wall \n def draw_wall(self):\n self.canvas.create_line((13,13),(13,373),(13,373),(588,373),(588,373),(588,13),(588,13),(13,13),fill='blue',width=3)\n #draw score\n def draw_score(self): \n self.score() # score model\n self.score_label.config(self.score_label,text='score='+str(self.gamescore)+'level='+str(self.gamelevel))\n if 
self.gamelevel<17:\n self.gamespeed=200-(self.gamelevel-1)*10\n else:\n self.gamespeed=50\n \n #draw food\n def draw_food(self): \n self.random_food()\n self.canvas.create_rectangle([[self.foodx,self.foody],[self.foodx+self.step,self.foody+self.step]],fill='red',outline='black',tags=\"food\") #food view\n #draw snake\n def draw_snake(self):\n self.canvas.delete(\"snake\")\n self.snake()\n for i in range(len(self.snakeX)): # snake view \n self.canvas.create_rectangle([[self.snakeX[i],self.snakeY[i]],[self.snakeX[i]+self.step,self.snakeY[i]+self.step]],fill='orange',outline='black',width=1,tags='snake')\n \"=== Model Part ===\" \n #food model \n def random_food(self):\n self.canvas.delete(\"food\")\n for i in range(0,self.gamelevel):\n self.foodx=random.randrange(15,573,self.step)\n self.foody=random.randrange(15,358,self.step)\n if i<=len(self.foodX):\n self.foodX[i]=self.foodx\n self.foodY[i]=self.foody\n else:\n self.random_food()\n self.foodX.insert(i,self.foodx)\n self.foodY.insert(i,self.foody)\n \n #score model \n def snake(self):\n self.canvas.bind('<Key>',self.move)\n\n\n #score model\n def score(self):\n if self.iseated==True:\n self.gamescore=self.gamescore+10\n if self.gamescore/10.0==1:\n self.gamelevel=self.gamelevel+1 \n \n \"=== Control Part ===\"\n def iseated(self):\n for i in range(0,len(foodX)+1):\n if foodX[i]==snakeX[0] and foodY[i]==snakeY[0]:\n return true\n \n def isdead(self):\n if self.snakeX[0] < 15 or self.snakeX[0] > 573 or self.snakeY[0] < 15 or self.snakeY[0] > 358:\n return True\n for i in range(1,len(self.snakeX)-1):\n if self.snakeX[i] == self.snakeX[0] and self.snakeY[i] == self.snakeY[0]:\n return True \n return False\n \n #contronl direction \n def move(self,event):\n for i in range(len(self.snakeX)):\n if event.keycode=='38' or event.keycode=='87' and snakeDirection!='down':\n self.snakeDirection=='up'\n self.snakeY[i]=self.snakeY[i]+self.step\n elif event.keycode=='40' or event.keycode=='83' and snakeDirection!='up':\n self.snakeDirection=='down'\n self.snakeY[i]=self.snakeY[i]-self.step\n elif event.keycode=='37' or event.keycode=='65' and snakeDirection!='right':\n self.snakeDirection=='left'\n self.snakeX[i]=self.snakeX[i]-self.step\n elif event.keycode=='38' or event.keycode=='87' and snakeDirection!='left':\n self.snakeDirection=='right' \n self.snakeX[i]=self.snakeX[i]+self.step\n #bunding event\n def play(self):\n while True:\n self.canvas.bind('<Key>',self.move)\n self.canvas.focus_set()\n \n if self.isdead()==True: \n break\n self.draw_snake()\n self.canvas.after(self.gamespeed)\n self.canvas.update()\n self.gameover()\n \n def gameover(self):\n canvas.delete('food')\n canvas.delete('snake')\n canvas.create_text(300,100,text='GAMEOVER',font='Verdana 30 bold',tags='text')\n canvas.create_text(300,200,text='Your score = '+str(self.gamescore)+',and your level ='+str(self.gamelevel),font='Verdana 30 bold',tags='text')\n canvas.create_text(300,275,text='Press T to try again',tags='text')\n canvas.bind('<Key>',restart)\n def restart(self,event):\n if event.keycode==84:\n canvas.delete('text')\n # game score\n self.gamescore=0\n # moving step for snake and food self.step=15 \n self.step=15\n # to initialize the snake in the range of (x1,y1,x2,y1) r=random.randrange() \n self.snakeX=[150,150+step,150+step*2]\n self.snakeY=[240,240,240]\n self.random_foodx=random.randrange(15,573,self.step)\n self.random_foody=random.randrange(15,358,self.step)\n self.foodX=[random_foodx]\n self.foodY=[random_foody]\n # to initialize the moving direction\n 
self.snakeDirection = 'left'\n \nsnakeGame() " }, { "alpha_fraction": 0.5152386426925659, "alphanum_fraction": 0.538240373134613, "avg_line_length": 23.85714340209961, "blob_id": "2c1105459df23a11da9a3366be727cce7a229a6e", "content_id": "c855d1aa3792d28e7a9cf84a5f487f7f9a858b1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1739, "license_type": "no_license", "max_line_length": 64, "num_lines": 70, "path": "/2013-11-24/1130310226_段艺_5.1.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is Inverted index.\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-11-23 00:53:46\n\"\"\"\n#define the list and dictionary will be used\ndic = {}\nkeys = []\ndel_lst = []\nfor_rank = []\n#define a function to jurdge if the key words are in dictionary.\ndef all_in(key_str,dic):\n key_lst = key_str.split()\n for key in key_lst:\n if key not in dic:\n return False\n return True\n#input the strings and create the dictionary\nfor num in range (0,100):\n Str = raw_input()\n temp_lst = Str.split()\n for word in temp_lst:\n if word in dic:\n dic[word].add(num+1)\n else:\n item = set()\n item.add(num+1)\n dic[word] = item\n#print the dic\nsum_lst = dic.items()\nsum_lst.sort(key = lambda x:x[0])\nfor item in sum_lst:\n line = list(item[1])\n line.sort()\n for i in range(0,len(line)):\n line[i] = str(line[i])\n print item[0] + ':',', '.join(line)\n#input the index \nwhile True:\n key_str = raw_input()\n if key_str == '':\n break\n else:\n keys.append(key_str)\n#do with the key words\nfor key_str in keys:\n if all_in(key_str,dic) == False:\n print 'None'\n #save all the number of line in a list\n else:\n key_lst = key_str.split()\n sum_set = set()\n for k in key_lst:\n sum_set = sum_set | dic[k]\n for k in key_lst:\n sum_set = sum_set & dic[k]\n if len(sum_set) == 0:\n print 'None'\n else:\n lst = list(sum_set)\n lst.sort()\n for i in range(len(lst)):\n lst[i] = str(lst[i])\n print ', '.join(lst)" }, { "alpha_fraction": 0.6258309483528137, "alphanum_fraction": 0.7150996923446655, "avg_line_length": 30.878787994384766, "blob_id": "e5cf9c874bca7d371ced1e89016090f089982ab2", "content_id": "a19708efc93795d612d7cc9032a0680eb43527e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1053, "license_type": "no_license", "max_line_length": 73, "num_lines": 33, "path": "/2013-09-27/四个圆.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for drawing four circles . 
\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-9-27 01:30:39\n\"\"\"\n\nimport turtle #import turtle module\nturtle.pensize(2) #set pen size to 2 pixels\nturtle.penup() #pull the pen up\nturtle.goto(-100,0) #put the pen to (-100,0)\nturtle.pendown() #pull the pen down\nturtle.circle(100) #draw the first circle with the radius of 100 pixels\n\nturtle.penup() #pull the pen up\nturtle.goto(100,0) #put the pen to (100,0)\nturtle.pendown() #pull the pen down\nturtle.circle(100) #draw the second circle with the radius of 100 pixels\n\nturtle.penup() #pull the pen up\nturtle.goto(-100,-200) #put the pen to (-100,-200)\nturtle.pendown() #pull the pen down\nturtle.circle(100) #draw the third circle with the radius of 100 pixels\n\nturtle.penup() #pull the pen up\nturtle.goto(100,-200) #put the pen to (-100,-200)\nturtle.pendown() #pull the pen down\nturtle.circle(100) #draw the fourth circle with the radius of 100 pixels\n\n" }, { "alpha_fraction": 0.6691418886184692, "alphanum_fraction": 0.6705858111381531, "avg_line_length": 30.08333396911621, "blob_id": "a270ae353e8c035ea78abce8ac687b00411106f0", "content_id": "f50928b03ab35c8a45d977f927eeaba24512fd67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4848, "license_type": "no_license", "max_line_length": 116, "num_lines": 156, "path": "/2013-12-14/1130310226_段艺_project02/dy/addr_book/views.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "from django.template import Context, RequestContext\nfrom django.shortcuts import render_to_response\nfrom models import People\nfrom django import forms\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.contrib import auth\nfrom django.contrib.auth.models import User\n\n\ndef index(req):\n\treturn render_to_response(\"index.html\",{})\n\n@login_required\ndef add(req):\n\tempty = []\n\tusername = req.session.get('username','')\n\tif req.POST:\n\t\tpost = req.POST\n\t\tfor i in post.keys():\n\t\t\tif post[i] == '':\n\t\t\t\tempty.append(i + ' shouldn\\'t be empty!')\n\t\tif len(empty) == 0:\t\t\t\n\t\t\tnew_people = People(\n\t\t\t\tstudent_num = post[\"student_num\"],\n\t\t\t\tname = post[\"name\"],\n\t\t\t\tphone = post[\"phone\"],\n\t\t\t\temail = post[\"email\"],\n\t\t\t\tQQ = post[\"QQ\"],\n\t\t\t\taddress = post[\"address\"],\n\t\t\t\tbirthday = post[\"birthday\"],\n\t\t\t\tmaster = username,\n\t\t\t\t)\n\t\t\tif post[\"sex\"] == \"M\":\n\t\t\t\tnew_people.sex = True\n\t\t\telse:\n\t\t\t\tnew_people.sex = False\n\t\t\tnew_people.save()\n\treturn render_to_response(\"add.html\",{'empty':empty},context_instance = RequestContext(req))\n\n@login_required\ndef show(req):\n\tusername = req.session.get('username','')\n\tif req.POST:\n\t\tk = req.POST[\"k\"]\n\t\tshow = True\n\t\tresult = People.objects.filter(master__exact=username).filter(name__contains=k)\n\t\tif len(result) == 0:\n\t\t\tshow = False\n\t\tc = Context({'result':result,'show':show,'username':username})\n\t\treturn render_to_response(\"search.html\", c ,context_instance = RequestContext(req))\n\telse:\n\t\tpeople_lst = People.objects.filter(master__exact=username)\n\t\tc = Context({\"people_lst\":people_lst,'username':username})\n\t\treturn render_to_response(\"show.html\",c,context_instance = RequestContext(req))\n\n@login_required\ndef delete(req):\n\tusername = 
req.session.get('username','')\n\tId = req.GET[\"id\"]\n\tpeople = People.objects.filter(id__exact = Id)[0]\n\tif people.master != username:\n\t\treturn HttpResponseRedirect('/show/')\n\tPeople.objects.get(id = Id).delete()\n\tpeople_lst = People.objects.filter(master__exact=username)\n\tc = Context({\"people_lst\":people_lst,'username':username})\t\n\treturn render_to_response(\"show.html\",c)\n\n@login_required\ndef change(req):\n\tusername = req.session.get('username','')\n\tId = req.GET[\"id\"]\n\tpeople = People.objects.filter(id__exact = Id)[0]\n\tif people.master != username:\n\t\treturn HttpResponseRedirect('/show/')\n\tempty = []\n\tif req.POST:\n\t\tpost = req.POST\n\t\tfor i in post.keys():\n\t\t\tif post[i] == '':\n\t\t\t\tempty.append(i + ' shouldn\\'t be empty!')\n\t\tif len(empty) == 0:\t\n\t\t\tif post[\"sex\"] == \"M\":\n\t\t\t\tSex = True\n\t\t\telse:\n\t\t\t\tSex = False\t\t\n\t\t\tPeople.objects.filter(id = Id).update(\n\t\t\t\tstudent_num = post[\"student_num\"],\n\t\t\t\tname = post[\"name\"],\n\t\t\t\tsex = Sex,\n\t\t\t\tphone = post[\"phone\"],\n\t\t\t\temail = post[\"email\"],\n\t\t\t\tQQ = post[\"QQ\"],\n\t\t\t\taddress = post[\"address\"],\n\t\t\t\tbirthday = post[\"birthday\"],\n\t\t\t\t)\n\t\t\tpeople_lst = People.objects.filter(master__exact=username)\n\t\t\tc = Context({\"people_lst\":people_lst,'username':username})\t\n\t\t\treturn render_to_response(\"show.html\",c)\t\t\t\n\tc = Context({'empty':empty,'people':people})\n\treturn render_to_response(\"change.html\", c ,context_instance = RequestContext(req))\n\ndef register(req):\n\tif req.method == 'POST':\n\t\tform = UserCreationForm(req.POST)\n\t\tif form.is_valid():\n\t\t\tnew_user = form.save()\n\t\t\treturn HttpResponseRedirect('/login/')\n\telse:\n\t\tform = UserCreationForm()\n\treturn render_to_response('register.html',{'form':form},context_instance = RequestContext(req))\n\ndef login(req):\n\tif req.method == 'POST':\n\t\tusername = req.POST.get('username','')\n\t\tpassword = req.POST.get('password','')\n\t\tuser = auth.authenticate(username = username, password = password)\n\t\tif user is not None and user.is_active:\n\t\t\tauth.login(req,user)\n\t\t\treq.session['username'] = username\n\t\t\treturn HttpResponseRedirect('/show/')\n\treturn render_to_response('login.html',{},context_instance = RequestContext(req))\n\ndef logout(req):\n auth.logout(req)\n return HttpResponseRedirect(\"/\")\n\n@login_required\ndef setpasswd(req):\n\tusername = req.session.get('username','')\n\tuser = User.objects.filter(username__exact=username)[0]\n\tstatus = ''\n\tempty = []\n\tif req.method == 'POST':\n\t\tfor k in req.POST.keys():\n\t\t\tif req.POST[k] == '':\n\t\t\t\tempty.append(k + ' shouldn\\'t be empty!')\n\t\tif len(empty) == 0:\n\t\t\tpasswd = req.POST['passwd']\n\t\t\tnewpasswd = req.POST['newpasswd']\n\t\t\trepasswd = req.POST['repasswd']\n\t\t\tif user.check_password(passwd) == True:\n\t\t\t\tif newpasswd == repasswd:\n\t\t\t\t\tuser.set_password(newpasswd)\n\t\t\t\t\tuser.save()\n\t\t\t\t\tauth.logout(req)\n\t\t\t\t\treturn HttpResponseRedirect(\"/setsuccess/\")\n\t\t\t\telse:\n\t\t\t\t\tstatus = 're_error'\n\t\t\telse:\n\t\t\t\tstatus = 'passwd_error'\n\treturn render_to_response('setpasswd.html',{'empty':empty,'status':status},context_instance = RequestContext(req)) \n\ndef setsuccess(req):\n\treturn render_to_response('setsuccess.html',{})" }, { "alpha_fraction": 0.5493333339691162, "alphanum_fraction": 0.5746666789054871, "avg_line_length": 31.65217399597168, "blob_id": 
"28728d382bd2d231f7c832e7fb418ce1aefbc5cc", "content_id": "da9b79d9bf528e94a68bf2b3dc4567b3b62e5ebb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 751, "license_type": "no_license", "max_line_length": 83, "num_lines": 23, "path": "/2013-10-5/temperatuer,c2.py", "repo_name": "XianYX/Python-", "src_encoding": "WINDOWS-1252", "text": "# -*- coding: gbk -*-\n\"\"\"\ntemperature part c2\n\"\"\"\n #modle 2 begins\n #input the value of the temperature and switch the type of valu to float\n try:\n t=float(raw_input(\"please input the value:\"))\n #if the value is unable to use output \"error\"\n except:\n print(\"error\")\n #if the value is number,continue to covert the temperature to degree Keivin\n else:\n c=t+273.15\n #jurdge if the value is reasonable\n if c>0:\n #if the value is reasonable, output the result\n f=(9*(c-273.15)/5+32)\n print(\"The temperature is :\"+str(format(f,\".2f\"))+\"¨H\")\n else:\n #if the value is unreasonable,output \"error\"\n print(\"error\")\n #mode 2 finishes" }, { "alpha_fraction": 0.6332573890686035, "alphanum_fraction": 0.6332573890686035, "avg_line_length": 34.119998931884766, "blob_id": "3bbcf0af5145a430d14b0ba171ef989619e9267f", "content_id": "0d17a7c607ac4e3d89fef7ad8976875ce4a95782", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 878, "license_type": "no_license", "max_line_length": 55, "num_lines": 25, "path": "/2013-12-14/1130310226_段艺_project02/dy/dy/urls.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nfrom django.contrib.auth.views import login,logout\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'dy.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^$','addr_book.views.index'),\n url(r'^login/$','addr_book.views.login'),\n url(r'^accounts/login/$','addr_book.views.login'),\n url(r'^register/$','addr_book.views.register'),\n url(r'^logout/$','addr_book.views.logout'),\n url(r'^add/$','addr_book.views.add'),\n url(r'^show/$','addr_book.views.show'),\n url(r'^delete/$','addr_book.views.delete'),\n url(r'^change/$','addr_book.views.change'),\n url(r'^setpasswd/$','addr_book.views.setpasswd'),\n url(r'^setsuccess/$','addr_book.views.setsuccess'),\n)\n" }, { "alpha_fraction": 0.5432273745536804, "alphanum_fraction": 0.5782184600830078, "avg_line_length": 30.457096099853516, "blob_id": "ff7fc8fc4265e40172a3ea39a9b7cb86a9c7812e", "content_id": "7fc8931f3a1f7f22e87054620a21f96553daccfb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19062, "license_type": "no_license", "max_line_length": 214, "num_lines": 606, "path": "/2013-11-20/贪吃蛇神马的都去死/1130310226_段艺_project1/main.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is a snake game\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-11-22 19:39:08\n\"\"\"\n#import the modles which will be made\nfrom SimpleDialog import *\nfrom tkSimpleDialog import *\nfrom Tkinter import *\nimport random,sys\n\n\"=== View Part ===\"\ndef draw_wall():\n '''\n This function is to draw the wall.\n '''\n 
canvas.create_line((13,13),(13,363),(13,363),(588,363),(588,363),(588,13),(588,13),(13,13),width=5,fill='blue')\n\ndef first_page():\n '''\n This function is to draw the first page og the game.\n '''\n global step,gamescore,user_score,user_level,order_i,speed,speed0,level,k,is_wrong,user_index,difficult,user_lst,\\\n new_name,new_item,word_lst,word_lst_random,snakeX,snakeY,foodx,foody,foodX,foodY,new_x,new_y,snakeDirection\n #initialize the vailiable\n step = 15\n gamescore = 0\n user_score = 0\n user_level = 0\n order_i = 0\n speed = 0\n speed0 = 0\n level = 1\n k = -1\n is_wrong = False\n user_index = -1\n difficult = ''\n user_lst = []\n new_name = ''\n new_item = []\n word_lst = []\n word_lst_random = []\n snakeX=[]\n snakeY=[]\n foodX = [random.randrange(16,571,step)]\n foodY = [random.randrange(16,346,step)]\n foodx=random.randrange(16,571,step)\n foody=random.randrange(16,346,step) \n new_x=16+(3*step)\n new_y=16+(6*step) \n for i in range(6,9):\n snakeX.append(16+(3*step))\n snakeY.append(16+(i*step)) \n for i in range(0,2):\n random_food()\n foodX.append(foodx)\n foodY.append(foody) \n snakeDirection = 'DOWN'\n #draw the page\n draw_wall()\n canvas.create_text(200,100,text='English',font='Times 50 bold',fill='red')\n canvas.create_text(200,220,text='Snake\\nGame',font='Times 50 bold ',fill='red')\n canvas.create_text(450,100,text='1.New ',font='Courier 25 bold')\n canvas.create_text(450,150,text='2.Load',font='Courier 25 bold')\n canvas.create_text(450,200,text='3.Rank',font='Courier 25 bold')\n canvas.create_text(450,250,text='4.Exit',font='Courier 25 bold')\n canvas.focus_set()\n canvas.bind('<Key>',start)\n \ndef rank():\n '''\n This function is to draw the rank of the game\n '''\n canvas.delete(ALL)\n draw_wall()\n canvas.focus_set()\n canvas.bind('<Key>',rank_key_event)\n try:\n f = open('rank.txt','r')\n except:\n f = open('rank.txt','a')\n f.close()\n canvas.create_text(300,150,text='empty',font='Courier 25 bold')\n canvas.create_text(480,350,text='Press b to back',font='Courier 15 bold')\n else:\n user_lst = [line.strip() for line in f.readlines()]\n f.close()\n if len(user_lst) == 0:\n canvas.create_text(300,150,text='empty',font='Courier 25 bold')\n canvas.create_text(480,350,text='Press b to back',font='Courier 15 bold')\n else:\n user_lst.sort(key = lambda x:int(x.split()[1]),reverse=True)\n canvas.create_text(450,340,text='Press b to back \\n and d to del',font='Courier 15 bold')\n canvas.create_text(300,40,text='Name\\tScore\\tLevel\\tDifficulty',font='Courier 15 bold') \n lst_y = 80\n for i in user_lst:\n canvas.create_text(300,lst_y,text=i,font='Courier 15 bold')\n lst_y += 30\n\ndef show_words():\n '''\n This function is to show the words to user\n '''\n canvas.delete(ALL)\n draw_wall()\n word_lst_random = []\n global difficult,speed0,speed,word_order\n if difficult == 'easy':\n speed0 = 200\n speed = 200\n f = open('easy.txt','r')\n word_lst = [line.strip() for line in f.readlines()]\n f.close()\n for i in range(0,3):\n random_word = random.choice(word_lst)\n word_lst_random.append(random_word)\n elif difficult =='middle':\n speed0 = 150\n speed = 150\n f = open('middle.txt','r')\n word_lst = [line.strip() for line in f.readlines()]\n f.close()\n for i in range(0,4):\n random_word = random.choice(word_lst)\n word_lst_random.append(random_word)\n elif difficult == 'hard':\n speed0 = 100\n speed = 100\n f = open('hard.txt','r')\n word_lst = [line.strip() for line in f.readlines()]\n f.close()\n for i in range(0,5):\n random_word = random.choice(word_lst)\n 
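# hard mode queues five words sampled from hard.txt
            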
word_lst_random.append(random_word)\n    lst_y = 80\n    for i in word_lst_random:\n        canvas.create_text(300,lst_y,text=i,font='Courier 25 bold')\n        lst_y += 50\n    canvas.create_text(450,350,text='Ready? Press \\'T\\' to test!',font='Courier 12 bold')\n    word_order = ''.join(word_lst_random)\n    canvas.focus_set()\n    canvas.bind('<Key>',test)\n\ndef draw_score():\n    '''\n    This function is to display the score and level\n    '''\n    score() \n    score_label.config(text='Score: %d    Level:%d'%(gamescore,level))\n\ndef draw_snake():\n    '''\n    This function is to draw a snake.\n    '''\n    canvas.delete(\"snake\")\n    snake()\n    canvas.create_rectangle(snakeX[0],snakeY[0],snakeX[0]+step,snakeY[0]+step,fill=('green'),tags='snake')\n    for i in range(1,gamescore+3): \n        canvas.create_rectangle(snakeX[i],snakeY[i],snakeX[i]+step,snakeY[i]+step,fill=('orange'),tags='snake')\n\ndef draw_food():\n    '''\n    This function is to draw the food.\n    '''\n    global foodx,foody,level,k,j,now_letter\n    #delete the food which has already been eaten\n    canvas.delete('food')\n    update_food()\n    now_letters()\n    #draw the food; the number of food items is decided by the level\n    for j in range(0,3):\n        canvas.create_rectangle(foodX[j],foodY[j],foodX[j]+step,foodY[j]+step,fill='red',tags=\"food\")\n        canvas.create_text(foodX[j] + 8,foodY[j] + 9,text = now_letter[j],tags='food',font='Courier 12 bold')\n\n\"=== Model Part ===\"\ndef score():\n    '''\n    This function is to compute the score, level and speed\n    '''\n    global gamescore,level,speed,speed0\n    level=1+gamescore/10\n    if speed > 60:\n        speed = speed0 - (level-1)*10\n    else:\n        speed=50\n\ndef snake():\n    '''\n    This function is to set the snake\n    '''\n    global snakeX,snakeY,new_x,new_y\n    #if a food is eaten, add a new coordinate to the list\n    if iseated()==True:\n        snakeX.insert(0,new_x)\n        snakeY.insert(0,new_y)\n        draw_food()\n    else:\n        snakeX.insert(0,new_x)\n        snakeY.insert(0,new_y)\n    #delete the old coordinate from the list\n    del snakeX[-1]\n    del snakeY[-1]\n\ndef random_food():\n    '''\n    This function is to get a random coordinate for the food\n    '''\n    global foodx,foody,snakeX,snakeY,foodX,foodY\n    foodx=random.randrange(16,571,step)\n    foody=random.randrange(16,346,step)\n    #judge if the new food covers the snake or another food; if so, regenerate it\n    for i in range(0,len(snakeX)-1):\n        if foodx==snakeX[i] and foody==snakeY[i]:\n            random_food()\n    for i in range(0,len(foodX)-1):\n        if foodx == foodX[i] and foody == foodY[i]:\n            random_food()\n\n\n\"=== Control Part ===\"\ndef start(event):\n    '''\n    This function is to judge which key the user pressed\n    '''\n    if event.keycode == 49:\n        new()\n    elif event.keycode == 50:\n        load()\n    elif event.keycode == 51:\n        rank()\n    elif event.keycode == 52:\n        sys.exit()\n\ndef rank_key_event(event):\n    '''\n    This function is to handle the key event in the rank\n    '''\n    if event.keycode == 66:\n        canvas.delete(ALL)\n        first_page()\n    elif event.keycode == 68:\n        f = open('rank.txt','r')\n        user_lst = [line.strip() for line in f.readlines()]\n        f.close()\n        if len(user_lst) == 0:\n            canvas.delete(ALL)\n            draw_wall()\n            canvas.create_text(300,150,text='empty',font='Courier 25 bold')\n            canvas.create_text(480,350,text='Press b to back',font='Courier 15 bold')\n            canvas.focus_set()\n            canvas.bind('<Key>',rank_key_event) \n        else:\n            rank_del()\n    \ndef rank_del():\n    '''\n    This function is to delete a user from the rank\n    '''\n    f = open('rank.txt','r')\n    user_lst = [line.strip() for line in f.readlines()]\n    f.close()\n    user_lst.sort(key = lambda x:int(x.split()[1]),reverse=True)\n    name_lst = []\n    for item in user_lst:\n        name_lst.append(item.split()[0]) 
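\n    # go() blocks until one of the name buttons is pressed and returns its index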
\n    dlg = SimpleDialog(canvas,text='which one to del',buttons=name_lst,default=0,)\n    del_index = dlg.go()\n    del user_lst[del_index]\n    user_lst.sort(key = lambda x:int(x.split()[1]),reverse=True)\n    for i in range(0,len(user_lst)-1):\n        user_lst[i] += '\\n'\n    f = open('rank.txt','w')\n    f.writelines(user_lst)\n    f.close()\n    canvas.delete(ALL)\n    rank()\n    \ndef new():\n    '''\n    This function is to let the user create a new user\n    '''\n    global new_name,new_item\n    canvas.delete(ALL)\n    draw_wall()\n    new_name = askstring(title = 'new',prompt = 'input your name')\n    if new_name == '' or new_name is None:\n        first_page()\n    else:\n        canvas.create_text(300,100,text='please choose the difficulty',font='Courier 20 bold')\n        canvas.create_text(300,200,text='\\'E\\'for easy\\n\\'M\\'for middle\\n\\'H\\'for hard',font='Courier 20 bold')\n        canvas.create_text(480,350,text='Press b to back',font='Courier 15 bold')\n        canvas.focus_set()\n        canvas.bind('<Key>',new_event)\n\ndef new_event(event):\n    '''\n    This function is to handle the difficulty choice\n    '''\n    global difficult\n    if event.keycode == 66:\n        canvas.delete(ALL)\n        first_page()\n    elif event.keycode == 69:\n        difficult = 'easy'\n        show_words()\n    elif event.keycode == 77:\n        difficult = 'middle'\n        show_words()\n    elif event.keycode == 72:\n        difficult = 'hard'\n        show_words()\n\ndef load():\n    '''\n    This function is to load the user information\n    '''\n    global new_name,difficult,level,user_score,user_level,user_index\n    canvas.delete(ALL)\n    draw_wall()\n    try:\n        f = open('rank.txt','r')\n    except:\n        f = open('rank.txt','a')\n        f.close()\n        canvas.create_text(300,150,text='empty',font='Courier 25 bold')\n    else:\n        user_lst = [line.strip() for line in f.readlines()]\n        f.close()\n        if len(user_lst) == 0:\n            canvas.create_text(300,150,text='empty',font='Courier 25 bold')\n            canvas.create_text(480,350,text='Press b to back',font='Courier 15 bold')\n        else:\n            user_lst.sort(key = lambda x:int(x.split()[1]),reverse=True)\n            canvas.create_text(300,40,text='Name\\tScore\\tLevel\\tDifficulty',font='Courier 15 bold') \n            lst_y = 80\n            for i in user_lst:\n                canvas.create_text(300,lst_y,text=i,font='Courier 15 bold')\n                lst_y += 30\n    f = open('rank.txt','r')\n    user_lst = [line.strip() for line in f.readlines()]\n    f.close()\n    user_lst.sort(key = lambda x:int(x.split()[1]),reverse=True)\n    if len(user_lst) == 0:\n        canvas.create_text(300,150,text='empty',font='Courier 25 bold')\n        canvas.create_text(480,350,text='Press b to back',font='Courier 15 bold')\n        canvas.focus_set()\n        canvas.bind('<Key>',rank_key_event)\n    else:\n        name_lst = []\n        for item in user_lst:\n            name_lst.append(item.split()[0]) \n        dlg = SimpleDialog(canvas,text='which one to choose',buttons=name_lst,default=0,)\n        user_index = dlg.go()\n        new_name = user_lst[user_index].split()[0]\n        user_score = int(user_lst[user_index].split()[1])\n        user_level = int(user_lst[user_index].split()[2])\n        difficult = user_lst[user_index].split()[3]\n        draw_score()\n        show_words()\n\ndef move(event):\n    '''\n    This function is to change the direction the snake moves\n    '''\n    global snakeDirection\n    if (event.keycode == 37 or event.keycode == 65) and snakeDirection!='RIGHT':\n        snakeDirection='LEFT'\n    elif (event.keycode == 39 or event.keycode == 68) and snakeDirection!='LEFT':\n        snakeDirection='RIGHT'\n    elif (event.keycode == 38 or event.keycode == 87) and snakeDirection!='DOWN':\n        snakeDirection='UP'\n    elif (event.keycode == 40 or event.keycode == 83) and snakeDirection!='UP':\n        snakeDirection='DOWN'\n\ndef isdead():\n    '''\n    This function is to judge if the snake is dead\n    '''\n    
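# the playable area spans x in [16,571] and y in [16,346]; leaving it is fatal
    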
global k\n    if snakeX[0] < 16 or snakeX[0] > 571 or snakeY[0] < 16 or snakeY[0] > 346:\n        return True\n    for i in range(1,len(snakeX)-1):\n        if snakeX[i] == snakeX[0] and snakeY[i] == snakeY[0]:\n            return True\n    return False\n\ndef iseated():\n    '''\n    This function is to judge if the snake has eaten a food.\n    '''\n    global foodx,foody,gamescore,k,level,order_i,is_wrong\n    #judge if the food has been eaten\n    for k in range(0,3):\n        if foodX[k]==snakeX[0] and foodY[k]==snakeY[0]:\n            if now_letter[k] != word_order[order_i]:\n                is_wrong = True\n                gamescore -= 1\n            gamescore += 1\n            order_i += 1\n            if order_i == len(word_order):\n                draw_score()\n                success()\n            draw_score()\n            return True\n    \ndef play():\n    '''\n    This function is the main part of the game\n    '''\n    global new_x,new_y,speed,foodX,foodY,is_wrong\n    #the main loop only stops when the snake dies or a wrong letter is eaten\n    canvas.delete(ALL)\n    draw_wall()\n    draw_food()\n    while True:\n        #set the focus in the canvas\n        canvas.focus_set()\n        #get the key and change the direction\n        canvas.bind('<Key>',move)\n        #move \n        if snakeDirection=='LEFT':\n            new_x = snakeX[0]-step \n        elif snakeDirection=='RIGHT':\n            new_x = snakeX[0]+step\n        elif snakeDirection=='UP':\n            new_y = snakeY[0]-step\n        else:\n            new_y = snakeY[0]+step\n        if isdead()==True:\n            #if the snake is dead, break out of the game loop\n            break\n        if is_wrong == True:\n            break\n        draw_snake()\n        #set the speed\n        canvas.after(speed)\n        canvas.update()\n    #if the snake is dead, use the 'gameover' function\n    gameover()\n\ndef test(event):\n    '''\n    This function is to start the game.\n    '''\n    if event.keycode == 84:\n        play()\n\ndef update_food():\n    '''\n    This function is to update the list of the food\n    ''' \n    global foodx,foody,foodX,foodY\n    for i in range(0,3):\n        random_food()\n        foodX[i] = foodx\n        foodY[i] = foody\n\ndef gameover():\n    '''\n    This function is to display the game-over information\n    '''\n    #delete the items on the canvas and show the score.\n    canvas.delete(ALL)\n    draw_wall()\n    canvas.create_text(300,100,text='Game Over!',font='Verdana 30 bold')\n    canvas.create_text(300,200,text='Your score is : '+str(gamescore),font='Verdana 20 bold')\n    canvas.create_text(300,275,text='Press N to start a new game \\n Q to exit',font='Verdana 15 bold')\n    canvas.create_text(400,325,text='Design by DuanYi \\nEmail:[email protected]',font='Verdana 10 bold')\n    canvas.bind('<Key>',restart)\n\ndef restart(event):\n    '''\n    This function is to restart the game.\n    '''\n    global gamescore,k,step,snakeX,snakeY,order_i,new_x,new_y,foodx,foody,snakeDirection,level,speed,speed0,foodX,foodY,user_score,difficult,user_lst,new_name,new_item,word_lst,word_lst_random,user_level,user_index\n    if event.keycode==81:\n        #if the user presses Q, exit\n        sys.exit() \n    if event.keycode==78:\n        #delete all the items.\n        canvas.delete(ALL) \n        #reset all the variables\n        step = 15\n        gamescore = 0\n        user_score = 0\n        user_level = 0\n        order_i = 0\n        speed = 0\n        speed0 = 0\n        level = 1\n        k = -1\n        user_index = -1\n        difficult = ''\n        user_lst = []\n        new_name = ''\n        new_item = []\n        word_lst = []\n        word_lst_random = []\n        snakeX=[]\n        snakeY=[]\n        foodX = [random.randrange(16,571,step)]\n        foodY = [random.randrange(16,346,step)]\n        foodx=random.randrange(16,571,step)\n        foody=random.randrange(16,346,step)\n        \n        new_x=16+(3*step)\n        new_y=16+(6*step)\n        \n        for i in range(6,9):\n            snakeX.append(16+(3*step))\n            snakeY.append(16+(i*step))\n        \n        for i in range(0,2):\n            random_food()\n            foodX.append(foodx)\n            foodY.append(foody)\n        draw_score()\n        first_page() \n\ndef success():\n    '''\n    This function is to show the success information to the 
user.\n '''\n global gamescore,user_score,difficult,user_lst,new_name,new_item,word_lst,word_lst_random,user_level,user_index\n canvas.delete(ALL)\n draw_wall()\n user_score += gamescore\n user_level += 1\n new_item.append(new_name)\n new_item.append(str(user_score))\n new_item.append(str(user_level))\n new_item.append(difficult)\n item = '\\t'.join(new_item) \n f = open('rank.txt','r')\n user_lst = [line.strip() for line in f.readlines()]\n f.close() \n user_lst.append(item)\n if user_index != -1:\n del user_lst[user_index]\n user_lst.sort(key = lambda x:int(x.split()[1]),reverse=True)\n for i in range(0,len(user_lst)-1):\n user_lst[i] += '\\n' \n f = open('rank.txt','w')\n f.writelines(user_lst)\n f.close()\n canvas.create_text(300,100,text='You Win!',font='Verdana 30 bold')\n canvas.create_text(300,200,text='Your score is : '+str(gamescore),font='Verdana 20 bold')\n canvas.create_text(300,275,text='press N to next game \\n Q to exit',font='Verdana 15 bold')\n canvas.focus_set()\n canvas.bind('<Key>',success_event) \n\ndef success_event(event):\n '''\n This function is to let the user to choice the key event of the success.\n '''\n global gamescore,order_i,level\n if event.keycode == 81:\n sys.exit()\n if event.keycode == 78:\n gamescore = 0\n order_i = 0\n level = 1\n draw_score()\n show_words()\n\ndef now_letters():\n '''\n this function is to create a list of letter which the food sign.\n '''\n global word_order,order_i,now_letter\n now_letter = []\n now_letter.append(word_order[order_i])\n now_letter.append(random.choice(letter_lst))\n now_letter.append(random.choice(letter_lst))\n \n\"=== Main Part ===\"\nletter_lst = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\ngamescore = 0\nlevel = 1\n\nwindow = Tk()\nwindow.geometry(\"600x400+10+10\")\nwindow.maxsize(600,400)\nwindow.minsize(600,400)\nwindow.title(\"Snake game\")\n\nframe1=Frame(window,height=370,width=600)\nframe2=Frame(window,height=30,width=600)\ncanvas=Canvas(frame1,bg='yellow',width=600,height=370)\nscore_label=Label(frame2,text='Score: %d Level:%d'%(gamescore,level))\n\nframe1.pack()\nframe2.pack(fill=BOTH)\nscore_label.pack(side=LEFT)\ncanvas.pack(fill=BOTH)\n\nfirst_page()\n\nwindow.mainloop()" }, { "alpha_fraction": 0.5521390438079834, "alphanum_fraction": 0.5775400996208191, "avg_line_length": 31.565217971801758, "blob_id": "563396d0b2901de0497275e74c3e35e05c001fde", "content_id": "ed114069f2737cba342bfa93ec000c08b376fd5b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 750, "license_type": "no_license", "max_line_length": 83, "num_lines": 23, "path": "/2013-10-5/temperatuer,c1.py", "repo_name": "XianYX/Python-", "src_encoding": "IBM852", "text": "# -*- coding: gbk -*-\n\"\"\"\ntemperature part c1\n\"\"\"\n #modle 1 begins\n #input the value of the temperature and switch the type of valu to float\n try:\n f=float(raw_input(\"please input the value:\"))\n #if the value is unable to use output \"error\"\n except:\n print(\"error\")\n #if the value is number,continue to covert the temperature to degree Keivin\n else:\n c=(f-32)*5/9+273.15\n #jurdge if the value is reasonable\n if c>0:\n #if the value is reasonable, output the result\n t=c-273.15\n print(\"The temperature is :\"+str(format(t,\".2f\"))+\"íŠ\")\n else:\n #if the value is unreasonable,output \"error\"\n print(\"error\")\n #mode 1 finishes" }, { "alpha_fraction": 0.6617100238800049, "alphanum_fraction": 0.7267658114433289, "avg_line_length": 
30.647058486938477, "blob_id": "3a5b608b31e4127e65893217b45eab407ced8f32", "content_id": "48fc3a9353165e0d65302d6ab5d45e7dadc3c6aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 538, "license_type": "no_license", "max_line_length": 128, "num_lines": 17, "path": "/2013-9-29/加速度.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for calcuate the average acceleration . \nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-9-29 13:23:22\n\"\"\"\n#enter the starting velocity , the ending velocity and the time span . \nv0,v1,t=eval(raw_input(\"Please enter the starting velocity v0 ,the ending velocity v1 and the time span t (separed by comma):\"))\n#calcuate the average acceleration\na=format((v1-v0)/t,\".4f\")\n#print the result\nprint(\"The average acceleration is :\"+str(a))\n" }, { "alpha_fraction": 0.5750256776809692, "alphanum_fraction": 0.6094552874565125, "avg_line_length": 31.450000762939453, "blob_id": "d4fef7590c3d5314134b24feaaefa1094ba683ef", "content_id": "bbb9783a1ff21be3d78607aca9dec999d40d606b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1949, "license_type": "no_license", "max_line_length": 83, "num_lines": 60, "path": "/2013-10-5/temperatuer2.py", "repo_name": "XianYX/Python-", "src_encoding": "WINDOWS-1252", "text": "# -*- coding: gbk -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for coverting the temperature . \nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-10-3 23:23:15\n\"\"\"\n#input the model and the value\nm=raw_input(\"please input the modle:\")\n#select the modle \nif m==\"1\":\n #modle 1 is to switch the degree Fahrenheit to degree centigrade \n #modle 1 begins\n #input the value of the temperature and switch the type of valu to float\n try:\n f=float(raw_input(\"please input the value:\"))\n #if the value is unable to use output \"error\"\n except:\n print(\"error\")\n #if the value is number,continue to covert the temperature to degree Keivin\n else:\n c=(f-32)*5/9+273.15\n #jurdge if the value is reasonable\n if c>0:\n #if the value is reasonable, output the result\n t=c-273.15\n print(\"The temperature is :\"+str(format(t,\".2f\"))+\"¡æ\")\n else:\n #if the value is unreasonable,output \"error\"\n print(\"error\")\n #mode 1 finishes\nelif m==\"2\":\n #modle 2 is to switch the degree centigrade to degree Fahrenheit\n #modle 2 begins\n #input the value of the temperature and switch the type of valu to float\n try:\n t=float(raw_input(\"please input the value:\"))\n #if the value is unable to use output \"error\"\n except:\n print(\"error\")\n #if the value is number,continue to covert the temperature to degree Keivin\n else:\n c=t+273.15\n #jurdge if the value is reasonable\n if c>0:\n #if the value is reasonable, output the result\n f=(9*(c-273.15)/5+32)\n print(\"The temperature is :\"+str(format(f,\".2f\"))+\"¨H\")\n else:\n #if the value is unreasonable,output \"error\"\n print(\"error\")\n #mode 2 finishes\nelse :\n #if the modle is unable to use, output \"error\"\n print(\"error\") \n#finish" }, { "alpha_fraction": 0.5577617287635803, "alphanum_fraction": 0.6480144262313843, "avg_line_length": 20.30769157409668, "blob_id": "a924937f29525a604551ab23925f5d023502885f", "content_id": "61a09d0b6af046a8586005565653b9137503f72c", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 554, "license_type": "no_license", "max_line_length": 46, "num_lines": 26, "path": "/2013-9-29/BMI.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for calcuate the BMI. \nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-9-29 14:11:35\n\"\"\"\n\n#enter the weight \nw=eval(raw_input(\"Enter weighe in pounds :\" ))\n#enter the height \nh=eval(raw_input(\"Enter heighe in inch :\" ))\n#convert the unit of the weight ang height\nif w<0 or h<0:\n print(\"Error\")\nelse:\n w0=w*0.45359237\n h0=h*0.0254\n #calcuate the BMI\n BMI=format(w0/(h0**2),\".4f\")\n #output the result\n print(\"BMI is :\"+str(BMI))\n" }, { "alpha_fraction": 0.5753424763679504, "alphanum_fraction": 0.6255707740783691, "avg_line_length": 21.310344696044922, "blob_id": "901e527223a15fa8c94d309165e1816f7ce12a22", "content_id": "2ddd0a2ec2fc48a6f7375f6c225d78e3df20bf9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 657, "license_type": "no_license", "max_line_length": 55, "num_lines": 29, "path": "/2013-10-9/do-n.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for drawing four doing n. \nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-10-9 21:25:59\n\"\"\"\n#make sure the number input is right\ntry:\n #input the value of number and loop times\n x=float(raw_input(\"please input the value:\"))\n n=eval(raw_input(\"please input the time of loop:\"))\nexcept:\n #if input the wrong thing output \"Error\"\n print(\"Error\") \nelse:\n #start to comput\n i=0\n while i<n:\n x=x**2+2\n i += 1\n #output the value\n print(\"the value is:\"+format(x,\".2f\"))\n#finish the program\nprint(\"End\")\n \n \n" }, { "alpha_fraction": 0.5492589473724365, "alphanum_fraction": 0.5902354121208191, "avg_line_length": 19.763635635375977, "blob_id": "8b3583fea45476b8316767c984c1bf723d81da07", "content_id": "b47b0816b967aeb42d9cb47917c5de7411fa7154", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1155, "license_type": "no_license", "max_line_length": 128, "num_lines": 55, "path": "/2013-10-29/test2.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 29 19:56:34 2013\n\n@author: Administrator\n\"\"\"\n\nfrom Tkinter import *\nroot=Tk()\n\n\n\n\n\n\n\nfoodX=[0]\nfoodY=[0]\nfoodx=random.randrange(16,571,step)\nfoody=random.randrange(16,346,step)\n\n\n\n\ndef draw_food():\n global foodx,foody,level,k,j,foodname\n canvas.delete('food')\n for _ in range(1,level+1):#食物数量\n random_food()\n for i in range(0,len(snakeX)-1):\n if foodx==snakeX[i] and foody==snakeY[i]:\n random_food() \n del foodX[k]\n del foodY[k]\n foodX.insert(0,foodx)\n foodY.insert(0,foody)\n for j in range(0,level):\n canvas.create_rectangle(foodX[j],foodY[j],foodX[j]+step,foodY[j]+step,fill='red' ,tags=(\"food\",foodname)) #food view\n\ndef random_food():\n global foodx,foody \n foodx=random.randrange(16,571,step)\n foody=random.randrange(16,346,step)\n\ndef iseated():\n global foodx,foody,gamescore,k,level\n for k in range(0,level):\n if foodX[k]==snakeX[0] and foodY[k]==snakeY[0]:\n gamescore+=5\n score()\n 
break\n return True\n\nk=j=0\nfoodname='food'+str(j)\n\n\n\n\n\n" }, { "alpha_fraction": 0.5023696422576904, "alphanum_fraction": 0.6030805706977844, "avg_line_length": 12.612903594970703, "blob_id": "336b94738a684194a261fd66d36e61341354923d", "content_id": "3d52630ae60b715952f742f26fb964497ec73412", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 844, "license_type": "no_license", "max_line_length": 57, "num_lines": 62, "path": "/2013-10-9/sin.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for drawing graph of sin again and again.\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-10-13 03:32:02\n\"\"\"\n#import the necessary functions\nfrom math import sin,pi\nfrom turtle import *\n#set the canvas\nsetworldcoordinates(-4*pi,-2*pi,4*pi,2*pi)\n#set the pensize\npensize(2)\n#draw X\npu()\ngoto(-4*pi,0)\npd()\ngoto(4*pi,0)\npu()\ngoto(4*pi-0.3,0.1)\npd()\ngoto(4*pi,0)\npu()\ngoto(4*pi-0.3,-0.1)\npd()\ngoto(4*pi,0)\n#draw Y\npu()\ngoto(0,-2*pi)\npd()\ngoto(0,2*pi)\npu()\ngoto(-0.1,2*pi-0.3)\npd()\ngoto(0,2*pi)\npu()\ngoto(0.1,2*pi-0.3)\npd()\ngoto(0,2*pi)\n\npu()\ngoto(-2*pi,-0.4)\npd()\nwrite(\"-2pi\")\npu()\ngoto(2*pi,-0.4)\npd()\nwrite(\"2pi\")\n\nwhile True:\n x=-3.5*pi\n pu()\n goto(x,sin(x))\n pd()\n while x<3.5*pi:\n x += 0.03\n goto(x,sin(x))\n" }, { "alpha_fraction": 0.3745318353176117, "alphanum_fraction": 0.41947564482688904, "avg_line_length": 18.814815521240234, "blob_id": "30abad87104ebce5de4fe8e8e5dbbecf0d008984", "content_id": "3fc126057c8fdca7833caf4f9207921427197525", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 46, "num_lines": 27, "path": "/2013-10-5/1130310226_段艺_1.2.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nmodel = raw_input()\nflag = 1\nk = 273.15\ntry:\n origin = float(raw_input())\nexcept:\n flag = 0\nelse:\n if model == '1':\n result = (origin - 32) * 5 / 9 + k\n if result > 0:\n result -= k\n else:\n flag = 0\n elif model == '2':\n result += k\n if result > 0:\n result = 9 * (result - k) / 5 + 32\n else:\n flag = 0\n else:\n flag = 0\n if flag:\n print(str(format(result,'.2f')))\n else:\n print(\"Error\")" }, { "alpha_fraction": 0.5306443572044373, "alphanum_fraction": 0.5795784592628479, "avg_line_length": 31.3764705657959, "blob_id": "642529605acc52e6c841dc38fea8d53c2c831cd7", "content_id": "ec0e18da862b4ef7bb2a87a4dc9e0f2b027aedf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8256, "license_type": "no_license", "max_line_length": 115, "num_lines": 255, "path": "/2013-11-20/贪吃蛇神马的都去死1/main - 副本1.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 19 00:01:49 2013\n\n@author: Administrator\n\"\"\"\nfrom SimpleDialog import *\nfrom tkSimpleDialog import *\nfrom Tkinter import *\nimport random,sys\n\n\"=== View Part ===\"\ndef draw_wall():\n canvas.create_line((13,13),(13,363),(13,363),(588,363),(588,363),(588,13),(588,13),(13,13),width=5,fill='blue')\n\ndef first_page():\n draw_wall()\n canvas.create_text(200,100,text='Snake',font='Times 50 bold')\n canvas.create_text(200,200,text='Game',font='Times 50 bold ')\n canvas.create_text(450,100,text='1.New ',font='Courier 25 
bold')\n canvas.create_text(450,150,text='2.Load',font='Courier 25 bold')\n canvas.create_text(450,200,text='3.Rank',font='Courier 25 bold')\n canvas.create_text(450,250,text='4.Exit',font='Courier 25 bold')\n canvas.focus_set()\n canvas.bind('<Key>',start)\n \ndef rank():\n canvas.delete(ALL)\n draw_wall()\n\n canvas.focus_set()\n canvas.bind('<Key>',rank_key_event)\n try:\n f = open('rank.txt','r')\n except:\n f = open('rank.txt','a')\n f.close()\n canvas.create_text(300,150,text='empty',font='Courier 25 bold')\n canvas.create_text(480,350,text='Press b to back',font='Courier 15 bold')\n else:\n user_lst = [line.strip() for line in f.readlines()]\n f.close()\n if len(user_lst) == 0:\n canvas.create_text(300,150,text='empty',font='Courier 25 bold')\n canvas.create_text(480,350,text='Press b to back',font='Courier 15 bold')\n else:\n user_lst.sort(key = lambda x:int(x.split()[1]),reverse=True)\n canvas.create_text(450,340,text='Press b to back \\n and d to del',font='Courier 15 bold')\n canvas.create_text(300,40,text='Name\\tScore\\tLevel\\tDifficulty',font='Courier 15 bold') \n lst_y = 80\n for i in user_lst:\n canvas.create_text(300,lst_y,text=i,font='Courier 15 bold')\n lst_y += 30\n\"=== Model Part ===\"\n\n\n\"=== Control Part ===\"\ndef start(event):\n if event.keycode == 49:\n new()\n elif event.keycode == 50:\n load()\n elif event.keycode == 51:\n rank()\n elif event.keycode == 52:\n sys.exit()\n\ndef rank_key_event(event):\n if event.keycode == 66:\n canvas.delete(ALL)\n first_page()\n elif event.keycode == 68:\n f = open('rank.txt','r')\n user_lst = [line.strip() for line in f.readlines()]\n f.close()\n if len(user_lst) == 0:\n canvas.delete(ALL)\n draw_wall()\n canvas.create_text(300,150,text='empty',font='Courier 25 bold')\n canvas.create_text(480,350,text='Press b to back',font='Courier 15 bold')\n canvas.focus_set()\n canvas.bind('<Key>',rank_key_event) \n else:\n rank_del()\n \ndef rank_del():\n f = open('rank.txt','r')\n user_lst = [line.strip() for line in f.readlines()]\n f.close()\n user_lst.sort(key = lambda x:int(x.split()[1]),reverse=True)\n name_lst = []\n for item in user_lst:\n name_lst.append(item.split()[0]) \n dlg = SimpleDialog(canvas,text='which one to del',buttons=name_lst,default=0,)\n del_index = dlg.go()\n del user_lst[del_index]\n user_lst.sort(key = lambda x:int(x.split()[1]),reverse=True)\n for i in range(0,len(user_lst)-1):\n user_lst[i] += '\\n'\n f = open('rank.txt','w')\n f.writelines(user_lst)\n f.close()\n canvas.delete(ALL)\n rank()\n \ndef new():\n global new_name,new_item\n canvas.delete(ALL)\n draw_wall()\n new_name = askstring(title = 'new',prompt = 'input your name')\n if new_name == '' or new_name == None:\n first_page()\n else:\n canvas.create_text(300,100,text='please choose the difficulty',font='Courier 20 bold')\n canvas.create_text(300,200,text='\\'E\\'for easy\\n\\'M\\'for middle\\n\\'H\\'for hard',font='Courier 20 bold')\n canvas.create_text(480,350,text='Press b to back',font='Courier 15 bold')\n canvas.focus_set()\n canvas.bind('<Key>',new_event)\n\ndef new_event(event):\n global difficult,speed,show_time,word_lst,word_lst_random\n if event.keycode == 66:\n canvas.delete(ALL)\n first_page()\n elif event.keycode == 69:\n difficult = 'easy'\n speed = 200\n show_words()\n elif event.keycode == 77:\n difficult = 'middle'\n speed = 150\n show_words()\n elif event.keycode == 72:\n difficult = 'hard'\n speed = 100\n show_words()\n\ndef show_words():\n canvas.delete(ALL)\n draw_wall()\n global difficult\n if difficult == 'easy':\n f = 
open('easy1671.txt','r')\n word_lst = [line.strip() for line in f.readlines()]\n f.close()\n for i in range(0,3):\n random_word = word_lst[random.randrange(0,1672)]\n word_lst_random.append(random_word)\n elif difficult =='middle':\n f = open('middle2104.txt','r')\n word_lst = [line.strip() for line in f.readlines()]\n f.close()\n for i in range(0,4):\n random_word = word_lst[random.randrange(0,2105)]\n word_lst_random.append(random_word)\n elif difficult == 'hard':\n f = open('hard431.txt','r')\n word_lst = [line.strip() for line in f.readlines()]\n f.close()\n for i in range(0,5):\n random_word = word_lst[random.randrange(0,432)]\n word_lst_random.append(random_word)\n lst_y = 80\n for i in word_lst_random:\n canvas.create_text(300,lst_y,text=i,font='Courier 25 bold')\n lst_y += 50\n canvas.create_text(450,350,text='Ready? Press \\'T\\' to test!',font='Courier 12 bold')\n canvas.focus_set()\n canvas.bind('<Key>',test)\n\ndef load():\n global new_name,difficult,level,game_score\n canvas.delete(ALL)\n draw_wall()\n try:\n f = open('rank.txt','r')\n except:\n f = open('rank.txt','a')\n f.close()\n canvas.create_text(300,150,text='empty',font='Courier 25 bold')\n else:\n user_lst = [line.strip() for line in f.readlines()]\n f.close()\n if len(user_lst) == 0:\n canvas.create_text(300,150,text='empty',font='Courier 25 bold')\n canvas.create_text(480,350,text='Press b to back',font='Courier 15 bold')\n else:\n user_lst.sort(key = lambda x:int(x.split()[1]),reverse=True)\n canvas.create_text(300,40,text='Name\\tScore\\tLevel\\tDifficulty',font='Courier 15 bold') \n lst_y = 80\n for i in user_lst:\n canvas.create_text(300,lst_y,text=i,font='Courier 15 bold')\n lst_y += 30\n f = open('rank.txt','r')\n user_lst = [line.strip() for line in f.readlines()]\n f.close()\n user_lst.sort(key = lambda x:int(x.split()[1]),reverse=True)\n if len(user_lst) == 0:\n canvas.create_text(300,150,text='empty',font='Courier 25 bold')\n canvas.create_text(480,350,text='Press b to back',font='Courier 15 bold')\n canvas.focus_set()\n canvas.bind('<Key>',rank_key_event)\n else:\n name_lst = []\n for item in user_lst:\n name_lst.append(item.split()[0]) \n dlg = SimpleDialog(canvas,text='which one to choose',buttons=name_lst,default=0,)\n user_index = dlg.go()\n new_name = user_lst[user_index].split()[0]\n game_score = user_lst[user_index].split()[1]\n level = user_lst[user_index].split()[2]\n difficult = user_lst[user_index].split()[3]\n show_words()\n \ndef play():\n canvas.delete(ALL)\n draw_wall()\n print 'play'\n pass\n\ndef test(event):\n if event.keycode == 84:\n play()\n \n\"=== Main Part ===\"\n\ngamescore = 0\nspeed = 0\nlevel=1\ndifficult = ''\nuser_lst = []\nnew_name = ''\nnew_item = []\nword_lst = []\nword_lst_random = []\n\nwindow = Tk()\nwindow.geometry(\"600x400+10+10\")\nwindow.maxsize(600,400)\nwindow.minsize(600,400)\nwindow.title(\"Snake game\")\n\nframe1=Frame(window,height=370,width=600)\nframe2=Frame(window,height=30,width=600)\ncanvas=Canvas(frame1,bg='yellow',width=600,height=370)\nscore_label=Label(frame2,text='Score: %d Level:%d'%(gamescore,level))\n\nframe1.pack()\nframe2.pack(fill=BOTH)\nscore_label.pack(side=LEFT)\ncanvas.pack(fill=BOTH)\n\nfirst_page()\n\nwindow.mainloop()\n" }, { "alpha_fraction": 0.48840048909187317, "alphanum_fraction": 0.5396825671195984, "avg_line_length": 21.75, "blob_id": "058aaa23b52f75e449f29500c16f8305aa142dfd", "content_id": "0dd486a99f907aed8f8fcb32d4df0e892e82929a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 819, "license_type": "no_license", "max_line_length": 56, "num_lines": 36, "path": "/2013-10-9/BMI2.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for calcuate the BMI. \nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-10-12 17:19:03\n\"\"\"\ntry:\n #enter the weight \n w=float(raw_input(\"Enter weighe in kilograms :\" ))\n #enter the height \n h=float(raw_input(\"Enter heighe in inch maters :\" ))\nexcept:\n print(\"Error\")\nelse:\n if w<0 or h<0:\n print(\"Error\")\n else: \n #calcuate the BMI\n BMI=w/(h**2)\n #give the suggestion\n if BMI<18.5:\n sug=\"Underweight\"\n elif BMI<24.9:\n sug=\"Normal\"\n elif BMI<29.9:\n sug=\"Overweight\"\n else:\n sug=\"Obese\" \n #output the result\n print(\"BMI is :\"+format(BMI,\".2f\"))\n print(\"You are \"+sug+\".\")\n" }, { "alpha_fraction": 0.48694780468940735, "alphanum_fraction": 0.5381526350975037, "avg_line_length": 18.45098114013672, "blob_id": "518f72fecacf610932b4320f3b9f0425f35a34d2", "content_id": "3e7d903625b7f106e93e70f83458cc7e02ca90bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 996, "license_type": "no_license", "max_line_length": 55, "num_lines": 51, "path": "/2013-10-9/prime palindromes.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for picking the Prime palindromes.\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-10-16 10:55:43\n\"\"\"\n#define the faction for jurdge prime \n\"\"\"\nThis fuction is to jurdje if the vaule of i is a prime.\nIf ture the fuction will return Ture.\n\"\"\"\n\ndef prime(n):\n j=int(n**0.5)+1\n m=0\n i=3\n while i<j:\n if n%i==0:\n m+=1\n break\n i+=2\n return m==0\n \n\"\"\"\nThis fuction is to turn the number\n\"\"\"\ndef turn(n):\n m=str(n) \n tn=int(m[::-1])\n return tn\n\n#set the start of the number \nn=11\n#main part\n#set the times of loop \nwhile n<100000:\n #select the prime\n if int(str(n)[0])%2!=0:\n if prime(n)==True: \n #turn the number\n tn=turn(n)\n #jurdge if the turn number is prime\n if prime(tn)==True:\n #print the result\n print(n) \n n+=2 \n" }, { "alpha_fraction": 0.5581632852554321, "alphanum_fraction": 0.5954081416130066, "avg_line_length": 27.409420013427734, "blob_id": "16a6f2f7a9e6e9eefa084a7bd75d63b8b607f79a", "content_id": "7b69aece6eab1477e484f9e7b43650b14d31f36d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7840, "license_type": "no_license", "max_line_length": 120, "num_lines": 276, "path": "/2013-10-29/1130310226_段艺_3.2.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is a snake game\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-11-1 01:26:38\n\"\"\"\n\n#import the modles which will be made\nfrom Tkinter import *\nimport random,sys \n\n\"=== View Part ===\"\n \ndef draw_wall():\n '''\n This function is to draw the wall.\n '''\n canvas.create_line((13,13),(13,363),(13,363),(588,363),(588,363),(588,13),(588,13),(13,13),width=5,fill='blue')\n\ndef draw_score():\n '''\n This function is to display the score and level\n '''\n score() \n score_label.config(text='Score: %d 
Level:%d'%(gamescore,level)) \n    \ndef draw_food():\n    '''\n    This function is to draw the food.\n    '''\n    global foodx,foody,level,k,j\n    #delete the food which has already been eaten\n    canvas.delete('food')\n    random_food()\n    #delete the old food's coordinate\n    del foodX[k]\n    del foodY[k]\n    #put the new coordinate in the list\n    foodX.insert(k,foodx)\n    foodY.insert(k,foody)\n    #draw the food; the number of food items is decided by the level\n    for j in range(0,level):\n        #when the level goes up, put a new coordinate in the list\n        try:\n            canvas.create_rectangle(foodX[j],foodY[j],foodX[j]+step,foodY[j]+step,fill='red' ,tags=(\"food\")) \n        except:\n            #set a new food coordinate\n            random_food()\n            #add the new coordinate of the food to the list\n            foodX.insert(j,foodx)\n            foodY.insert(j,foody)\n            #draw the food\n            canvas.create_rectangle(foodX[j],foodY[j],foodX[j]+step,foodY[j]+step,fill='red' ,tags=(\"food\")) \n        else:\n            canvas.create_rectangle(foodX[j],foodY[j],foodX[j]+step,foodY[j]+step,fill='red' ,tags=(\"food\"))\n\ndef draw_snake():\n    '''\n    This function is to draw a snake.\n    '''\n    canvas.delete(\"snake\")\n    snake()\n    #draw the snake from the coordinates in the list\n    for i in range(0,gamescore/10+3): \n        canvas.create_rectangle(snakeX[i],snakeY[i],snakeX[i]+step,snakeY[i]+step,fill=('orange'),tags='snake') \n\n\"=== Model Part ===\"\n\ndef random_food():\n    '''\n    This function is to get a random coordinate for the food\n    '''\n    global foodx,foody,snakeX,snakeY\n    foodx=random.randrange(16,571,step)\n    foody=random.randrange(16,346,step)\n    #judge if the new food covers the snake; if so, regenerate the food\n    for i in range(0,len(snakeX)-1):\n        if foodx==snakeX[i] and foody==snakeY[i]:\n            random_food()\n\ndef snake():\n    '''\n    This function is to set the snake\n    '''\n    global snakeX,snakeY,new_x,new_y\n    #if a food is eaten, add a new coordinate to the list\n    if iseated()==True:\n        snakeX.insert(0,new_x)\n        snakeY.insert(0,new_y)\n        draw_food()\n    else:\n        snakeX.insert(0,new_x)\n        snakeY.insert(0,new_y)\n        #delete the old coordinate from the list\n        del snakeX[-1]\n        del snakeY[-1]\n    \ndef score():\n    '''\n    This function is to compute the score, level and speed\n    '''\n    global gamescore,level,speed\n    level=1+gamescore/100\n    if level<17:\n        speed=200-(level-1)*10\n    else:\n        speed=50\n    \n\"=== Control Part ===\"\n\ndef iseated():\n    '''\n    This function is to judge if the snake has eaten a food.\n    '''\n    global foodx,foody,gamescore,k,level\n    #judge if the food has been eaten\n    for k in range(0,level):\n        if foodX[k]==snakeX[0] and foodY[k]==snakeY[0]:\n            #when a food is eaten the score increases by ten\n            gamescore+=10\n            draw_score()\n            return True\n\ndef isdead():\n    '''\n    This function is to judge if the snake is dead\n    '''\n    if snakeX[0] < 16 or snakeX[0] > 571 or snakeY[0] < 16 or snakeY[0] > 346:\n        return True\n    for i in range(1,len(snakeX)-1):\n        if snakeX[i] == snakeX[0] and snakeY[i] == snakeY[0]:\n            return True \n    return False\n\ndef move(event):\n    '''\n    This function is to change the direction the snake moves\n    '''\n    global snakeDirection\n    if (event.keycode == 37 or event.keycode == 65) and snakeDirection!='RIGHT':\n        snakeDirection='LEFT'\n    elif (event.keycode == 39 or event.keycode == 68) and snakeDirection!='LEFT':\n        snakeDirection='RIGHT'\n    elif (event.keycode == 38 or event.keycode == 87) and snakeDirection!='DOWN':\n        snakeDirection='UP'\n    elif (event.keycode == 40 or event.keycode == 83) and snakeDirection!='UP':\n        snakeDirection='DOWN'\n    \ndef play():\n    '''\n    This function is the main part of the game\n    '''\n    global new_x,new_y,speed\n    #the main loop only stops when 
dead\n while True:\n #set the focus in the canvas\n canvas.focus_set()\n #get the key and and change the direction\n canvas.bind('<Key>',move)\n #move \n if snakeDirection=='LEFT':\n new_x = snakeX[0]-step \n elif snakeDirection=='RIGHT':\n new_x = snakeX[0]+step\n elif snakeDirection=='UP':\n new_y = snakeY[0]-step\n else:\n new_y = snakeY[0]+step\n if isdead()==True:\n #if the snake is dead break the game .\n break \n draw_snake()\n #set the speed\n canvas.after(speed)\n canvas.update()\n #if the snake is dead, use the 'gameover' function\n gameover()\n \ndef gameover():\n '''\n This function is to display the information of when dead\n '''\n #delete the items on the canvas and show the score.\n canvas.delete('food')\n canvas.delete('snake')\n canvas.create_text(300,100,text='Game Over!',font='Verdana 30 bold')\n canvas.create_text(300,200,text='Your score is : '+str(gamescore),font='Verdana 20 bold')\n canvas.create_text(300,275,text='Press R to retry and press Q to exit',font='Verdana 15 bold')\n canvas.create_text(400,325,text='Design by DuanYi \\nEmail:[email protected]',font='Verdana 10 bold')\n canvas.bind('<Key>',restart)\n\ndef restart(event):\n '''\n This function is to restart the game.\n ''' \n if event.keycode==81:\n #if user press Q ,exit\n sys.exit()\n if event.keycode==82:\n #delete all the items.\n canvas.delete(ALL)\n global gamescore,step,snakeX,snakeY,new_x,new_y,foodx,foody,snakeDirection,level,speed,foodX,foodY\n #reset all the variable\n step=15\n gamescore=0\n level=1 \n snakeX=[]\n snakeY=[]\n new_x=16+(5*step)\n new_y=16+(3*step)\n foodX=[0]\n foodY=[0] \n foodx=random.randrange(16,571,step)\n foody=random.randrange(16,346,step)\n for i in range(6,9):\n snakeX.append(16+(i*step))\n snakeY.append(16+(3*step))\n snakeDirection = 'DOWN'\n draw_wall()\n draw_score()\n draw_food()\n draw_snake()\n #replay \n play()\n#main part of the game\n#set the variable\nstep=15\ngamescore=0\nlevel=1\nk=j=0\nspeed=200\nsnakeX=[]\nsnakeY=[]\nnew_x=16+(5*step)\nnew_y=16+(3*step)\nfoodX=[0]\nfoodY=[0]\nfoodx=random.randrange(16,571,step)\nfoody=random.randrange(16,346,step)\n# to initialize the snake\nfor i in range(6,9):\n snakeX.append(16+(i*step))\n snakeY.append(16+(3*step))\n# to initialize the moving direction\nsnakeDirection = 'DOWN' \n# to draw the game frame \nwindow = Tk()\nwindow.geometry(\"600x400+10+10\")\nwindow.maxsize(600,400)\nwindow.minsize(600,400)\nwindow.title(\"Snake game\")\n\nframe1=Frame(window,height=370,width=600)\nframe2=Frame(window,height=30,width=600)\ncanvas=Canvas(frame1,bg='yellow',width=600,height=370)\nscore_label=Label(frame2,text='Score: %d Level:%d'%(gamescore,level))\n\nframe1.pack()\nframe2.pack(fill=BOTH)\nscore_label.pack(side=LEFT)\ncanvas.pack(fill=BOTH)\n \ndraw_wall()\ndraw_score()\ndraw_food()\ndraw_snake()\n\nplay()\n\nwindow.mainloop()" }, { "alpha_fraction": 0.4618473947048187, "alphanum_fraction": 0.5180723071098328, "avg_line_length": 17.481481552124023, "blob_id": "5c34ba3bbff5c14d40d2d43013162a2d74e8ddcd", "content_id": "b394913905b3894d7a85b1bd50163310bc561460", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 498, "license_type": "no_license", "max_line_length": 35, "num_lines": 27, "path": "/2013-10-13/test.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 13 16:12:27 2013\n\n@author: Administrator\n\"\"\"\n#modle two\n#import the fucation of sqrt\nfrom math import sqrt\nsum=0\n#set the loop 
range\nfor i in range(3,100):\n    if i%2==0:\n#skip the even numbers\n        continue\n    else:\n#add the odd number to the sum if it is prime\n        j=int(sqrt(i))+1\n        m=0 \n        for n in range(2,j):\n            if i%n==0:\n                m+=1\n                break\n        if m==0:\n            sum+=i\n#add 2, the only even prime, to the sum\nprint(2+sum)" }, { "alpha_fraction": 0.6407563090324402, "alphanum_fraction": 0.6832982897758484, "avg_line_length": 24, "blob_id": "1d3b69773cca43ce647154eb45fd256f370e59a6", "content_id": "61de33cc9f54b31235ea9b582c84da29a25ccc58", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1904, "license_type": "no_license", "max_line_length": 92, "num_lines": 75, "path": "/2013-10-27/1130310226_段艺_3.1.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for GUI.\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-10-27 16:23:36\n\"\"\"\n#import the necessary modules\nfrom Tkinter import *\nfrom tkFont import *\n#define the functions which will be used\ndef Rectangle():\n    '''\n    This function is to draw a red rectangle and clear the canvas\n    '''\n    root.title('Ex3.1.2')\n    canvas.delete('Str')\n    global Text\n    Text=''\n    t.set('Mouse Event')\n    canvas.create_rectangle(150,100,250,200,fill='red',tags='rectangle')\n\ndef keyboard(event):\n    '''\n    This function is to print what you type on the canvas, clearing the canvas before each keystroke.\n    '''\n    root.title('Ex3.1.3')\n    canvas.delete('rectangle')\n    t.set('Keyboard Event')\n    ft=Font(size=15,weight=BOLD)\n    global Text\n    Text+=event.char\n    canvas.delete('Str')\n    canvas.create_text(200,150,text=Text,font=ft,tags='Str')\n\ndef BK(event):\n    '''\n    This function is to delete the last letter which you typed.\n    '''\n    root.title('Ex3.1.3')\n    canvas.delete('rectangle')\n    t.set('Keyboard Event')\n    ft=Font(size=15,weight=BOLD)\n    global Text\n    Text=Text[0:len(Text)-1]\n    canvas.delete('Str')\n    canvas.create_text(200,150,text=Text,font=ft,tags='Str') \n#set the main window\nroot=Tk()\n#set the title of the window\nroot.title('Ex3.1.1')\n#define the global variable 'Text'\nText=''\n#set the text in the label and put the label on the screen\nt=StringVar()\nt.set('GUI widgets')\nlabel1=Label(root,textvariable =t)\n#set a canvas \ncanvas=Canvas(root,bg='white',width=400,height=300)\n#set a button which is to draw a red rectangle\nbutton1=Button(root,text='Rectangle',command=Rectangle)\n#focus the keyboard event\ncanvas.focus_set()\ncanvas.bind(\"<Key>\",keyboard)\ncanvas.bind(\"<BackSpace>\",BK)\n#pack the widgets\nlabel1.pack()\ncanvas.pack()\nbutton1.pack()\n#enter the main event loop \nroot.mainloop()" }, { "alpha_fraction": 0.5048859715461731, "alphanum_fraction": 0.5960912108421326, "avg_line_length": 19.46666717529297, "blob_id": "bcfc11187df2a6229610f1dd51ec01dd34b10d9e", "content_id": "43941061062fb6b19218f2d6e4dbdf6b35569579", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 307, "license_type": "no_license", "max_line_length": 45, "num_lines": 15, "path": "/2013-10-13/test2.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 13 17:33:25 2013\n\n@author: Administrator\n\"\"\"\n#isoweekday is a method of date objects, so only date needs importing\nfrom datetime import date\n#count the month-firsts that fall on a Sunday (isoweekday 7)\nsum=0\nfor year in range(1901,2001):\n    for month in range(1,13):\n        d=date(year,month,1)\n        c=d.isoweekday()\n        if c==7:\n            sum += 1\nprint(sum)\n" }, { "alpha_fraction": 0.6734142899513245, "alphanum_fraction": 
0.7219973206520081, "avg_line_length": 39.02702713012695, "blob_id": "034c61b89e3d11daded3410ec3be279a44cdaf8f", "content_id": "c6e23faa9e70b653282717059a6a21d9e8d769eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1482, "license_type": "no_license", "max_line_length": 118, "num_lines": 37, "path": "/2013-09-27/直线.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is to draw a line with the given coordinates and calculate the length between them.\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.1\ndate:2013-9-27 03:09:54\n\"\"\"\n\nx1,y1=eval(raw_input(\"please enter the coordinates of the first point:\")) #input the coordinates of the first point\nx2,y2=eval(raw_input(\"please enter the coordinates of the second point:\")) #input the coordinates of the second point\nlength=((x1-x2)**2+(y1-y2)**2)**0.5 #calculate the length between the two point\n\nimport turtle #import turtle module\nturtle.pensize(2) #set pen size to 2 pixels\nturtle.penup() #pull the pen up\nturtle.goto(x1,y1) #put the pen to (x1,y1)\nturtle.pendown() #pull the pen down\nturtle.goto(x2,y2) #draw the line\n\nturtle.penup() #pull the pen up\nturtle.goto(x1,y1-10) #put the pen to the top of the first point \nturtle.pendown() #pull the pen down\nturtle.write((x1,y1),False) #write the coordinate of the first point\n\nturtle.penup() #pull the pen up\nturtle.goto(x2,y2+10) #put the pen to the top of the second point \nturtle.pendown() #pull the pen down\nturtle.write((x2,y2),False) #write the coordinate of the second point\n\nturtle.penup() #pull the pen up\nturtle.goto(0.5*(x1+x2)+10,0.5*(y1+y2)+10) #put the pen at the middle of the line\nturtle.pendown() #pull the pen down\nturtle.write(\"the length of the line is : \"+str(length),False,align=\"left\") #write the length of the line\n\n" }, { "alpha_fraction": 0.5025792121887207, "alphanum_fraction": 0.5471628308296204, "avg_line_length": 27.913043975830078, "blob_id": "73c607da94c719efb97716cc67e473d5de61fcf4", "content_id": "b9296019418de716f0f7dec405d3f9d146e42b92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2720, "license_type": "no_license", "max_line_length": 106, "num_lines": 92, "path": "/2013-10-9/三角2.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for computing the angle of the triangle\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.1\ndate:2013年10月11日 21:22:04\n\"\"\"\n#import the necessary function\nfrom math import sqrt\nfrom math import acos\nfrom math import degrees\n\"\"\"\nthis function is to compute the length of the sides.\n\"\"\"\ndef length(a,b,c,d):\n l=sqrt((a-b)**2+(c-d)**2)\n return l\n\"\"\"\nthis function is to compute the radians of the angles.\n\"\"\"\ndef rad(a,b,c):\n r=acos((a*a-b*b-c*c)/(-2*b*c))\n return r\n\"\"\"\nthis function is to compute the slope of the sides.\n\"\"\" \ndef slope(a,b,c,d):\n k=(c-d)/(a-b)\n return k\n\"\"\"\nthis function is to out put the result.\n\"\"\"\ndef compute(x1,x2,x3,y1,y2,y3): \n try:\n #compute the length of three sides\n a=length(x1,x2,y1,y2)\n b=length(x1,x3,y1,y3)\n c=length(x2,x3,y2,y3)\n #compute the radians of the triangle\n A=rad(a,b,c)\n B=rad(b,c,a)\n C=rad(c,a,b)\n except:\n print(\"Error\")\n else:\n #output the 
result in radians\n        print(\"the radians of A,B,C is:\"+format(A,\".2f\")+\" \"+format(B,\".2f\")+\" \"+format(C,\".2f\"))\n        #switch radians into degrees\n        A1=degrees(A)\n        B1=degrees(B)\n        C1=degrees(C)\n        #output the result in degrees\n        print(\"the degrees of A,B,C is:\"+format(A1,\".2f\")+\" \"+format(B1,\".2f\")+\" \"+format(C1,\".2f\")) \n\n#input the coordinates of the points\ntry:\n    x1,y1=eval(raw_input(\"please enter the coordinate for the first point:\"))\n    x2,y2=eval(raw_input(\"please enter the coordinate for the second point:\"))\n    x3,y3=eval(raw_input(\"please enter the coordinate for the third point:\"))\nexcept:\n    print(\"Please enter the number!\")\nelse:\n    #judge if the points are the same point.\n    if (x1==x2 and y1==y2)or(x1==x3 and y1==y3)or(x2==x3 and y2==y3):\n        print(\"Don't input the same point!\")\n    #judge if the points are on the same line.\n    elif (x1==x2==x3)or(y1==y2==y3):\n        print(\"Three points are on the same line!\") \n    else:\n        try:\n            k1=slope(x1,x2,y1,y2)\n        except:\n            try:\n                k2=slope(x2,x3,y2,y3)\n            except:\n                print(\"Three points are on the same line!\")\n            else:\n                compute(x1,x2,x3,y1,y2,y3)\n        else:\n            try:\n                k2=slope(x2,x3,y2,y3)\n            except:\n                compute(x1,x2,x3,y1,y2,y3)\n            else:\n                if k1==k2:\n                    print(\"Three points are on the same line!\")\n                else:\n                    compute(x1,x2,x3,y1,y2,y3)\n     \n    \n   \n    \n    \n    \n" }, { "alpha_fraction": 0.6569230556488037, "alphanum_fraction": 0.7138461470603943, "avg_line_length": 24, "blob_id": "c158e99fa966b5ed24f3ff3e5e3637eae2ecc94e", "content_id": "186f07f9cb7e43ad72503a8e19c1dfade6d80c5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 650, "license_type": "no_license", "max_line_length": 60, "num_lines": 26, "path": "/2013-9-29/future investment value.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for calculating the future investment value. 
\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-9-29 14:51:56\n\"\"\"\n\n#enter the investment amount \na=eval(raw_input(\"Enter investment amount :\" ))\n#enter the annual interest rate\nr=eval(raw_input(\"Enter annual interest rate :\" ))\n#enter the number of years\ny=eval(raw_input(\"Enter number of years :\" ))\n#convert the unit of time and interest rate\nm=12*y\nr *= 0.01\nr /= 12\n#compute the future investment value\nvalue=str(format(a*(1+r)**m,\".2f\"))\n#output the result\nprint(\"Accumulated value is :\"+value)\n" }, { "alpha_fraction": 0.5387174487113953, "alphanum_fraction": 0.5880613327026367, "avg_line_length": 26.328283309936523, "blob_id": "745bf9fc92d3f4d5e940435710d7fed198718762", "content_id": "342a0bf412bede701b7d1ab11922c0d226767512", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5411, "license_type": "no_license", "max_line_length": 115, "num_lines": 198, "path": "/2013-10-29/3.2.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 29 18:53:41 2013\n\n@author: Administrator\n\"\"\"\n\nfrom Tkinter import *\nimport random,sys \n\n\n\"=== View Part ===\" \ndef draw_wall():\n canvas.create_line((13,13),(13,363),(13,363),(588,363),(588,363),(588,13),(588,13),(13,13),width=5,fill='blue')\n\ndef draw_score():\n score() # score model\n score_label.config(text='Score:'+str(gamescore)+' level:'+str(level)) # score view\n \ndef draw_food():\n global foodx,foody,level\n canvas.delete(\"food\")\n for _ in range(1,level+1):\n random_food()\n for i in range(0,len(snakeX)-1):\n if foodx==snakeX[i] and foody==snakeY[i]:\n random_food()\n canvas.create_rectangle(foodx,foody,foodx+step,foody+step,fill='red' ,tags=\"food\") #food view\n\ndef draw_snake():\n canvas.delete(\"snake\")\n snake() # snake model\n for i in range(0,len(snakeX)-1):\n '''ganescore+3''' # snake view\n canvas.create_rectangle(snakeX[i],snakeY[i],snakeX[i]+step,snakeY[i]+step,fill=('orange'),tags='snake') \n\n\"=== Model Part ===\"\n# food model\ndef random_food():\n global foodx,foody \n foodx=random.randrange(16,571,step)\n foody=random.randrange(16,346,step)\n\n# snake model\ndef snake():\n global snakeX,snakeY,new_x,new_y\n if iseated()==True:\n snakeX.insert(0,new_x)\n snakeY.insert(0,new_y)\n draw_food()\n else:\n snakeX.insert(0,new_x)\n snakeY.insert(0,new_y)\n del snakeX[-1]\n del snakeY[-1]\n \n#score model \ndef score():\n global gamescore,level\n if gamescore<10:\n level=1\n elif gamescore<20:\n level=2\n elif gamescore<30:\n level=3\n elif gamescore<40:\n level=4\n else:\n level=5\n score_label.config(text='Score:'+str(gamescore)+' level:'+str(level))\n \n \n\n\"=== Control Part ===\" \ndef iseated():\n global foodx,foody,gamescore\n if foodx==snakeX[0] and foody==snakeY[0]:\n gamescore+=5\n score()\n return True\n\ndef isdead():\n if snakeX[0] < 16 or snakeX[0] > 571 or snakeY[0] < 16 or snakeY[0] > 346:\n return True\n for i in range(1,len(snakeX)-1):\n if snakeX[i] == snakeX[0] and snakeY[i] == snakeY[0]:\n return True \n return False\n\ndef move(event):\n global snakeDirection\n if (event.keycode == 37 or event.keycode == 65) and snakeDirection!='RIGHT':\n snakeDirection='LEFT'\n elif (event.keycode == 39 or event.keycode == 68) and snakeDirection!='LEFT':\n snakeDirection='RIGHT'\n elif (event.keycode == 38 or event.keycode == 87) and snakeDirection!='DOWN':\n snakeDirection='UP'\n elif (event.keycode == 40 or 
event.keycode == 83) and snakeDirection!='UP':\n        snakeDirection='DOWN'\n\n#    draw_snake()\n    \ndef play():\n    global new_x,new_y,speed\n    while True:\n        canvas.focus_set()\n        canvas.bind('<Key>',move)\n        if snakeDirection=='LEFT':\n            new_x = snakeX[0]-step \n        elif snakeDirection=='RIGHT':\n            new_x = snakeX[0]+step\n        elif snakeDirection=='UP':\n            new_y = snakeY[0]-step\n        elif snakeDirection=='DOWN':\n            new_y = snakeY[0]+step\n        if isdead()==True:\n            break \n        draw_snake()\n        canvas.after(speed)\n        canvas.update() \n    gameover()\n    \n\ndef gameover():\n    canvas.delete('food')\n    canvas.delete('snake')\n    canvas.create_text(300,100,text='Game Over!',font='Verdana 30 bold',tags='text1')\n    canvas.create_text(300,200,text='Your score is : '+str(gamescore),font='Verdana 20 bold',tags='text2')\n    canvas.create_text(300,275,text='Press R to retry and press Q to exit',font='Verdana 15 bold',tags='text3')\n    canvas.bind('<Key>',restart)\n\ndef restart(event):\n    if event.keycode==81:\n        sys.exit()\n    if event.keycode==82:\n        canvas.delete(ALL)\n        global gamescore,step,snakeX,snakeY,new_x,new_y,foodx,foody,snakeDirection,level,speed\n        step=15\n        gamescore=0\n        level=1 \n        snakeX=[]\n        snakeY=[]\n        new_x=16+(5*step)\n        new_y=16+(3*step)\n        foodx=random.randrange(16,571,step)\n        foody=random.randrange(16,346,step)\n        for i in range(6,9):\n            snakeX.append(16+(i*step))\n            snakeY.append(16+(3*step))\n        snakeDirection = 'DOWN'\n        draw_wall()\n        draw_score()\n        draw_food()\n        draw_snake()\n        play()\n\n\nstep=15\ngamescore=0\nlevel=1 \nsnakeX=[]\nsnakeY=[]\nnew_x=16+(5*step)\nnew_y=16+(3*step)\nfoodx=random.randrange(16,571,step)\nfoody=random.randrange(16,346,step)\nspeed=200-(level-1)*10\n\nfor i in range(6,9):\n    snakeX.append(16+(i*step))\n    snakeY.append(16+(3*step))\n# to initialize the moving direction\nsnakeDirection = 'DOWN' \n# to draw the game frame \nwindow = Tk()\nwindow.geometry(\"600x400+10+10\")\nwindow.maxsize(600,400)\nwindow.minsize(600,400)\nwindow.title(\"Snake game\")\n\nframe1=Frame(window,height=370,width=600)\nframe2=Frame(window,height=30,width=600)\ncanvas=Canvas(frame1,bg='yellow',width=600,height=370)\nscore_label=Label(frame2,text='Score:'+str(gamescore)+' level:'+str(level))\n\nframe1.pack()\nframe2.pack(fill=BOTH)\nscore_label.pack(side=LEFT)\ncanvas.pack(fill=BOTH)\n \ndraw_wall()\ndraw_score()\ndraw_food()\ndraw_snake()\n\nplay()\n\nwindow.mainloop()\n" }, { "alpha_fraction": 0.4442887306213379, "alphanum_fraction": 0.4751226305961609, "avg_line_length": 21.610170364379883, "blob_id": "2a13e6c3e33db2cb2247c1f061ec3f075787048b", "content_id": "bb18e19d8bb0b35a559b95ef1af814db0340e4a9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1333, "license_type": "no_license", "max_line_length": 103, "num_lines": 59, "path": "/2013-10-17/1130310226_段艺_2.2.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for picking the circular primes.\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-10-16 22:20:32\n\"\"\"\ndef prime(n):\n    \"\"\"\n    This function is to judge if the value of n is a prime.\n    If true the function will return True.\n    \"\"\"\n    j=int(n**0.5)\n    m=0\n    i=3\n    while i<j:\n        if n%i==0:\n            m+=1\n            break\n        i+=2\n    return m==0\n\ndef prime_loop(n):\n    \"\"\"\n    This function is to rotate the digits of the number\n    \"\"\"\n    s=n[1:]+n[0]\n    return int(s)\n#set the value of the sum and n\nsum=0\nn=11\n#set the time of loop\nwhile n<1000000:\n    
#judge if 2\\4\\6\\8\\0\\5 are in the number, if so skip the number\n    m=str(n)\n    q=\"2\" not in m and \"4\" not in m and \"6\" not in m and \"8\" not in m and \"5\" not in m and \"0\" not in m\n    if q==True:\n        #judge if n is a prime\n        if prime(n)==True:\n            l=len(m)\n            i=1\n            #loop the number and judge if the number is a prime\n            while i<l:\n                s=prime_loop(m)\n                if prime(s)==False:\n                    break\n                else:\n                    m=str(s)\n                    i+=1\n            if i==l:\n                #count the number\n                sum+=1\n    n+=2\n#output the result\nprint(sum+4)" }, { "alpha_fraction": 0.49171751737594604, "alphanum_fraction": 0.5309503078460693, "avg_line_length": 23.89285659790039, "blob_id": "7862ee8b3a56986678616a92f2818f3b4671bb39", "content_id": "c8bda51fdeae6e8149ab3cef59a12f15f7db705b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1147, "license_type": "no_license", "max_line_length": 71, "num_lines": 46, "path": "/2013-10-9/digital black hole.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for proving \"Digital black hole\"\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-10-13 12:16:02\n\"\"\"\n#this function is for counting the number of the odd and even digits\ndef count_odd_and_even_number(n):\n    n=str(n)\n    i=j=0\n    for m in range(1,l+1):\n        #get the number in the string\n        a=int(n[-1*m])\n        #judge the parity of the number\n        if a%2==0:\n            #count the number of the odd and even\n            j += 1\n        else:\n            i += 1\n    b=str(j)+str(i)\n    return b\n\n#input the number which is needed to prove\ntry:\n    n=int(raw_input(\"please input an integer which is more than 99 : \"))\nexcept:\n    print(\"please enter a number!\")\nelse:\n    l=len(str(n))\n    if l<3:\n        print(\"The integer must be more than 99!\")\n    else:\n        #output the result\n        while True:\n            l=len(str(n))\n            m=count_odd_and_even_number(n)+str(l)\n            print(m)\n            n=int(m)\n            if n==123:\n                print(\"BLACK HOLE!\")\n                break\n        \n    \n    \n" }, { "alpha_fraction": 0.5958904027938843, "alphanum_fraction": 0.6027397513389587, "avg_line_length": 18.53333282470703, "blob_id": "e443439d52e4f5a1cb52365e003effd48ae8ce6a", "content_id": "e69f2f8fde93fba33574a5bbc7efb4552b646e64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 292, "license_type": "no_license", "max_line_length": 55, "num_lines": 15, "path": "/2013-11-20/贪吃蛇神马的都去死1/user_lst_r.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis temporary script file is located here:\nC:\\Users\\Administrator\\.spyder2\\.temp.py\n\"\"\"\ntry:\n    f = open('rank.txt','r')\nexcept:\n    f = open('rank.txt','a')\n    f.close()\nelse:\n    user_lst = [line.strip() for line in f.readlines()]\n    f.close()" }, { "alpha_fraction": 0.46676334738731384, "alphanum_fraction": 0.5, "avg_line_length": 36.10762405395508, "blob_id": "2cb5b7551b014793e078f14d6959882b23152e3c", "content_id": "df7d1972eb2da62c7c68dcf2dc63d3bfe552036e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8274, "license_type": "no_license", "max_line_length": 158, "num_lines": 223, "path": "/goodone.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "#-* coding: gbk -*\nfrom Tkinter import *\nimport random \nimport sys\nclass SnakeGame:\n    \"\"\"\n    It will go one more step before turning\n    \"\"\"\n\n    def __init__(self):\n        # moving step for snake and food\n        
self.step = 15\n # game score\n self.gamescore = 0\n # game level\n self.gamelevel = 0\n self.leveltime = 510\n self.firststarted = True\n # to initialize the snake in the range of (x1,y1,x2,y1) \n #r=random.randrange(29, 540, step = 15)\n r = 29 + 15 * 15\n self.snakeX=[r, r + self.step, r + self.step*2]\n self.snakeY=[150,150,150]\n \n # to initialize the moving direction\n # self.snakeMove[1]=up [2]=down [3]=left [4]=right; [0]=dx, [1]=dy\n self.snakeDirection = 3 #go left normally\n self.snakeMove = [[], [0, -self.step], [0, self.step], [-self.step, 0], [self.step, 0]]\n\n # to draw the game frame\n window = Tk()\n window.geometry(\"600x400+10+10\")\n window.maxsize(600,400)\n window.minsize(600,400)\n window.title(\"snake game\")\n self.frame1=Frame(window, borderwidth = 4, relief = RIDGE)\n self.frame2=Frame(window, borderwidth = 2, relief = RAISED, bg = \"white\")\n self.canvas=Canvas(self.frame1, width = 600, height = 368, bg = \"yellow\")\n self.score_label=Label(self.frame2)\n \n self.frame1.pack()\n self.frame2.pack(fill=BOTH)\n self.score_label.pack(side=LEFT)\n self.canvas.pack(fill=BOTH)\n \n self.draw_wall()\n self.draw_score()\n self.draw_food()\n self.draw_snake()\n \n self.play()\n \n window.mainloop()\n \"=== View Part ===\" \n def draw_wall(self):\n \"\"\"\n get a 570*345 game area:\n (14,15) (584,15)\n (14,360) (584,360)\n \"\"\"\n self.canvas.create_line(11,10,\n 11,360, width = 6, fill = \"blue\")\n self.canvas.create_line(8,364,\n 590,364, width = 8, fill = \"blue\")\n self.canvas.create_line(587,10,\n 587,360, width = 6, fill = \"blue\")\n self.canvas.create_line(8,12,\n 590,12, width = 6, fill = \"blue\")\n def draw_score(self):\n #self.score() # score model\n self.score_label.config(text = \"Level: \" + str(self.gamelevel) \\\n + \" Score: \" + str(self.gamescore)) # score view\n \n def draw_food(self):\n self.canvas.delete(\"food\")\n self.foodx,self.foody=self.random_food() #food model\n self.canvas.create_rectangle(self.foodx, self.foody, \n self.foodx + self.step, self.foody + self.step, fill=\"red\" , tags=\"food\") #food view\n\n def draw_snake(self):\n x,y=self.snake() # snake model\n if not self.isdead():\n self.canvas.delete(\"snake\")\n for i in range(0,len(x)): # snake view\n self.canvas.create_rectangle(x[i], y[i],\n x[i]+self.step, y[i]+self.step, \\\n fill = \"orange\", tags = \"snake\")\n else:\n self.gameover()\n \"=== Model Part ===\"\n # food model\n def random_food(self): \n while True:\n x, y = random.randrange(14,570, step = 15), random.randrange(15, 346, step = 15)\n if not (x in self.snakeX) and not (y in self.snakeY):\n return x, y\n # snake model\n def snake(self, mode = \"forward\"):\n \"\"\" \n Calculate next position of every part according to direction.\n \"\"\" \n length = len(self.snakeX)\n if mode == \"back\":\n for i in range(0, len(self.snakeX)-1):\n self.snakeX[i] = self.snakeX[i+1]\n self.snakeY[i] = self.snakeY[i+1]\n self.snakeX[len(self.snakeX)-1] -= self.snakeMove[self.snakeDirection][0]\n self.snakeY[len(self.snakeY)-1] -= self.snakeMove[self.snakeDirection][1]\n else:\n for i in range(length-1, 0, -1):\n self.snakeX[i] = self.snakeX[i-1]\n self.snakeY[i] = self.snakeY[i-1]\n self.snakeX[0] += self.snakeMove[self.snakeDirection][0]\n self.snakeY[0] += self.snakeMove[self.snakeDirection][1]\n if self.isdead():\n self.gameover()\n return self.snakeX, self.snakeY\n \n #score model \n def score(self):\n \"\"\"\n When the snake scores.\n \"\"\"\n self.gamescore += 10\n self.gamelevel = self.gamescore / 100\n if self.gamelevel > 
50:\n self.gamelevel = 50\n self.leveltime = 510 - self.gamelevel * 10\n self.snakeX.append(2*self.snakeX[0] - self.snakeX[1])\n self.snakeY.append(2*self.snakeY[0] - self.snakeY[1])\n \n \n \"=== Control Part ===\" \n def iseated(self):\n self.score()\n self.draw_score()\n self.draw_food()\n \n def isdead(self):\n if self.snakeX[0] < 14 or self.snakeX[0] > 569 or self.snakeY[0] < 15 or self.snakeY[0] > 345:\n return True\n \n length = len(self.snakeX)\n for i in range(length-1, 0, -1):\n if self.snakeX[i] == self.snakeX[0] and self.snakeY[i] == self.snakeY[0]:\n return True\n \n return False\n \n def move(self,event):\n\n if (event.keycode == 38 or event.char == 'w') \\\n and (self.snakeDirection == 3 or self.snakeDirection == 4):\n #self.back()\n self.snake(\"back\")\n self.snakeDirection = 1\n self.draw_snake()\n self.canvas.update()\n \n elif (event.keycode == 40 or event.char == 's') \\\n and (self.snakeDirection == 3 or self.snakeDirection == 4):\n self.snake(\"back\")\n self.snakeDirection = 2\n self.draw_snake()\n self.canvas.update()\n \n elif (event.keycode == 37 or event.char == 'a') \\\n and (self.snakeDirection == 1 or self.snakeDirection == 2 or self.firststarted):\n self.snake(\"back\")\n self.snakeDirection = 3\n self.draw_snake()\n self.canvas.update() \n \n elif (event.keycode == 39 or event.char == 'd') \\\n and (self.snakeDirection == 1 or self.snakeDirection == 2):\n self.snake(\"back\")\n self.snakeDirection = 4\n self.draw_snake()\n self.canvas.update()\n \n self.firststarted = False\n \n def play(self):\n self.canvas.config(highlightthickness = 0)\n self.canvas.focus_set()\n self.firststarted = True\n self.canvas.bind(\"<Key>\", self.move)\n while not self.isdead():\n self.draw_score()\n if self.snakeX[0]+self.snakeMove[self.snakeDirection][0]== self.foodx and self.snakeY[0]+self.snakeMove[self.snakeDirection][1] == self.foody:\n self.iseated()\n self.draw_snake()\n self.canvas.update()\n \n self.canvas.after(self.leveltime)\n \n def gameover(self):\n self.canvas.create_text(300, 150, \\\n text = \"You lose! 
Score: \" + str(self.gamescore) \\\n + \", level: \" + str(self.gamelevel), \\\n font = \"Times 20 bold\", tags = \"lose\")\n self.score_label.config(text = \"Press <q> to quit, any other key to restart...\")\n self.canvas.bind(\"<Key>\", self.restart)\n def restart(self,event):\n if event.char != 'q':\n self.canvas.delete(ALL)\n #re-initialize\n self.gamescore = 0\n self.gamelevel = 0\n self.leveltime = 510\n r = 29 + 15 * 15\n self.snakeX=[r, r + self.step, r + self.step*2]\n self.snakeY=[150,150,150]\n self.snakeDirection = 3\n self.draw_wall()\n self.draw_score()\n self.draw_food()\n self.draw_snake()\n \n self.play()\n else:\n sys.exit()\nSnakeGame()" }, { "alpha_fraction": 0.38513514399528503, "alphanum_fraction": 0.40878379344940186, "avg_line_length": 16.47058868408203, "blob_id": "2488b3cc6654b774effd374da025ddf936451c72", "content_id": "a8c0aa2757ec3f118b70322b568b33272d53735a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 40, "num_lines": 17, "path": "/2013-10-29/迭代.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "def f(n):\n \"\"\"\n this fucation is to computer the sum\n \"\"\"\n try:\n n=int(n)\n if n<0:\n print(\"error\")\n elif n==1:\n return 1\n else:\n result=n+f(n-1)\n return result\n except:\n print(\"error\")\n \nprint f(100)" }, { "alpha_fraction": 0.5167064666748047, "alphanum_fraction": 0.5704057216644287, "avg_line_length": 22.571428298950195, "blob_id": "2528e6a333bfebf3e41b6ebe9c1c17872ff3aaf6", "content_id": "7078d71df70af9061a7546ac0a90767b91cec047", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 838, "license_type": "no_license", "max_line_length": 58, "num_lines": 35, "path": "/2013-11-14/1130310226_段艺_4.2.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is the Pig Latin game. 
\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-11-14 21:51:03\n\"\"\"\n#import the re module\nimport re\n#set an empty list\nnew = []\n#make the words lower\ns = raw_input().lower()\ns = s.split()\n#set the re rules\nm1 = re.compile(r'([aeiou])(\\w*)')\nm2 = re.compile(r'(qu)(\\w*)')\nm3 = re.compile(r'([aeiouy])(\\w*)\\b')\n#change the words\nfor i in s:\n    if re.match(m1,i):\n        new.append(re.match(m1,i).group(0) + 'hay')\n    elif re.match(m2,i):\n        new.append((re.match(m2,i).group(0) + 'quay')[2:])\n    elif re.search(m3,i[1:]):\n        k = re.search(m3,i[1:]).start(0) + 1\n        new.append((i + i[:k] + 'ay')[k:])\n    else :\n        new.append(i + 'ay')\n#print the new string\nprint str(' '.join(new))\n \n" }, { "alpha_fraction": 0.5493406057357788, "alphanum_fraction": 0.6175534129142761, "avg_line_length": 25.287355422973633, "blob_id": "d3053582e2c96486ba78df765780259b0b09e6eb", "content_id": "342a0bf412bede701b7d1ab11922c0d226767512", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2199, "license_type": "no_license", "max_line_length": 115, "num_lines": 87, "path": "/2013-10-29/test.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 29 19:12:57 2013\n\n@author: Administrator\n\"\"\"\nfrom Tkinter import *\nimport random\n\ndef draw_wall():\n    canvas.create_line((10,10),(10,365),(10,365),(590,365),(590,365),(590,10),(590,10),(10,10),width=5,fill='blue')\n    \ndef draw_score():\n#    score()                                         # score model\n    score_label.config(text='Score:'+str(gamescore)) # score view\n    \ndef draw_food():\n    canvas.delete(\"food\")\n    foodx,foody=random_food()                                                    #food model\n    canvas.create_rectangle(foodx,foody,foodx+15,foody+15,fill='red',tags=\"food\") #food view\ndef random_food():    \n    foodx=random.randrange(15,575,15)\n    foody=random.randrange(15,350,15)\n    return foodx,foody\n\ndef move(event):\n    if (event.keycode == 37 or event.keycode == 65) and moveRight==False:\n        moveLeft = True\n        moveRight = False\n        moveUp = False\n        moveDown = False\n    if (event.keycode == 39 or event.keycode == 68) and moveLeft==False:\n        moveLeft = False\n        moveRight = True\n        moveUp = False\n        moveDown = False\n    if (event.keycode == 38 or event.keycode == 87) and moveDown==False:\n        moveLeft = False\n        moveRight = False\n        moveUp = True\n        moveDown = False\n    if (event.keycode == 40 or event.keycode == 83) and moveUp==False:\n        moveLeft = False\n        moveRight = False\n        moveUp = False\n        moveDown = True\n\n    new_x = snackX[0]\n    new_y = snackY[0]\n\n    if moveLeft == True:\n        new_x = snackX[0]-15\n    if moveRight == True:\n        new_x = snackX[0]+15\n    if moveUp == True:\n        new_y = snackY[0]-15\n    if moveDown == True:\n        new_y = snackY[0]+15\n    return new_x,new_y\n\n    \nstap=15\ngamescore=0\n\n\nwindow = Tk()\nwindow.geometry(\"600x400+10+10\")\n#window.maxsize(600,400)\n#window.minsize(600,400)\nwindow.title(\"Snake game\")\n\nframe1=Frame(window,height=370,width=600)\nframe2=Frame(window,height=30,width=600)\ncanvas=Canvas(frame1,bg='yellow',width=600,height=370)\nscore_label=Label(frame2,text='Score:'+str(gamescore))\n\n\nframe1.pack()\nframe2.pack(fill=BOTH)\nscore_label.pack(side=LEFT)\ncanvas.pack(fill=BOTH)\n\ndraw_wall()\ndraw_score()\ndraw_food()\n\nwindow.mainloop()" }, { "alpha_fraction": 0.6301518678665161, "alphanum_fraction": 0.7342733144760132, "avg_line_length": 27.78125, "blob_id": "4d73d26018b43ea24539e2b3e546fb7f871586b1", "content_id": "1daaceb1edb143f16edcae71c9101e9050da3c17", "detected_licenses": 
[], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 922, "license_type": "no_license", "max_line_length": 52, "num_lines": 32, "path": "/2013-09-27/五角星.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is to draw a five-pointed star\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-9-27 03:35:50\n\"\"\"\n\nimport turtle #import turtle module\nturtle.pensize(2) #set pen size to 2 pixels\nturtle.penup() #pull the pen up\nturtle.goto(-50,0) #set the pen to (-50,0)\nturtle.pendown() #pull the pen down\n\nturtle.forward(200) #put the pen forward 200 pixels\nturtle.right(144) #turn the pen right 144 degrees\n\nturtle.forward(200) #put the pen forward 200 pixels\nturtle.right(144) #turn the pen right 144 degrees\n\nturtle.forward(200) #put the pen forward 200 pixels\nturtle.right(144) #turn the pen right 144 degrees\n\nturtle.forward(200) #put the pen forward 200 pixels\nturtle.right(144) #turn the pen right 144 degrees\n\nturtle.forward(200) #put the pen forward 200 pixels\nturtle.right(144) #turn the pen right 144 degrees\n\n" }, { "alpha_fraction": 0.49240121245384216, "alphanum_fraction": 0.6048632264137268, "avg_line_length": 17.33333396911621, "blob_id": "f5525764147633e5daa0e4df04fd30d2acd2dc81", "content_id": "a169c5d0e229eb8f148903b12e7c2031ec61a1eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 50, "num_lines": 18, "path": "/mould/parking.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 25 14:59:47 2013\n\n@author: Administrator\n\"\"\"\nimport random\ndef parking(beg,end):\n if end-beg<1:\n return 0\n else:\n x=random.uniform(beg,end-1)\n return parking(beg,x)+parking(x+1,end)+1 \ns=0\n\nfor _ in range(0,100000):\n s+=parking(0,100)\nprint s/100000.0" }, { "alpha_fraction": 0.5658198595046997, "alphanum_fraction": 0.6836027503013611, "avg_line_length": 24, "blob_id": "7302aa2ed347fc7168b3c7697cc1a6a1c41ddcb0", "content_id": "99910bfdaf3631004da0451539b33fed927d1e5c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1299, "license_type": "no_license", "max_line_length": 115, "num_lines": 52, "path": "/2013-11-20/贪吃蛇神马的都去死1/firstpage.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 18 23:25:09 2013\n\n@author: Administrator\n\"\"\"\nfrom Tkinter import *\nimport random,sys\n\ndef draw_wall():\n '''\n This function is to draw the wall.\n '''\n canvas.create_line((13,13),(13,363),(13,363),(588,363),(588,363),(588,13),(588,13),(13,13),width=5,fill='blue')\n\ndef first_page():\n draw_wall()\n canvas.create_text(200,100,text='Snake',font='Times 50 bold')\n canvas.create_text(200,200,text='Game',font='Times 50 bold ')\n canvas.create_text(450,100,text='1.New ',font='Courier 25 bold')\n canvas.create_text(450,150,text='2.Load',font='Courier 25 bold')\n canvas.create_text(450,200,text='3.Rank',font='Courier 25 bold')\n canvas.create_text(450,250,text='4.Exit',font='Courier 25 bold')\n canvas.focus_set()\n# canvas.bind('<Key>',pass)\n\n\ngamescore=0\nlevel=1\n\nwindow = Tk()\nwindow.geometry(\"600x400+10+10\")\nwindow.maxsize(600,400)\nwindow.minsize(600,400)\nwindow.title(\"Snake 
game\")\n\nframe1=Frame(window,height=370,width=600)\nframe2=Frame(window,height=30,width=600)\ncanvas=Canvas(frame1,bg='yellow',width=600,height=370)\nscore_label=Label(frame2,text='Score: %d Level:%d'%(gamescore,level))\n\nframe1.pack()\nframe2.pack(fill=BOTH)\nscore_label.pack(side=LEFT)\ncanvas.pack(fill=BOTH)\n\nfirst_page()\n\n\n\n\nwindow.mainloop()" }, { "alpha_fraction": 0.550595223903656, "alphanum_fraction": 0.5982142686843872, "avg_line_length": 16.6842098236084, "blob_id": "62d722d1458bef7f514e40ae3ef487f9d028be2d", "content_id": "4ef9605c10c3fdecb97c5e4410fff35fc3f94d7b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 336, "license_type": "no_license", "max_line_length": 55, "num_lines": 19, "path": "/2013-11-20/贪吃蛇神马的都去死1/user_lst_w.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 18 21:19:30 2013\n\n@author: Administrator\n\"\"\"\nf = open('rank.txt','r')\nlst = [line.strip() for line in f.readlines()]\nf.close()\n\n\n\nlst.sort(key = lambda x:int(x.split()[1]),reverse=True)\n\nfor i in range(0,len(lst)-1):\n lst[i] += '\\n'\nf = open('rank.txt','w')\nf.writelines(lst)\nf.close()\n" }, { "alpha_fraction": 0.5065359473228455, "alphanum_fraction": 0.5457516312599182, "avg_line_length": 20.89285659790039, "blob_id": "4a1130944190240095f03541502221bd61786d18", "content_id": "e57ebe5f763b513a4f76272697e5dbef62fc4b0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 612, "license_type": "no_license", "max_line_length": 117, "num_lines": 28, "path": "/2013-11-20/贪吃蛇神马的都去死1/test2.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 20 13:08:51 2013\n\n@author: Administrator\n\"\"\"\nfrom Tkinter import *\nletter_lst = ['a','b','c','d','e','f','g','h','i''j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']\ncanvas = Canvas(root)\n\nword_order = ''.join(word_lst_random)\n\n\n\n\ni = 0\n\ndef now_letters():\n now_letter = []\n now_letter.append(word_order[i])\n now_letter.append(lst[random.range(0,26)])\n now_letter.append(lst[random.range(0,26)])\n \n \ndef draw_letters():\n for i in range(0,3):\n \n canvas.create_text(foodX[i] + 7,foodY[i] + 7,text = now_letter[i])" }, { "alpha_fraction": 0.4740259647369385, "alphanum_fraction": 0.5562770366668701, "avg_line_length": 17.520000457763672, "blob_id": "895b05b6c05a5a18dc44a09a068ec2637b15672b", "content_id": "b2c2ef4615895ae71b1227cc8d3e61bc4d4b8b45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 462, "license_type": "no_license", "max_line_length": 59, "num_lines": 25, "path": "/mould/is_prime.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is a mould to jurdge prime.\nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-10-16 10:49:40\n\"\"\"\ndef prime(n):\n \"\"\"\n This fuction is to jurdje if the vaule of i is a prime.\n If ture the fuction will return Ture.\n \"\"\"\n j=int(n**0.5)+1\n m=0\n i=3\n while i<j:\n if n%i==0:\n m+=1\n break\n i+=2\n return m==0" }, { "alpha_fraction": 0.6306098699569702, "alphanum_fraction": 0.6317606568336487, "avg_line_length": 26.1875, "blob_id": "0c373333fef0f86ad96344a862a298129ad4b0d7", "content_id": 
"a94ee6beaf1d9aa77d4a28cf71818bf2fa876f2a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 869, "license_type": "no_license", "max_line_length": 93, "num_lines": 32, "path": "/2013-11-27/6.1/1130310226_段艺_6.1/1130310226_段艺_6.1/mysite/addr_book/views.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "from django.template import Context, RequestContext\nfrom django.shortcuts import render_to_response\nfrom models import People\n\ndef add(req):\n\tempty = []\n\tif req.POST:\n\t\tpost = req.POST\n\t\tfor i in post.keys():\n\t\t\tif post[i] == '':\n\t\t\t\tempty.append(i + ' shouldn\\'t be empty!')\n\t\tif len(empty) == 0:\t\t\t\n\t\t\tnew_people = People(\n\t\t\t\tstudent_num = post[\"student_num\"],\n\t\t\t\tname = post[\"name\"],\n\t\t\t\tphone = post[\"phone\"],\n\t\t\t\temail = post[\"email\"],\n\t\t\t\tQQ = post[\"QQ\"],\n\t\t\t\taddress = post[\"address\"],\n\t\t\t\tbirthday = post[\"birthday\"],\n\t\t\t\t)\n\t\t\tif post[\"sex\"] == \"M\":\n\t\t\t\tnew_people.sex = True\n\t\t\telse:\n\t\t\t\tnew_people.sex = False\n\t\t\tnew_people.save()\n\treturn render_to_response(\"add.html\",{'empty':empty},context_instance = RequestContext(req))\n\ndef show(req):\n\tpeople_lst = People.objects.all()\n\tc = Context({\"people_lst\":people_lst})\n\treturn render_to_response(\"show.html\",c)" }, { "alpha_fraction": 0.5982906222343445, "alphanum_fraction": 0.621082603931427, "avg_line_length": 15.7619047164917, "blob_id": "97fd2d8268ca509a4e268639bdfffe828ddc7e61", "content_id": "11cf76e583cb677b81aacc675ba63212eaab4e5d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "no_license", "max_line_length": 43, "num_lines": 21, "path": "/2013-11-20/贪吃蛇神马的都去死1/.temp.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis temporary script file is located here:\nC:\\Users\\Administrator\\.spyder2\\.temp.py\n\"\"\"\ndate = open('111.txt','r')\n\nlst = [i.strip() for i in date.readlines()]\n\nfor i in lst:\n lst[lst.index(i)] = i + ' ok\\n'\ndate.close()\n\nprint lst\ndate = open('111.txt','w')\nfor i in lst:\n date.write(i)\n\ndate.close()" }, { "alpha_fraction": 0.5597147941589355, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 18.20689582824707, "blob_id": "9043680d1c3928e32c5705764e1ba1ea8213a54f", "content_id": "c3ceece7468777239ae940f12a654c37f1af24b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 561, "license_type": "no_license", "max_line_length": 38, "num_lines": 29, "path": "/2013-10-9/estimating_pi_main.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is for estimating pi \nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.1\ndate:2013-10-11 17:59:33\n\"\"\"\n#import the necessary function\nfrom math import sqrt\nfrom math import fabs\nfrom math import factorial\n#set the original value\npi1=0\nk=term=0\na=(2*sqrt(2)/9801)\n#computing pi\nwhile True:\n b=factorial(4*k)*(1103+26390*k)\n c=((factorial(k))**4)*(396**(4*k))\n term=a*b/c\n if fabs(term)<1e-15 :break\n pi1 += term\n k += 1\n#output the result\nprint(1/pi1) \n" }, { "alpha_fraction": 0.4442887306213379, "alphanum_fraction": 0.4751226305961609, "avg_line_length": 24.709091186523438, "blob_id": 
"0647a48493f60b915677d0921d97df9c6baad68b", "content_id": "fcfe2e389484701d622a7a838736b90b52df4aaf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1427, "license_type": "no_license", "max_line_length": 50, "num_lines": 55, "path": "/2013-11-10/1130310226_段艺_4.1.py", "repo_name": "XianYX/Python-", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\npython 2.7\nThis program is the Pig Latin game. \nIt's made by Duan Yi.\nStudent number:1130310226\nE-mail:[email protected]\nversion:1.0\ndate:2013-11-10 15:54:29\n\"\"\"\n#set the list of vowel and an empty list\nvowel1 = ['a','e','i','o','u']\nvowel2 = ['a','e','i','o','u','y']\nnew = []\n#let the user to input the string\nStr = raw_input()\n#make the letter in lowercase and split the string\ns = Str.lower()\ns = s.split()\n#loop the list to do with the words\nfor i in s:\n #if the str is only one letter\n if len(i) == 1:\n if i in vowel1:\n new_s = i + 'hay'\n new.append(new_s)\n else:\n new_s = i + 'ay'\n new.append(new_s)\n #the first letter is vowel\n elif i[0] in vowel1:\n new_s = i + 'hay'\n new.append(new_s)\n #the first two letters are 'qu'\n elif len(i) >= 2 and i[:2] == 'qu':\n new_s = (i + 'quay')[2:]\n new.append(new_s)\n #the first letter is other letters\n else:\n n = 0\n p = i[1:]\n for j in p :\n if j in vowel2 :\n k = p.index(j) + 1 \n new_s = (i + i[:k] + 'ay')[k:]\n new.append(new_s)\n break\n else:\n n += 1\n if n == len(i) - 1:\n new_s = i + 'ay'\n new.append(new_s)\n#print the new string\nprint str(' '.join(new))\n \n" } ]
53
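A minimal, self-contained sketch of the circular-prime idea described in the 1130310226_段艺_2.2.py entry above: rotate a number's digits and test every rotation for primality. The function names and the trial-division primality test here are illustrative choices, not taken from the repo.

```python
def is_prime(n):
    # trial division up to sqrt(n); 0, 1 and negatives are not prime
    if n < 2:
        return False
    i = 2
    while i * i <= n:
        if n % i == 0:
            return False
        i += 1
    return True

def rotations(n):
    # yield every cyclic rotation of n's digits, e.g. 197 -> 197, 971, 719
    s = str(n)
    for k in range(len(s)):
        yield int(s[k:] + s[:k])

def is_circular_prime(n):
    # a circular prime stays prime under every digit rotation
    return all(is_prime(r) for r in rotations(n))

# prints 13: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97
print(sum(1 for n in range(2, 100) if is_circular_prime(n)))
```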
UmangAjw/Youtube-Downloader
https://github.com/UmangAjw/Youtube-Downloader
d88ba747a05ebe83efb565093c1b7c26978b10c9
00c40e5519e824d2b251a778d7006784b7e2578a
ca2b82b23be144bc2262b9e0445ad0cd56e51f5b
refs/heads/master
2022-11-30T15:58:11.744240
2020-08-11T09:20:35
2020-08-11T09:20:35
286,690,407
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5680093765258789, "alphanum_fraction": 0.5863874554634094, "avg_line_length": 31.954225540161133, "blob_id": "ef02ed76677b83a3a9cfab4169c64d61f73004d2", "content_id": "d4a3ebfbf98138a2b044532dd5665af50c658d72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9359, "license_type": "no_license", "max_line_length": 105, "num_lines": 284, "path": "/App/YTdownloader.py", "repo_name": "UmangAjw/Youtube-Downloader", "src_encoding": "UTF-8", "text": "from pytube import YouTube\nfrom tkinter import *\nimport tkinter.ttk as ttk\nfrom tkinter.filedialog import askdirectory\nfrom PIL import Image, ImageTk, ImageSequence\nfrom threading import *\nfrom tkinter.messagebox import askyesno\nimport urllib.request\n\nvideo_size = 0\naudio_size = 0\n# Video downloading Thread\n\n\ndef downloadVideoThread():\n thread = Thread(target=videoDownloader)\n thread.start()\n\n\n# Video Download Progress\n\n\ndef downloadVideoProgress(chunk, file_handle, remaining):\n global download_status\n file_downloaded = video_size - remaining\n percentage = (file_downloaded/video_size) * 100\n download_status.config(text='{:00.0f} % downloaded'.format(percentage))\n\n# Video Downloader\n\n\ndef videoDownloader():\n global video_size, download_status\n video_btn.config(state=DISABLED)\n download_status.place(x=230, y=245)\n try:\n try_url = url.get()\n path = askdirectory()\n yt = YouTube(try_url, on_progress_callback=downloadVideoProgress)\n video = yt.streams.filter(\n progressive=True, subtype='mp4').order_by(\"resolution\").last()\n video_size = video.filesize\n video.download(path)\n download_status.place(x=155, y=245)\n download_status.config(text='Video Downloaded Successfully...')\n result = askyesno('Youtube Video Downloader',\n 'Do you want to download another Video?')\n if (result == 1):\n url.delete(0, END)\n video_btn.config(state=NORMAL)\n download_status.config(tect=' ')\n else:\n root.destroy()\n except Exception as e:\n if(str(e) == \"'cipher'\"):\n download_status.place(x=100, y=245)\n download_status.config(\n text='Failed! 
Most Likely, Copyrighted Content!!!')\n else:\n download_status.config(\n text='Failed!!!')\n result = askyesno('Youtube Video Downloader',\n 'Try Downloading Again?')\n if (result == 1):\n url.delete(0, END)\n video_btn.config(state=NORMAL)\n download_status.config(tect=' ')\n else:\n root.destroy()\n\n# Audio downloading Thread\n\n\ndef downloadAudioThread():\n thread = Thread(target=audioDownloader)\n thread.start()\n\n\n# Audio Download Progress\n\n\ndef downloadAudioProgress(chunk, file_handle, remaining):\n global download_status\n file_downloaded = audio_size - remaining\n percentage = (file_downloaded/audio_size) * 100\n download_status.config(text='{:00.0f} % downloaded'.format(percentage))\n\n# Audio Downloader\n\n\ndef audioDownloader():\n global audio_size, download_status\n audio_btn.config(state=DISABLED)\n download_status.place(x=230, y=245)\n\n try:\n try_url = url.get()\n path = askdirectory()\n yt = YouTube(try_url, on_progress_callback=downloadAudioProgress)\n audio = yt.streams.filter(\n only_audio=True).first()\n audio_size = audio.filesize\n audio.download(path)\n download_status.place(x=155, y=245)\n download_status.config(text='Audio Downloaded Successfully...')\n result = askyesno('Youtube Downloader',\n 'Do you want to download anything else from YouTube?')\n if (result == 1):\n url.delete(0, END)\n audio_btn.config(state=NORMAL)\n download_status.config(text=' ')\n else:\n root.destroy()\n except Exception as e:\n if(str(e) == \"'cipher'\"):\n download_status.place(x=100, y=245)\n download_status.config(\n text='Failed! Most Likely, Copyrighted Content!!!')\n else:\n download_status.config(\n text='Failed!!!')\n result = askyesno('Youtube Downloader',\n 'Try Downloading Again?')\n if (result == 1):\n url.delete(0, END)\n audio_btn.config(state=NORMAL)\n download_status.config(text=' ')\n else:\n root.destroy()\n\n# Thumbnail Downloader\n\n\ndef thumbnailDownloader():\n global download_status\n thumbnail_btn.config(state=DISABLED)\n download_status.place(x=230, y=245)\n\n try:\n try_url = url.get()\n path = askdirectory()\n yt = YouTube(try_url)\n thumb_url = str(yt.thumbnail_url)\n url_title = str(yt.title)\n if(len(url_title) > 10):\n url_title = url_title[:9]\n url_title += ' Thumbnail.jpg'\n url_title = url_title.replace(' ', '_')\n path += '\\\\' + url_title\n urllib.request.urlretrieve(thumb_url, path)\n download_status.place(x=145, y=245)\n download_status.config(text='Thumbnail Downloaded Successfully...')\n result = askyesno('Youtube Downloader',\n 'Do you want to download anything else from YouTube?')\n if (result == 1):\n url.delete(0, END)\n thumbnail_btn.config(state=NORMAL)\n download_status.config(text=' ')\n else:\n root.destroy()\n except Exception as e:\n if(str(e) == \"'cipher'\"):\n download_status.place(x=100, y=245)\n download_status.config(\n text='Failed! 
Most Likely, Copyrighted Content!!!')\n else:\n download_status.config(\n text='Failed!!!')\n result = askyesno('Youtube Downloader',\n 'Try Downloading Again?')\n if (result == 1):\n url.delete(0, END)\n audio_btn.config(state=NORMAL)\n download_status.config(tect=' ')\n else:\n root.destroy()\n\n# Tool tip\n\n\nclass ToolTip(object):\n\n def __init__(self, widget):\n self.widget = widget\n self.tipwindow = None\n self.id = None\n self.x = self.y = 0\n\n def showtip(self, text):\n self.text = text\n if self.tipwindow or not self.text:\n return\n x, y, cx, cy = self.widget.bbox(\"insert\")\n x = x + self.widget.winfo_rootx() + 57\n y = y + cy + self.widget.winfo_rooty() + 37\n self.tipwindow = tw = Toplevel(self.widget)\n tw.wm_overrideredirect(1)\n tw.wm_geometry(\"+%d+%d\" % (x, y))\n label = Label(tw, text=self.text, justify=LEFT,\n background=\"#fff\", relief=SOLID, borderwidth=1,\n font=(\"Lato Light\", \"10\", \"normal\"))\n label.pack(ipadx=1)\n\n def hidetip(self):\n tw = self.tipwindow\n self.tipwindow = None\n if tw:\n tw.destroy()\n\n\ndef CreateToolTip(widget, text):\n toolTip = ToolTip(widget)\n\n def enter(event):\n toolTip.showtip(text)\n\n def leave(event):\n toolTip.hidetip()\n widget.bind('<Enter>', enter)\n widget.bind('<Leave>', leave)\n\n\nif __name__ == \"__main__\":\n root = Tk()\n root.call('wm', 'iconphoto', root._w, PhotoImage(\n file=r'C:\\Users\\umang\\Desktop\\Projects\\YouTube Video Downloader\\img\\YTD_logo.png'))\n root.wm_geometry(\"600x400\")\n root.title('YouTube Downloader')\n root['bg'] = 'white'\n root.resizable(0, 0)\n\n img = Image.open(\n r'C:\\Users\\umang\\Desktop\\Projects\\YouTube Video Downloader\\img\\YTD_banner_fit.png') # Image Logo\n img = img.resize((640, 80), Image.ANTIALIAS) # Image resize\n img = ImageTk.PhotoImage(img)\n head = Label(root, image=img)\n head.config(anchor=CENTER)\n head.pack()\n\n enter_url = Label(root, text='Enter URL: ', bg=\"#fff\")\n enter_url.config(font=('Lato', 13))\n enter_url.place(x=50, y=135)\n url = Entry(root, width=38, border=1,\n relief=SUNKEN, font=('Lato Light', 12))\n # url.insert(0, 'Enter YouTube URL')\n url.place(x=150, y=135)\n\n style = ttk.Style()\n style.configure('C.TButton', font=('Lato Light', 12), borderwidth=0)\n style.map(\"C.TButton\",\n foreground=[('pressed', 'red'), ('active', '#ED1E2E')],\n background=[('pressed', '!disabled', '#ED1E2E'),\n ('active', '#ED1E2E')]\n )\n\n video_btn = ttk.Button(root, text=\"Video Download\",\n style=\"C.TButton\", command=downloadVideoThread)\n video_btn.place(x=53, y=180)\n CreateToolTip(video_btn, 'Best Quality Video')\n\n audio_btn = ttk.Button(root, text=\"Audio Download\",\n style=\"C.TButton\", command=downloadAudioThread)\n audio_btn.place(x=192, y=180)\n CreateToolTip(audio_btn, 'Best Quality Audio')\n\n thumbnail_btn = ttk.Button(root, text=\"Thumbnail Download\",\n style=\"C.TButton\", command=thumbnailDownloader)\n thumbnail_btn.place(x=338, y=180)\n CreateToolTip(thumbnail_btn, 'Best Quality Thumbnail')\n\n footer = Label(root, text=\"Made with by Umang Ajwalia\", bg=\"#fff\")\n footer.config(font=('Lato Light', 12))\n footer.place(x=185, y=370)\n\n footer_heart_img = Image.open(\n r'C:\\Users\\umang\\Desktop\\Projects\\YouTube Video Downloader\\img\\heart.png')\n footer_heart_img = footer_heart_img.resize((16, 16), Image.ANTIALIAS)\n footer_heart_img = ImageTk.PhotoImage(footer_heart_img)\n footer_heart_label = Label(root, image=footer_heart_img, bg=\"#fff\")\n footer_heart_label.place(x=265, y=374)\n\n download_status = Label(root, 
text='Please wait...',\n                            font=('Lato Light', 15), bg='white')\n    root.mainloop()\n" }, { "alpha_fraction": 0.7400000095367432, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 59.344825744628906, "blob_id": "8caf4596e30555450709c0c5cacc744690118771", "content_id": "f31976e109bc042a8d879fb58b3a0cf6671f8734", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1750, "license_type": "no_license", "max_line_length": 340, "num_lines": 29, "path": "/README.md", "repo_name": "UmangAjw/Youtube-Downloader", "src_encoding": "UTF-8", "text": "# Youtube-Downloader\n## Introduction & Features\nYouTube Downloader is an application that lets you download a video, the audio track of a video, and the thumbnail of a video. You get the best quality available for that YouTube video, and the thumbnail is fetched at the maximum resolution uploaded by the user. The application uses a tkinter GUI.\n\n## Instructions to use\nYou need Python on your system and, additionally, the pytube package; tkinter and the other modules are already included.\n\n### Installing pytube\n```pip install pytube3```\n\n### Protips\nBy default you get the best quality video, audio & thumbnail. If you want other resolutions of video & audio, you can simply list all the available streams using\n```video = yt.streams.all()```\nTo get different thumbnail resolutions:\n- https://img.youtube.com/vi/video_id/default.jpg (For default resolution)\n- https://img.youtube.com/vi/video_id/sddefault.jpg (For standard resolution)\n- https://img.youtube.com/vi/video_id/mqdefault.jpg (For medium quality resolution)\n- https://img.youtube.com/vi/video_id/hqdefault.jpg (For high quality resolution)\n- https://img.youtube.com/vi/video_id/maxresdefault.jpg (For maximum resolution)\n\n// video_id -> the id in the url query. For example: https://www.youtube.com/watch?v=video_id\n \n### App Screenshot\n<kbd>\n  <img src=\"https://user-images.githubusercontent.com/39110739/89879787-12ef0400-dbe1-11ea-91cf-7013423dbbbc.PNG\">\n</kbd>\n\n### Note \nYou won't be able to download the video/audio/thumbnail of copyrighted videos (most songs produced by any label). For thumbnails, you can still download via the video id using the method above.\n" } ]
2
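Stripped of its tkinter UI, the download flow in the YTdownloader.py entry above reduces to the sketch below. It uses only pytube calls that already appear in the record (`YouTube`, `streams.filter`, `order_by`, `download`, `thumbnail_url`, `urllib.request.urlretrieve`); the URL and output locations are placeholders, not values from the repo.

```python
from pytube import YouTube
import urllib.request

url = "https://www.youtube.com/watch?v=VIDEO_ID"  # placeholder URL
out_dir = "."                                     # placeholder output directory

yt = YouTube(url)

# highest-resolution progressive mp4 (video+audio muxed), as in videoDownloader()
video = yt.streams.filter(progressive=True, subtype="mp4").order_by("resolution").last()
video.download(out_dir)

# best available audio-only stream, as in audioDownloader()
audio = yt.streams.filter(only_audio=True).first()
audio.download(out_dir)

# thumbnail via the URL pytube exposes, as in thumbnailDownloader()
urllib.request.urlretrieve(yt.thumbnail_url, "thumbnail.jpg")
```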
fgrizelj/pogrebno
https://github.com/fgrizelj/pogrebno
8a18ba330753c6566f6ccb66ae84ec32b08dd815
f97520a955cd65290c14d88bc0e456187acdba65
7b25c7566bb85e3f9793849de4a21a95ff1325ce
refs/heads/master
2020-11-28T08:39:08.996919
2020-11-20T11:16:07
2020-11-20T11:16:07
181,015,238
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6458557844161987, "alphanum_fraction": 0.6465733647346497, "avg_line_length": 29.30434799194336, "blob_id": "1d906173c0c0670c0faed50c588bebc05a3e57a6", "content_id": "e740d27b352e20e5a6d24e7ce5b90a828b611a6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2787, "license_type": "no_license", "max_line_length": 255, "num_lines": 92, "path": "/src/components/table/Table.js", "repo_name": "fgrizelj/pogrebno", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from \"react\";\nimport TableHeaderRow from \"./TableHeaderRow\";\nimport TableDataRow from \"./TableDataRow\";\nimport SimpleFilter from \"../filters/simple/SimpleFilter\"\nimport {toJSDateFromCustomDate} from \"../../util/Util\";\n\nimport \"./Table.css\";\n\nfunction Table(props) {\n\n // Assumptions:\n // props.data is an array of objects\n // each object has an id property which can be used as react key\n\n //Error checking\n if (props.data.length > 0) {\n const firstRow = props.data[0];\n props.columnDefinitions.forEach(columnDefinition => {\n if (firstRow[columnDefinition.name] === undefined) {\n console.error(\"Column name \" + columnDefinition.name + \" doesnt exist in data\");\n }\n });\n }\n\n //Hooks\n const [data, setData] = useState(props.data);\n const [columnDefinitions, setcolumnDefinitions] = useState(props.columnDefinitions);\n \n useEffect(() => {\n setData(props.data);\n setcolumnDefinitions(props.columnDefinitions);\n }, [props.data]);\n\n //Rows clicked callback\n function cellClicked(cellData, rowData) {\n props.cellClicked(cellData, rowData);\n }\n\n //Filter matched indexes callback\n function onMatchedIndexes(mathcedIndexes) {\n let filteredData = mathcedIndexes.map(mi => props.data[mi]);\n setData(filteredData);\n }\n \n //Rendering\n let tableDataRows = [];\n let filter;\n let filterClass = \"no-filter\";\n let title;\n let titleClass = \"no-title\";\n let footer;\n let footerClass = \"no-footer\";\n\n data.forEach(rowData => {\n tableDataRows.push(<TableDataRow key={rowData.id} rowData = {rowData} columnDefinitions = {columnDefinitions} cellClicked = {props.cellClicked !== undefined ? 
cellClicked : undefined} highlightedRowHover = {props.highlightedRowHover !== undefined}/>);\n });\n\n if (props.filter !== undefined) {\n filter = <SimpleFilter list = {props.data.map(item => {let result = \"\"; Object.keys(item).forEach(k => result = result + \" \" + item[k].toString()); return result;})} onMatchedIndexes = {onMatchedIndexes}/>\n filterClass = \"filter\";\n }\n\n if (props.title !== undefined) {\n title = <div className = {\"table-title\"}>{props.title}</div>\n titleClass = \"title\";\n }\n\n if (props.footerFunc !== undefined) {\n footer = <div className = {\"table-footer\"}>{props.footerFunc(data)}</div>\n footerClass = \"footer\";\n }\n\n return (\n <>\n {title}\n {filter}\n <div className = {\"table-scroll-container \" + filterClass + \" \" + titleClass + \" \" + footerClass}>\n <table className = {\"table\"}>\n <thead>\n <TableHeaderRow columnDefinitions = {columnDefinitions}/>\n </thead>\n <tbody>\n {tableDataRows}\n </tbody>\n </table>\n </div>\n {footer}\n </>\n );\n}\n\nexport default Table;" }, { "alpha_fraction": 0.7426160573959351, "alphanum_fraction": 0.7426160573959351, "avg_line_length": 58.25, "blob_id": "b17d7379f93fd52c2c8a8ba76a9f2168cef3fce8", "content_id": "906044eca02551751eef620e91eff224bad821d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 237, "license_type": "no_license", "max_line_length": 75, "num_lines": 4, "path": "/README.md", "repo_name": "fgrizelj/pogrebno", "src_encoding": "UTF-8", "text": "- finish setup for [pogrebno_api](https://github.com/fgrizelj/pogrebno_api)\n- `npm install` - installs node modules\n- `npm run-script watch` - builds the project in /home/user/public_html\n- App should now work, check at localhost/~user/\n" }, { "alpha_fraction": 0.7116666436195374, "alphanum_fraction": 0.7116666436195374, "avg_line_length": 30.63157844543457, "blob_id": "0d9b7cbfe82585dac3bd1d7666a49307cd839220", "content_id": "2439f14ea60365beabdc5fd8a4f1426382fac83d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 600, "license_type": "no_license", "max_line_length": 154, "num_lines": 19, "path": "/src/components/table/TableDataCell.js", "repo_name": "fgrizelj/pogrebno", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from \"react\";\n\nimport \"./TableDataCell.css\"\n\nfunction TableDataCell(props) {\n let customClasses = \"\";\n if (props.columnDefinition.cellCustomClassFunc !== undefined) {\n customClasses = \" \" + props.columnDefinition.cellCustomClassFunc(props.cellData);\n }\n\n function onClick() {\n props.cellClicked({[props.columnDefinition.name]: props.cellData});\n }\n return (\n <td className = {\"table-data-cell\" + customClasses} onClick = {props.cellClicked !== undefined ? 
onClick : undefined}>{props.cellData.toString()}</td>\n );\n}\n\nexport default TableDataCell;" }, { "alpha_fraction": 0.6183485984802246, "alphanum_fraction": 0.6256880760192871, "avg_line_length": 24.9761905670166, "blob_id": "ee5df6e27056a0978b19e8adb7f1d9ec9c3add87", "content_id": "e629f1c508f70ecf96a53a86bf67e9562f050913", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1091, "license_type": "no_license", "max_line_length": 104, "num_lines": 42, "path": "/src/components/filters/simple/SimpleFilter.js", "repo_name": "fgrizelj/pogrebno", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from \"react\";\n\nimport \"./SimpleFilter.css\"\n\nfunction SimpleFilter(props) {\n let applyFilterTimeoutId = null;\n\n function onInput(e) {\n if (applyFilterTimeoutId > -1) {\n clearTimeout(applyFilterTimeoutId);\n applyFilterTimeoutId = -1;\n }\n\n const filters = e.target.value.split(\" \");\n applyFilterTimeoutId = setTimeout(applyFilters, 500, filters);\n }\n\n function applyFilters(filters) {\n let matchedIndexes = []\n for (let i = 0; i < props.list.length; i++) {\n const listItem = props.list[i];\n let filterMatchCount = 0;\n for (let j = 0; j < filters.length; j++) {\n const filter = filters[j];\n if(listItem.toLowerCase().includes(filter.toLowerCase())) {\n filterMatchCount++;\n }\n }\n if(filterMatchCount === filters.length) {\n matchedIndexes.push(i);\n }\n }\n\n props.onMatchedIndexes(matchedIndexes);\n }\n\n return (\n <input type = \"text\" className = {\"simple-filter\"} placeholder = \"Pretraži...\" onInput = {onInput}/>\n );\n}\n\nexport default SimpleFilter;" }, { "alpha_fraction": 0.6239837408065796, "alphanum_fraction": 0.6310975551605225, "avg_line_length": 37.33766174316406, "blob_id": "a83d963b775090019ab91b40216a6b26551928f1", "content_id": "5ca82b43b5dde6d1b20cfc7c366e00b272ee10f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2952, "license_type": "no_license", "max_line_length": 163, "num_lines": 77, "path": "/src/App.js", "repo_name": "fgrizelj/pogrebno", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from \"react\";\nimport Subjects from \"./components/Subjects\";\nimport Subject from \"./components/Subject\";\nimport Finances from \"./components/Finances\";\nimport Objects from \"./components/Objects\";\nimport Object from \"./components/Object\";\n\nimport \"./App.css\";\n\nfunction App(props) {\n const [selectedTab, setSelectedTab] = useState(0);\n const [subjectData, setSubjectData] = useState([]);\n const [objectData, setObjectData] = useState(0);\n\n function tabChangeRequested(requestedTab, tabChangeRequestData) {\n setSelectedTab(requestedTab);\n switch(requestedTab) {\n case 1:\n setSubjectData(tabChangeRequestData);\n break;\n case 4:\n setObjectData(tabChangeRequestData);\n break;\n }\n }\n\n function tabButtonClicked(e) {\n switch (e.target.id) {\n case \"subjects-tab\":\n setSelectedTab(0);\n break;\n case \"subject-tab\":\n setSelectedTab(1);\n break;\n case \"finances-tab\":\n setSelectedTab(2);\n break;\n case \"objects-tab\":\n setSelectedTab(3);\n break;\n case \"object-tab\":\n setSelectedTab(4);\n break;\n default:\n break;\n }\n }\n\n return (\n <>\n <div className = {\"tab-buttons\"}>\n <button id = \"subjects-tab\" className = {selectedTab == 0 ? 
\"tab-button active\" : \"tab-button inactive\"} onClick = {tabButtonClicked}>Subjekti</button>\n {/*<button id = \"subject-tab\" className = {selectedTab == 1 ? \"tab-button active\" : \"tab-button inactive\"} onClick = {tabButtonClicked}>Subjekt</button>*/}\n <button id = \"finances-tab\" className = {selectedTab == 2 ? \"tab-button active\" : \"tab-button inactive\"} onClick = {tabButtonClicked}>Financije</button>\n <button id = \"objects-tab\" className = {selectedTab == 3 ? \"tab-button active\" : \"tab-button inactive\"} onClick = {tabButtonClicked}>Objekti</button>\n {/*<button id = \"object-tab\" className = {selectedTab == 4 ? \"tab-button active\" : \"tab-button inactive\"} onClick = {tabButtonClicked}>Objekt</button>*/}\n </div>\n <div className = {selectedTab == 0 ? \"tab-content active\" : \"tab-content inactive\"}>\n <Subjects subjectClicked = {(tabChangeRequestData) => tabChangeRequested(1, tabChangeRequestData)}/>\n </div>\n <div className = {selectedTab == 1 ? \"tab-content active\" : \"tab-content inactive\"}>\n <Subject data = {subjectData}/>\n </div>\n <div className = {selectedTab == 2 ? \"tab-content active\" : \"tab-content inactive\"}>\n <Finances/>\n </div>\n <div className = {selectedTab == 3 ? \"tab-content active\" : \"tab-content inactive\"}>\n <Objects objectClicked = {(tabChangeRequestData) => tabChangeRequested(4, tabChangeRequestData)}/>\n </div>\n <div className = {selectedTab == 4 ? \"tab-content active\" : \"tab-content inactive\"}>\n <Object data={objectData}/>\n </div>\n </>\n );\n}\n\nexport default App;\n" }, { "alpha_fraction": 0.5396305322647095, "alphanum_fraction": 0.5403754711151123, "avg_line_length": 37.36000061035156, "blob_id": "039c97820532d204a1ce50dd1bde968b132642d8", "content_id": "08c62aa1341078674e5be71e5710c2f0191b3604", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 6716, "license_type": "no_license", "max_line_length": 131, "num_lines": 175, "path": "/src/components/Subject.js", "repo_name": "fgrizelj/pogrebno", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from \"react\";\nimport Table from \"./table/Table\";\nimport {toCurrency, toMinorDenomination, toJSDateFromISODate} from \"../util/Util\"\n\nimport \"./Subject.css\";\n\nfunction Subject(props) {\n const [objectsData, setObjectsData] = useState([]);\n const [invoicesData, setInvoicesData] = useState([]);\n const [transfersData, setTransfersData] = useState([]);\n\n useEffect(() => {\n if (props.data.id !== undefined) {\n fetch(\"pogrebno_api/subjects/\" + props.data.id + \"/objects\")\n .then(response => response.json())\n .then(result => {\n if (result.error !== undefined) {\n console.error(result.error);\n }\n else {\n setObjectsData(result);\n }\n });\n\n fetch(\"pogrebno_api/subjects/\" + props.data.id + \"/invoices\")\n .then(response => response.json())\n .then(result => {\n if (result.error !== undefined) {\n console.error(result.error);\n }\n else {\n result.forEach(element => {\n element.debt = toCurrency(element.debt);\n element.credit = toCurrency(element.credit);\n element.invoice_date = toJSDateFromISODate(element.invoice_date);\n });\n setInvoicesData(result);\n }\n });\n\n fetch(\"pogrebno_api/subjects/\" + props.data.id + \"/transfers\")\n .then(response => response.json())\n .then(result => {\n if (result.error !== undefined) {\n console.error(result.error);\n }\n else {\n result.forEach(element => {\n element.amount = toCurrency(element.amount);\n element.transfer_date = 
toJSDateFromISODate(element.transfer_date);\n });\n setTransfersData(result);\n }\n });\n }\n }, [props]);\n\n function classForTransfersDataCells(value) {\n if (value < 0) {\n return \"less-than-zero\";\n }\n else if (value > 0) {\n return \"greater-than-zero\";\n }\n }\n\n function classForInvoicesDataCells(value) {\n if (value === \"Da\") {\n return \"paid\";\n }\n else if (value === \"Ne\") {\n return \"not-paid\";\n }\n else if (value === \"Dj.\") {\n return \"partial\";\n }\n }\n\n function footerFuncForInvoices(data) {\n let sumDebt = 0;\n let sumCredit = 0;\n data.forEach(item => {\n sumDebt += toMinorDenomination(item.debt);\n sumCredit += toMinorDenomination(item.credit);\n });\n return \"N: \" + data.length + \", \" + \"SumD: \" + toCurrency(sumDebt)+ \", \" + \"SumP: \" + toCurrency(sumCredit);\n }\n\n function footerFuncForTransfers(data) {\n let sumAmount = 0;\n data.forEach(item => {\n sumAmount += toMinorDenomination(item.amount);\n });\n return \"N: \" + data.length + \", \" + \"SumI: \" + toCurrency(sumAmount);\n }\n\n return (\n <>\n <div className = {\"subject-top\"}/>\n <div className = {\"subject-container\"}>\n <div className = {\"subject-info\"}>\n <div className = {\"subject-info-item\"}>\n <div className = {\"subject-info-item-title\"}>Ime</div>\n <div className = {\"subject-info-item-text\"}>{props.data.first_name} {props.data.last_name} {props.data.nick_name}</div>\n </div>\n <div className = {\"subject-info-item\"}>\n <div className = {\"subject-info-item-title\"}>Adresa</div>\n <div className = {\"subject-info-item-text\"}>{props.data.address}, {props.data.city}</div>\n </div>\n <div className = {\"subject-info-item\"}>\n <div className = {\"subject-info-item-title\"}>Kontakt</div>\n <div className = {\"subject-info-item-text\"}>{props.data.mob}</div>\n <div className = {\"subject-info-item-text\"}>{props.data.email}</div>\n </div>\n <div className = {\"subject-info-item\"}>\n <div className = {\"subject-info-item-title\"}>Oznake</div>\n <div className = {\"subject-info-item-text\"}>Id: {props.data.id}</div>\n <div className = {\"subject-info-item-text\"}>Tip: {props.data.subject_type}</div>\n </div>\n </div>\n <div className = {\"subject-tables\"}>\n <input type=\"radio\" name=\"subject-tables-tab\" id=\"subject-tables-tab-objects\" defaultChecked/>\n <label className = \"subject-tables-label\" htmlFor=\"subject-tables-tab-objects\">Objekti</label>\n <input type=\"radio\" name=\"subject-tables-tab\" id=\"subject-tables-tab-invoices\" />\n <label className = \"subject-tables-label\" htmlFor=\"subject-tables-tab-invoices\">Računi</label>\n <input type=\"radio\" name=\"subject-tables-tab\" id=\"subject-tables-tab-transfers\" />\n <label className = \"subject-tables-label\" htmlFor=\"subject-tables-tab-transfers\">Transferi</label>\n <div className = {\"subject-objects subject-tables-tab-content\"}>\n <div className = {\"subject-objects-table\"}>\n <Table data = {objectsData} title = {\"Objekti:\"} columnDefinitions = {\n [\n {name: \"object_id\", alias: \"Objekt\"},\n {name: \"field\", alias: \"Polje\"},\n {name: \"location\", alias: \"Lokacija\"},\n {name: \"object_type\", alias: \"Tip\"}\n ]\n }\n />\n </div>\n </div>\n <div className = {\"subject-invoices subject-tables-tab-content\"}>\n <div className = {\"subject-invoices-table\"}>\n <Table data = {invoicesData} footerFunc = {footerFuncForInvoices} title = {\"Računi:\"} columnDefinitions = {\n [\n {name: \"id\", alias: \"Id\"},\n {name: \"invoice_date\", alias: \"Datum\"},\n {name: \"debt\", alias: 
\"Duguje\"},\n {name: \"credit\", alias: \"Potražuje\"},\n {name: \"invoice_desc\", alias: \"Opis\"},\n {name: \"paid\", alias: \"Pl\", cellCustomClassFunc: classForInvoicesDataCells}\n ]\n }\n />\n </div>\n </div>\n <div className = {\"subject-transfers subject-tables-tab-content\"}>\n <div className = {\"subject-transfers-table\"}>\n <Table data = {transfersData} footerFunc = {footerFuncForTransfers} title = {\"Transferi:\"} columnDefinitions = {\n [\n {name: \"invoice_id\", alias: \"Id Računa\"},\n {name: \"transfer_date\", alias: \"Datum\"},\n {name: \"amount\", alias: \"Iznos\", cellCustomClassFunc: classForTransfersDataCells}\n ]\n }\n />\n </div>\n </div>\n </div>\n </div>\n <div className = {\"subject-bottom\"}></div>\n </>\n );\n}\n\nexport default Subject;" }, { "alpha_fraction": 0.5622879266738892, "alphanum_fraction": 0.5758603811264038, "avg_line_length": 47, "blob_id": "825e5605d2c6b00c3e23db9b78d84748d6198239", "content_id": "504e07008bb967257a02f4f6c99207266a9def8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2063, "license_type": "no_license", "max_line_length": 334, "num_lines": 43, "path": "/tools/svg2jsx.py", "repo_name": "fgrizelj/pogrebno", "src_encoding": "UTF-8", "text": "import xml.etree.ElementTree as ET\nimport re\n\ndef main():\n tree = ET.parse('objects.svg')\n inSVG = tree.getroot()\n outSVG = ET.Element(\"svg\", {\"viewBox\": inSVG.get(\"viewBox\")})\n for group in inSVG.findall(\"{http://www.w3.org/2000/svg}g\"):\n gElem = ET.SubElement(outSVG, \"g\")\n for rect in group.findall(\"{http://www.w3.org/2000/svg}rect\"):\n style = parseStyle(rect.get(\"style\"))\n ET.SubElement(gElem, \"rect\", {\"x\": rect.get(\"x\"), \"y\": rect.get(\"y\"), \"width\": rect.get(\"width\"), \"height\": rect.get(\"height\"), \"id\": rect.get(\"id\"), \"style\": \"{{\" + style + \"}}\", \"onClick\": \"{objectClicked}\", \"onMouseEnter\": \"{objectMouseEnter}\", \"onMouseMove\": \"{objectMouseMove}\", \"onMouseLeave\": \"{objectMouseLeave}\"})\n for path in group.findall(\"{http://www.w3.org/2000/svg}path\"):\n style = parseStyle(path.get(\"style\"))\n ET.SubElement(gElem, \"path\", {\"d\": path.get(\"d\"), \"style\": \"{{\" + style + \"}}\"})\n for text in group.findall(\"{http://www.w3.org/2000/svg}text\"):\n actual_text = text.find(\"{http://www.w3.org/2000/svg}tspan\").text\n style = parseStyle(text.get(\"style\"))\n new_text_elem = ET.SubElement(gElem, \"text\", {\"x\": text.get(\"x\"), \"y\": text.get(\"y\"), \"style\": \"{{\" + style + \"}}\"})\n new_text_elem.text = actual_text\n\n outSVGET = ET.ElementTree(outSVG)\n outSVGET.write(\"objects_map_generated_jsx.txt\", \"utf-8\")\n\n with open('objects_map_generated_jsx.txt', 'r') as file :\n filedata = file.read()\n\n filedata = filedata.replace(\"\\\"{\", \"{\")\n filedata = filedata.replace(\"}\\\"\", \"}\")\n filedata = filedata.replace(\"fontFamily:'sansSerif', \", \"\")\n\n with open('objects_map_generated_jsx.txt', 'w') as file:\n file.write(filedata)\n\ndef parseStyle(style):\n tokens = style.split(\";\")\n callback = lambda m: m.group(1).upper()\n tokens = [re.sub(\"-(.)\", callback, t) for t in tokens]\n tokens = [re.sub(\":(.*)\", \":\\'\\g<1>\\'\", t) for t in tokens]\n return \", \".join(tokens)\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5746300220489502, "alphanum_fraction": 0.5826638340950012, "avg_line_length": 28.209877014160156, "blob_id": "d77f081d58f0462fa2f72b7d3e9b5ec467177066", "content_id": 
"9ac0b78b25ee84859b934510dc46085ab682f906", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2365, "license_type": "no_license", "max_line_length": 167, "num_lines": 81, "path": "/src/components/Objects.js", "repo_name": "fgrizelj/pogrebno", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from \"react\";\nimport ObjectsMap from \"./ObjectsMap\"\n\nimport \"./Objects.css\";\n\nfunction Objects(props) {\n const [data, setData] = useState([]);\n const [hovering, setHovering] = useState(false);\n const [hoverDescription, setHoverDescription] = useState([]);\n const [mousePosition, setMousePosition] = useState({x: 0, y: 0});\n\n useEffect(() => {\n fetch(\"pogrebno_api/objects\")\n .then(response => response.json())\n .then(result => {\n if (result.error !== undefined) {\n console.error(result.error);\n }\n else {\n setData(result);\n }\n })\n }, []);\n\n function objectClicked(e) {\n props.objectClicked(data.filter((x) => x.id == e.target.id)[0]);\n }\n\n function objectMouseEnter(e) {\n e.target.style.fill = \"#808080\";\n fetch(\"pogrebno_api/objects/\" + e.target.id + \"/subjects\")\n .then(response => response.json())\n .then(result => {\n if (result.error !== undefined) {\n console.error(result.error);\n }\n else {\n let desc = [];\n let index = 0;\n desc.push(<span key = {index}>{result[0].field} {result[0].location}</span>);\n index++;\n desc.push(<br key = {index}/>)\n index++;\n result.forEach(element => {\n desc.push(<span key = {index}>{element.first_name} {element.last_name}</span>);\n index++;\n desc.push(<br key = {index}/>)\n index++;\n });\n setHoverDescription(desc);\n }\n })\n setHovering(true);\n }\n\n function objectMouseMove(e) {\n let finalX = e.clientX + 10;\n let finalY = e.clientY + 10;\n setMousePosition({x: finalX, y: finalY});\n }\n\n function objectMouseLeave(e) {\n e.target.style.fill = \"#d3d3d3\";\n setHovering(false);\n }\n\n return (\n <>\n <div className = {\"objects-top\"}/>\n <div className = {\"objects-container\"}>\n <div className = {hovering ? 
\"objects-tooltip visible\" : \"objects-tooltip\"} style = {{position: \"absolute\", top: mousePosition.y, left: mousePosition.x}}>\n {hoverDescription}\n </div>\n <ObjectsMap objectClicked = {objectClicked} objectMouseEnter = {objectMouseEnter} objectMouseMove = {objectMouseMove} objectMouseLeave = {objectMouseLeave}/>\n </div>\n <div className = {\"objects-bottom\"}></div>\n </>\n );\n}\n\nexport default Objects;" }, { "alpha_fraction": 0.5702073574066162, "alphanum_fraction": 0.572571873664856, "avg_line_length": 35.90604019165039, "blob_id": "709731ae063bf0c7b71aee926a478874b720e921", "content_id": "c6ff363acb8a28d947f40941b37941f1149baab1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 5502, "license_type": "no_license", "max_line_length": 164, "num_lines": 149, "path": "/src/components/Finances.js", "repo_name": "fgrizelj/pogrebno", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from \"react\";\nimport Table from \"./table/Table\";\nimport {toCurrency, toMinorDenomination, toJSDateFromISODate} from \"../util/Util\"\n\nimport \"./Finances.css\";\n\nfunction Finances(props) {\n const [invoicesData, setInvoicesData] = useState([]);\n const [transfersData, setTransfersData] = useState([]);\n const [snackbar, setSnackbar] = useState({class:\"finances-snackbar hide\", text:\"\"});\n\n useEffect(() => {\n fetch(\"pogrebno_api/invoices\")\n .then(response => response.json())\n .then(result => {\n if (result.error !== undefined) {\n console.error(result.error);\n }\n else {\n result.forEach(element => {\n element.debt = toCurrency(element.debt);\n element.credit = toCurrency(element.credit);\n element.invoice_date = toJSDateFromISODate(element.invoice_date);\n });\n setInvoicesData(result);\n }\n });\n\n fetch(\"pogrebno_api/transfers\")\n .then(response => response.json())\n .then(result => {\n if (result.error !== undefined) {\n console.error(result.error);\n }\n else {\n result.forEach(element => {\n element.amount = toCurrency(element.amount);\n element.transfer_date = toJSDateFromISODate(element.transfer_date);\n });\n setTransfersData(result);\n }\n });\n }, []);\n\n function classForTransfersDataCells(value) {\n if (value < 0) {\n return \"less-than-zero\";\n }\n else if (value > 0) {\n return \"greater-than-zero\";\n }\n }\n\n function classForInvoicesDataCells(value) {\n if (value === \"Da\") {\n return \"paid\";\n }\n else if (value === \"Ne\") {\n return \"not-paid\";\n }\n else if (value === \"Dj.\") {\n return \"partial\";\n }\n }\n\n function footerFuncForInvoices(data) {\n let sumDebt = 0;\n let sumCredit = 0;\n data.forEach(item => {\n sumDebt += toMinorDenomination(item.debt);\n sumCredit += toMinorDenomination(item.credit);\n });\n return \"N: \" + data.length + \", \" + \"SumD: \" + toCurrency(sumDebt)+ \", \" + \"SumP: \" + toCurrency(sumCredit);\n }\n\n function footerFuncForTransfers(data) {\n let sumAmount = 0;\n data.forEach(item => {\n sumAmount += toMinorDenomination(item.amount);\n });\n return \"N: \" + data.length + \", \" + \"SumI: \" + toCurrency(sumAmount);\n }\n\n function tableCellClicked(cellData, rowData) {\n if (cellData.subject_id !== undefined) {\n let snackObjShow = {class: \"finances-snackbar show\", text: rowData.subject_full_name};\n let snackObjHide = {class: \"finances-snackbar hide\", text: rowData.subject_full_name};\n setSnackbar(snackObjShow);\n setTimeout(() => {setSnackbar(snackObjHide)}, 1000);\n }\n if (cellData.invoice_desc_id !== undefined) {\n let 
snackObjShow = {class: \"finances-snackbar show\", text: rowData.invoice_desc};\n let snackObjHide = {class: \"finances-snackbar hide\", text: rowData.invoice_desc};\n setSnackbar(snackObjShow);\n setTimeout(() => {setSnackbar(snackObjHide)}, 1000);\n }\n }\n\n return (\n <>\n <div className = {\"finances-top\"}/>\n <div className = {\"finances-container\"}>\n <input type=\"radio\" name=\"finances-tables-tab\" id=\"finances-tables-tab-invoices\" defaultChecked/>\n <label className = \"finances-tables-label\" htmlFor=\"finances-tables-tab-invoices\">Računi</label>\n <input type=\"radio\" name=\"finances-tables-tab\" id=\"finances-tables-tab-transfers\" />\n <label className = \"finances-tables-label\" htmlFor=\"finances-tables-tab-transfers\">Transferi</label>\n <div className = {\"finances-invoices\"}>\n <div className = {\"finances-invoices-table\"}>\n <Table data = {invoicesData} filter footerFunc = {footerFuncForInvoices} title = {\"Računi:\"} cellClicked = {tableCellClicked} columnDefinitions = {\n [\n {name: \"id\", alias: \"Id\"},\n {name: \"invoice_date\", alias: \"Datum\"},\n {name: \"debt\", alias: \"Duguje\"},\n {name: \"credit\", alias: \"Potražuje\"},\n {name: \"subject_id\", alias: \"SubjId\"},\n {name: \"subject_full_name\", alias: \"Subjekt\"},\n {name: \"invoice_desc_id\", alias: \"OpId\"},\n {name: \"invoice_desc\", alias: \"Opis\"},\n {name: \"paid\", alias: \"Pl\", cellCustomClassFunc: classForInvoicesDataCells}\n ]\n }\n />\n </div>\n </div>\n <div className = {\"finances-transfers\"}>\n <div className = {\"finances-transfers-table\"}>\n <Table data = {transfersData} filter footerFunc = {footerFuncForTransfers} title = {\"Transferi:\"} cellClicked = {tableCellClicked} columnDefinitions = {\n [\n {name: \"id\", alias: \"Id\"},\n {name: \"transfer_date\", alias: \"Datum\"},\n {name: \"amount\", alias: \"Iznos\", cellCustomClassFunc: classForTransfersDataCells},\n {name: \"invoice_id\", alias: \"RačunId\"},\n {name: \"subject_id\", alias: \"SubjId\"},\n {name: \"subject_full_name\", alias: \"Subjekt\"},\n {name: \"invoice_desc_id\", alias: \"OpId\"},\n {name: \"invoice_desc\", alias: \"Opis\"}\n ]\n }\n />\n </div>\n </div>\n </div>\n <div className = {\"finances-bottom\"}/>\n <div className = {snackbar.class}>{snackbar.text}</div>\n </>\n );\n}\n\nexport default Finances;" }, { "alpha_fraction": 0.5227272510528564, "alphanum_fraction": 0.5227272510528564, "avg_line_length": 27.490196228027344, "blob_id": "b2c9f83ac1ffca86a4495c8aab9d7ec0464bbcdd", "content_id": "9b644dd10b0acc4a0fdb677274c48f788ba99624", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1452, "license_type": "no_license", "max_line_length": 165, "num_lines": 51, "path": "/src/components/Subjects.js", "repo_name": "fgrizelj/pogrebno", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from \"react\";\nimport Table from \"./table/Table\";\n\nimport \"./Subjects.css\";\n\nfunction Subjects(props) {\n const [data, setData] = useState([]);\n\n useEffect(() => {\n fetch(\"pogrebno_api/subjects\")\n .then(response => response.json())\n .then(result => {\n if (result.error !== undefined) {\n console.error(result.error);\n }\n else {\n setData(result);\n }\n })\n }, []);\n\n function subjectsTableCellClicked(cellData, rowData) {\n props.subjectClicked(rowData);\n }\n\n return (\n <>\n <div className = {\"subjects-top\"}/>\n <div className = {\"subjects-container\"}>\n <div className = {\"subjects-table\"}>\n <Table data = {data} 
filter footerFunc = {(data) => \"N: \" + data.length} cellClicked = {subjectsTableCellClicked} highlightedRowHover columnDefinitions = {\n [\n {name: \"id\", alias: \"Id\"},\n {name: \"first_name\", alias: \"Ime\"},\n {name: \"last_name\", alias: \"Prezime\"},\n {name: \"nick_name\", alias: \"Nadimak\"},\n {name: \"address\", alias:\"Adresa\"},\n {name: \"city\", alias:\"Mjesto\"},\n {name: \"mob\", alias:\"Mob\"},\n {name: \"email\", alias:\"Mail\"},\n {name: \"subject_type\", alias:\"Tip\"},\n ]\n }/>\n </div>\n </div>\n <div className = {\"subjects-bottom\"}></div>\n </>\n );\n}\n\nexport default Subjects;" }, { "alpha_fraction": 0.6931297779083252, "alphanum_fraction": 0.6931297779083252, "avg_line_length": 27.521739959716797, "blob_id": "75eb270f5a2eec63f5301e7577aeada6a613e21d", "content_id": "ee7aaf74577ff899efcbfd6723f08d227a7840db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 655, "license_type": "no_license", "max_line_length": 115, "num_lines": 23, "path": "/src/components/table/TableHeaderRow.js", "repo_name": "fgrizelj/pogrebno", "src_encoding": "UTF-8", "text": "import React, { useState, useEffect } from \"react\";\nimport TableHeaderCell from \"./TableHeaderCell\";\n\nfunction TableHeaderRow(props) {\n let tableHeaderCells = [];\n\n props.columnDefinitions.forEach(columnDefinition => {\n if (columnDefinition.alias === undefined) {\n tableHeaderCells.push(<TableHeaderCell key = {columnDefinition.name} cellData = {columnDefinition.name}/>);\n }\n else {\n tableHeaderCells.push(<TableHeaderCell key = {columnDefinition.alias} cellData = {columnDefinition.alias}/>);\n }\n });\n\n return (\n <tr className = {\"table-header-row\"}>\n {tableHeaderCells}\n </tr>\n );\n}\n\nexport default TableHeaderRow;" } ]
11
UCL-EO/leaf
https://github.com/UCL-EO/leaf
d766f106917ee4200bfe9ed72e5a559a1a28b687
b6751acee636ded88bbbaa2cd5cc39ae7a46e258
b33930a7d8e382985d7e8a8bc94fb5ccecbceefd
refs/heads/master
2022-05-17T14:38:12.890667
2022-04-11T16:13:47
2022-04-11T16:13:47
49,721,330
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.4968404471874237, "alphanum_fraction": 0.5189573168754578, "avg_line_length": 27.772727966308594, "blob_id": "bdb69072514aa7f71234ec81c0ead8171472d482", "content_id": "5a2fdcbda5058463e80107ffee6ebad5d58cf1e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1266, "license_type": "no_license", "max_line_length": 86, "num_lines": 44, "path": "/leaf/getdata.py", "repo_name": "UCL-EO/leaf", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: iso-8859-15 -*-\n\nimport numpy as np\nfrom os import sep\nfrom pathlib import Path\n\n\ndef getdata(here,dfile='dataSpec_P5B.py'):\n '''\n grab spectral datasets from prospect file\n '''\n try:\n lines = Path(here + sep + dfile).read_text(encoding='utf-8').split('\\n')\n except:\n try:\n lines = dataSpec_P5B.split('\\n')\n except:\n self.error('unable to find database information in %s'%dfile)\n self.exit()\n data = {}\n for i in np.arange(len(lines)):\n if lines[i].find('nw=')>0:\n nw = int(lines[i].split('nw=')[1])\n if lines[i].find('DATA')>0:\n # get the name, eg lambda\n term = lines[i].split('(')[1]\n # get the indices\n n = np.array(lines[i].split('i=')[1].split(')')[0].split(',')).astype(int) - 1\n # first time\n if n[0] == 0:\n data[term] = np.zeros(nw)\n # get next lines until find /\n liner = lines[i].split('i=')[1].split(')')[1].strip()\n while liner[-1] == '&':\n i += 1\n liner += lines[i].strip()\n try:\n liner = np.array(liner.replace('&','')[1:-1].split(',')).astype(float)\n except:\n liner = 0.\n # now load\n data[term][n[0]:n[1]+1] = liner\n return data\n" }, { "alpha_fraction": 0.44417786598205566, "alphanum_fraction": 0.5438265800476074, "avg_line_length": 30.205883026123047, "blob_id": "086919eddfd3af05382da0f5202213ef04202b26", "content_id": "c600156ce72e72fa9b39dbb497dbf54d3be22f12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11671, "license_type": "no_license", "max_line_length": 98, "num_lines": 374, "path": "/build/lib.macosx-10.5-x86_64-2.7/leaf/__init__.py", "repo_name": "UCL-EO/leaf", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# -*- coding: iso-8859-15 -*-\n\nimport numpy as np\nimport scipy.interpolate\nimport getdata\nfrom scipy.interpolate import interp1d\nimport dataSpec_P5B\n\ndef trans_prosail ( N, cab, car, cbrown, cw, cm, lai, lidfa, lidfb, rsoil, psoil, \\\n hspot, tts, tto, psi, typelidf):\n \"\"\"A version of PROSAIL that uses transformed parameters to quasi-linearise\n the model. 
See http://dx.doi.org/10.1016/j.rse.2011.12.027\"\"\"\n # Define the constants\n slai = -2.0\n skab = -100.0\n skar = -100.0\n skw = -1./50.\n skm = -1./100.\n # Transform the parameters to real units\n xlai = slai * np.log ( lai )\n xkab = skab * np.log ( cab )\n xkar = skar * np.log ( car )\n xkw = skw * np.log ( cw )\n xdm = skm * np.log ( cm )\n # Run the PROSAIL model\n retval = run_prosail ( N, xkab, xkar, cbrown, xkw, xdm, xlai, \\\n lidfa, lidfb, rsoil, psoil, hspot, tts, tto, psi, typelidf )\n return retval\n\n# nested stuff\n'''\nDefine soil\n'''\n\n\ncharsoil = '''\n350 0.04,1220 0.05,1790 0.06,2300 0.07,2500 0.06'''\n\ndef soil(x,scale=None,trans=False):\n '''\n x is a dictionary\n \n ************************************\n x['spectra'] should contain spectra\n ************************************\n \n e.g.\n \n x['spectra']['dry']\n x['spectra']['char']\n \n etc.\n \n If this doesn't exist, it is loaded from the fortran data files\n and provides 'cab', 'car', 'cbrown', 'cw', 'cm'\n \n ************************************\n x['params'] should contain concentration values\n ************************************\n \n e.g.\n \n x['params']['cbrown']\n x['params']['cab']\n \n etc.\n \n '''\n # wavelength\n if not 'lambda' in x:\n #try:\n # x['lambda'] = eval('mod_dataspec_p5b.lambda')\n #except:\n x['lambda'] = np.arange(400.,2501.)\n\n x['nw'] = len(x['lambda'])\n\n if not 'params' in x:\n # return zero arrays of correct size if no parameters specified\n return np.zeros(x['nw']),np.zeros(x['nw']),x\n\n if not 'spectra' in x:\n x['spectra'] = {}\n\n # load defaults\n result = np.zeros(x['nw'])\n for p in x['params']:\n if not p in x['spectra']:\n if p == 'dry':\n x['spectra'][p] = mod_dataspec_p5b.rsoil1\\\n\t\t\t\t-np.min(mod_dataspec_p5b.rsoil1)\n x['spectra'][p] /= x['spectra'][p].max()\n if p == 'wet':\n x['spectra'][p] = mod_dataspec_p5b.rsoil2\\\n\t\t\t\t-np.min(mod_dataspec_p5b.rsoil2)\n x['spectra'][p] /= x['spectra'][p].max()\n if p == 'char':\n cchar = np.array([np.array(i.split()).astype(float) for i in charsoil.split(',')]).T\n x['spectra'][p] = \\\n scipy.interpolate.interp1d(cchar[0],cchar[1])(x['lambda'])\n try:\n result += x['params'][p] * x['spectra'][p]\n except:\n pass\n return result,x\n\n\n\nimport numpy as np\nfrom getdata import getdata\nimport os\nimport sys\n\n'''\nDefine leaf absorbing constituents\n'''\n\nclass Leaf():\n def __init__(self,theta=40.0,store=False,verbose=False,TINY=1e-20):\n self.verbose=verbose\n self.theta = theta\n self.store=store\n self.TINY = TINY\n self.errors = []\n self.internal_db = os.path.abspath(os.path.realpath(__file__))\\\n\t\t.replace('__init__.pyc','').replace('__init__.py','')\n if verbose: print self.internal_db\n\n self.db = getdata(self.internal_db)\n self.data = self.db.copy()\n self.nw = self.db['refractive'].shape\n\n def error(self,msg):\n print msg\n sys.exit(0)\n\n def verbose_level(self,level=0):\n self.verbose = level\n\n def add_spectra(self,s,conc=None):\n '''\n s is a dictionary \n\n It can contain:\n\n 'N' : number of leaf layers\n 'lambda' : new spectral sampling (e.g. vis/nir only)\n You can change the spectral sampling\n by putting a new lambda in s\n then all existing data transformed to this\n 'theta' : new angle for transmission calculation\n\n also, arrays:\n 'xx' : Either: array of shape (2,nl_) specifying\n wavelength (nm) and absorption coefficients for \n term 'k_xx'. 
The data are interpolated\n from the specified wavelengths (column 0)\n to those in self.data['lambda'] \n Or: array of shape(nl,) of absorption\n coefficients as a function of self.data['lambda']\n\n Note: specifying s['lambda'] is applied before loading any 'xx' terms.\n \n Note: interp1d used, which may have bounds fail, so should be made safe.\n\n '''\n if 'N' in s:\n self.data['N'] = s['N']\n\n if 'lambda' in s:\n for k in self.data:\n if not (k == 'lambda' or k == 'N' or k == 'theta'):\n f = interp1d(self.data['lambda'],self.data[k])\n self.data[k] = f(s['lambda'])\n self.data['lambda'] = s['lambda'] \n \n for k in s:\n if self.verbose: print 'considering',k\n if len(k) > 2 and (k[0] == 'k' or k[0] == 'K'):\n k0 = k\n k = k[2:]\n kk = 'k_'+k\n if self.verbose:\n print 'trying',k,'for',kk\n try:\n f = interp1d(s[k0][0],s[k0][1])\n self.data[kk] = f(s['lambda'])\n except:\n if s[k].shape == self.data['lambda'].shape:\n self.data[kk] = s[k0]\n else:\n err = 'error in spectra specification in Leaf.add_spectra() for '+k+' '+kk\n self.errors.append(err)\n if self.verbose: print err\n # update\n self.nw = self.data['refractive'].shape\n\n def getk(self,conc):\n '''\n Interpret concentration data dictionary\n into k\n '''\n if 'N' in conc:\n self.N = conc['N']\n else:\n self.N = 1.0\n\n self.conc = {}\n kvalue = np.zeros(self.nw) \n for k in conc.keys():\n # find spectra in database\n kk = 'k_' + k\n if kk in self.data:\n if self.verbose:\n print 'found',kk,'from',k,'at',conc[k]\n self.conc[kk] = conc[k]\n kvalue += conc[k] * self.data[kk]\n # it will cause issues if 0, so let it be tiny\n kvalue[np.where(kvalue<self.TINY)] = self.TINY\n return kvalue/self.N\n\n def rt(self,conc):\n '''\n calculate reflectance and transmittance\n for given concentrations\n\n '''\n self.t1 = self.tav_abs(90.)\n if 'theta' in conc:\n self.t2 = self.tav_abs(conc['theta'])\n else:\n self.t2 = self.tav_abs(self.theta)\n # should log which was used\n\n self.conc = conc \n self.k = self.getk(conc)\n # note self.N might not be set until\n # self.getk() is called\n N = self.N\n\n self.tau = np.zeros_like(self.k)\n # upper limit\n ww = np.where(self.k >= 85)[0]\n if len(ww): \n self.tau[ww] = 0.\n # lower limit\n ww = np.where(self.k <= 4)[0]\n if len(ww):\n xx=0.5*self.k[ww]-1.0\n yy=(((((((((((((((-3.60311230482612224e-13 \\\n *xx+3.46348526554087424e-12)*xx-2.99627399604128973e-11) \\\n *xx+2.57747807106988589e-10)*xx-2.09330568435488303e-9) \\\n *xx+1.59501329936987818e-8)*xx-1.13717900285428895e-7) \\\n *xx+7.55292885309152956e-7)*xx-4.64980751480619431e-6) \\\n *xx+2.63830365675408129e-5)*xx-1.37089870978830576e-4) \\\n *xx+6.47686503728103400e-4)*xx-2.76060141343627983e-3) \\\n *xx+1.05306034687449505e-2)*xx-3.57191348753631956e-2) \\\n *xx+1.07774527938978692e-1)*xx-2.96997075145080963e-1\n yy=(yy*xx+8.64664716763387311e-1)*xx+7.42047691268006429e-1\n yy=yy-np.log(self.k[ww])\n self.tau[ww] = (1.0-self.k[ww])*np.exp(-self.k[ww])+self.k[ww]**2*yy\n\n ww = np.where((self.k > 4) * (self.k <= 85))[0]\n if len(ww):\n xx=14.5/(self.k[ww]+3.25)-1.0\n yy=(((((((((((((((-1.62806570868460749e-12 \\\n *xx-8.95400579318284288e-13)*xx-4.08352702838151578e-12) \\\n *xx-1.45132988248537498e-11)*xx-8.35086918940757852e-11) \\\n *xx-2.13638678953766289e-10)*xx-1.10302431467069770e-9) \\\n *xx-3.67128915633455484e-9)*xx-1.66980544304104726e-8) \\\n *xx-6.11774386401295125e-8)*xx-2.70306163610271497e-7) \\\n *xx-1.05565006992891261e-6)*xx-4.72090467203711484e-6) \\\n *xx-1.95076375089955937e-5)*xx-9.16450482931221453e-5) \\\n 
*xx-4.05892130452128677e-4)*xx-2.14213055000334718e-3\n yy=((yy*xx-1.06374875116569657e-2)*xx-8.50699154984571871e-2)*xx+\\\n 9.23755307807784058e-1\n yy=np.exp(-self.k[ww])*yy/self.k[ww]\n self.tau[ww]=(1.0-self.k[ww])*np.exp(-self.k[ww])+self.k[ww]**2*yy\n\n tau = self.tau\n refr2 = self.data['refractive']**2\n x1=1-self.t1\n x2=self.t1**2*tau**2*(refr2-self.t1)\n x3=self.t1**2*tau*refr2\n x4=refr2*refr2-tau**2*(refr2-self.t1)**2\n x5=self.t2/self.t1\n x6=x5*(self.t1-1)+1-self.t2\n r=x1+x2/x4\n t=x3/x4\n ra=x5*r+x6\n ta=x5*t\n\n # to store if needed\n if self.store:\n self.c1=x6\n self.ra=x5*ra+x6\n self.ta=x5*ta\n self.c2=x1\n self.r1=x1+x2/x4\n self.t1=x3/x4\n \n '''\n reflectance and transmittance of N layers\n Stokes G.G. (1862), On the intensity of the light reflected from or transmitted\n through a pile of plates, Proceedings of the Royal Society of London, 11:545-556.\n ''' \n r[r<self.TINY]=self.TINY\n delta=(t**2-r**2-1)**2-4*r**2\n delta[delta<0]=0.\n beta=(1+r**2-t**2-np.sqrt(delta))/(2*r)\n va=(1+r**2-t**2+np.sqrt(delta))/(2*r)\n va[va<self.TINY]=self.TINY\n beta[np.abs(beta-r)<self.TINY]+=self.TINY\n den = (va*(beta-r))\n den[den<self.TINY]=self.TINY\n ss = beta*(va-r)/den\n ss[ss<0.] = 0.\n vb=np.sqrt(ss)\n s1=ra*(va*vb**(N-1)-va**(-1)*vb**(-(N-1)))+(ta*t-ra*r)*(vb**(N-1)-vb**(-(N-1)))\n s2=ta*(va-va**(-1))\n s3=va*vb**(N-1)-va**(-1)*vb**(-(N-1))-r*(vb**(N-1)-vb**(-(N-1)))\n self.r=s1/s3\n self.t=s2/s3\n \n '''\n tau average\n '''\n def tav_abs(self,theta):\n '''\n average transmittance for given refractive index\n\n computation of the average transmittivity at the leaf surface within a given\n solid angle. teta is the incidence solid angle (in radian). The average angle\n that works in most cases is 40deg*pi/180. ref is the refaction index.\n ********************************************************************************\n Stern F. (1964), Transmission of isotropic radiation across an interface between\n two dielectrics, Applied Optics, 3:111-113.\n Allen W.A. (1973), Transmission of isotropic light across a dielectric surface in\n two and three dimensions, Journal of the Optical Society of America, 63:664-666.\n ********************************************************************************^\n version 5.02 (25 July 2011)\n\n\n '''\n refr = self.data['refractive']\n thetarad=np.pi*theta/180.\n if theta == 0:\n res=4.*refr/(refr+1.)**2\n return res\n\n refr2=refr*refr\n ax=(refr+1.)**2/2.\n bx=-(refr2-1.)**2/4.\n\n if thetarad == np.pi/2.:\n b1=0.\n else:\n b1=np.sqrt((np.sin(thetarad)**2-(refr2+1.)/2.)**2+bx)\n\n b2=np.sin(thetarad)**2-(refr2+1.)/2.\n b0=b1-b2\n ts=(bx**2/(6.*b0**3)+bx/b0-b0/2.)-(bx**2/(6.*ax**3)+bx/ax-ax/2.)\n tp1=-2.*refr2*(b0-ax)/(refr2+1.)**2\n tp2=-2.*refr2*(refr2+1.)*np.log(b0/ax)/(refr2-1.)**2\n tp3=refr2*(1./b0-1./ax)/2.\n tp4=16.*refr2**2*(refr2**2+1.)*np.log((2.*(refr2+1.)*b0-(refr2-1.)**2)/ \\\n (2.*(refr2+1.)*ax-(refr2-1.)**2))/((refr2+1.)**3*(refr2-1.)**2)\n tp5=16.*refr2**3*(1./(2.*(refr2+1.)*b0-((refr2-1.)**2))-1./(2.*(refr2+1.) 
\\\n *ax-(refr2-1.)**2))/(refr2+1.)**3\n tp=tp1+tp2+tp3+tp4+tp5\n res=(ts+tp)/(2.*np.sin(thetarad)**2)\n\n return res\n" }, { "alpha_fraction": 0.6431334614753723, "alphanum_fraction": 0.6470019221305847, "avg_line_length": 38.769229888916016, "blob_id": "2429fc95979a821bdc54a27fe55ff8edf6b69219", "content_id": "27e49547acc454be2a0e1ebd9d2370cebf169b45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1034, "license_type": "no_license", "max_line_length": 73, "num_lines": 26, "path": "/setup.py", "repo_name": "UCL-EO/leaf", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\"\"\"Setup script for building leaf's python bindings\"\"\"\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration\n config = Configuration(parent_package,top_path)\n config.add_extension('leaf', [ \\\n \"leaf/getdata.py\",'leaf/dataSpec_P5B.py',\"leaf/__init__.py\"] )\n return config\n\nif __name__ == \"__main__\":\n from numpy.distutils.core import setup\n # Global variables for this extension:\n name = \"leaf\" # name of the generated python extension (.so)\n description = \"PROSPECT-based leaf scattering method\"\n long_description = \"The PROSPECT RT model with flexible inputs.\"\n author = \"Prof. P. Lewis/NCEO & University College London\"\n author_email = \"[email protected]\"\n url = \"https://github.com/UCL-EO/leaf.git\"\n \n setup( name=name,\\\n description=description, \\\n author=author, \\\n author_email = author_email, \\\n configuration = configuration, version=\"1.0.2\",\\\n packages=[\"leaf\"])\n" }, { "alpha_fraction": 0.695652186870575, "alphanum_fraction": 0.695652186870575, "avg_line_length": 10.5, "blob_id": "7c9a6f960970071b3e165da1751265e35c9f9a21", "content_id": "89df7806633113c87bee691857bde556acdf8943", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 23, "license_type": "no_license", "max_line_length": 15, "num_lines": 2, "path": "/README.md", "repo_name": "UCL-EO/leaf", "src_encoding": "UTF-8", "text": "# leaf\nleaf RT model/s\n" } ]
4
kilbyjmichael/pi_temp
https://github.com/kilbyjmichael/pi_temp
cad94eeded4d409492b81734542cabbab49148c5
72c08b16eda11739e9ccc369d7020355b665c7e0
789f1b40d63f2209dde7f5ab5601dd7008208561
refs/heads/master
2021-07-25T02:33:05.914899
2017-11-03T06:35:05
2017-11-03T06:35:05
107,506,646
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7763158082962036, "alphanum_fraction": 0.7763158082962036, "avg_line_length": 24.33333396911621, "blob_id": "98dd70c128c7b93164ba15d071db4b54b7faf855", "content_id": "38bc1956cd568b749d9b18829f8a019bb3f9380e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 76, "license_type": "permissive", "max_line_length": 64, "num_lines": 3, "path": "/README.md", "repo_name": "kilbyjmichael/pi_temp", "src_encoding": "UTF-8", "text": "# pi_temp\n\nset of scripts to read temperature from a dallas one wire sensor\n" }, { "alpha_fraction": 0.5530492663383484, "alphanum_fraction": 0.598162055015564, "avg_line_length": 22.940000534057617, "blob_id": "a55bb879dd4857c742c06bc4bc1ca0336e9f0f1c", "content_id": "f6524b9bca5f4f910657a21ad9c7139ba9916bf9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1197, "license_type": "permissive", "max_line_length": 65, "num_lines": 50, "path": "/graph.py", "repo_name": "kilbyjmichael/pi_temp", "src_encoding": "UTF-8", "text": "import sqlite3\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nfrom datetime import datetime\nfrom matplotlib import style\nstyle.use(['seaborn-poster'])\n\nconn = sqlite3.connect(r\"temp.db\")\nc = conn.cursor()\n\ndef graph_all_data():\n c.execute('SELECT time, temp FROM office')\n data = c.fetchall()\n\n dates = []\n temps = []\n \n for row in data: # 2017-10-19 05:42:40.277908\n parsed = datetime.strptime(row[0],\"%Y-%m-%d %H:%M:%S.%f\")\n dates.append(parsed)\n temps.append(row[1])\n\n plt.plot_date(dates,temps,'',label='temp')\n ax = plt.gca()\n ax.grid(True)\n plt.show()\n\ndef graph_data(choose_date):\n c.execute('SELECT time, temp FROM office')\n data = c.fetchall()\n\n dates = []\n temps = []\n\n dayg = datetime.strptime(choose_date, \"%Y-%m-%d\")\n \n for row in data: # 2017-10-19 05:42:40.277908\n parsed = datetime.strptime(row[0],\"%Y-%m-%d %H:%M:%S.%f\")\n if parsed.date() == dayg.date():\n dates.append(parsed)\n temps.append(row[1])\n\n plt.plot_date(dates,temps,'',label='temp')\n ax = plt.gca()\n ax.grid(True)\n plt.show()\n\n#graph_data(\"2017-10-16\")\ngraph_all_data()\nc.close()\n" }, { "alpha_fraction": 0.5662921071052551, "alphanum_fraction": 0.5775281190872192, "avg_line_length": 28.66666603088379, "blob_id": "63fd47d08165970d03212ee07e4bbfcd00286e57", "content_id": "1e2181854fb913ac61559f83722fc8b7158a83e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 890, "license_type": "permissive", "max_line_length": 136, "num_lines": 30, "path": "/temp.py", "repo_name": "kilbyjmichael/pi_temp", "src_encoding": "UTF-8", "text": "import serial\nfrom datetime import datetime\nimport time\nimport sqlite3\n\ndef serial_data():\n ser = serial.Serial(port='/dev/serialyourserial', baudrate=9600)\n \n while True:\n yield ser.readline()\n ser.close()\n \ndef main():\n db_filename = 'temp.db'\n connection = sqlite3.connect(db_filename)\n cursor = connection.cursor()\n cursor.execute('''CREATE TABLE if not exists office (\n time date primary key,\n temp float)''')\n \n while True:\n try:\n print(\"writing \" + next(serial_data()).decode(\"utf-8\"))\n cursor.execute(\"INSERT INTO office (time, temp) VALUES (?, ?)\", (datetime.now(),float(next(serial_data()).decode(\"utf-8\"))))\n connection.commit()\n time.sleep(30)\n except KeyboardInterrupt:\n connection.close()\n\nif __name__ == \"__main__\":main()\n" }, { 
"alpha_fraction": 0.5691428780555725, "alphanum_fraction": 0.5946666598320007, "avg_line_length": 28.829545974731445, "blob_id": "2612c3e73fdcc973f3188da011520aa230a5682a", "content_id": "abe7eb0d987c5db6e36d85422a235e3ef9f58553", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2625, "license_type": "permissive", "max_line_length": 98, "num_lines": 88, "path": "/led_temp.py", "repo_name": "kilbyjmichael/pi_temp", "src_encoding": "UTF-8", "text": "from w1thermsensor import W1ThermSensor\nimport RPi.GPIO as GPIO\nfrom datetime import datetime\nimport time\nimport sqlite3\n\nGPIO.setmode(GPIO.BCM)\nblue_led = 16\norange_led = 20\nred_led = 21\nGPIO.setup(blue_led,GPIO.OUT)\nGPIO.setup(orange_led,GPIO.OUT)\nGPIO.setup(red_led,GPIO.OUT)\n\ndef light_reset():\n GPIO.output(blue_led,GPIO.LOW)\n GPIO.output(orange_led,GPIO.LOW)\n GPIO.output(red_led,GPIO.LOW)\n \ndef blink_light(chosen_light):\n GPIO.output(chosen_light,GPIO.HIGH)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.LOW)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.HIGH)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.LOW)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.HIGH)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.LOW)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.HIGH)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.LOW)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.HIGH)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.LOW)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.HIGH)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.LOW)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.HIGH)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.LOW)\n time.sleep(0.25)\n GPIO.output(chosen_light,GPIO.HIGH)\n \ndef main():\n db_filename = 'temp.db'\n connection = sqlite3.connect(db_filename)\n cursor = connection.cursor()\n cursor.execute('''CREATE TABLE if not exists stephs (\n time date primary key,\n temp float)''')\n \n sensor = W1ThermSensor()\n \n while True:\n try:\n temp = sensor.get_temperature(W1ThermSensor.DEGREES_F)\n print(\"writing \" + str(temp))\n if temp < 70.0:\n chosen_light = blue_led\n elif temp >= 70.0 and temp <= 75.0:\n chosen_light = orange_led\n elif temp > 75.0:\n chosen_light = red_led\n else:\n light_reset()\n GPIO.output(blue_led,GPIO.HIGH)\n GPIO.output(orange_led,GPIO.HIGH)\n GPIO.output(red_led,GPIO.HIGH)\n \n light_reset()\n GPIO.output(chosen_light,GPIO.HIGH)\n cursor.execute(\"INSERT INTO stephs (time, temp) VALUES (?, ?)\", (datetime.now(),temp))\n connection.commit()\n\n time.sleep(2)\n blink_light(chosen_light)\n except KeyboardInterrupt:\n connection.close()\n GPIO.cleanup()\n\nif __name__ == \"__main__\":main()\n" } ]
4
Weiyuchuan/qimaishuju
https://github.com/Weiyuchuan/qimaishuju
2019213c129387d493e4095f32123c298ed93a8c
aaf3fe5786ce21db3802510792951744f1f92e20
467c46715fc0500d1c462308881f88ce9da122e2
refs/heads/master
2020-05-02T10:23:23.873396
2019-03-27T01:27:27
2019-03-27T01:27:27
177,894,328
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6175869107246399, "alphanum_fraction": 0.6421267986297607, "avg_line_length": 39.628570556640625, "blob_id": "e426c2a9378e57ada670b7489bd63fc055091176", "content_id": "06254f9eeecad86ad9028ad2054447d4e15e162d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1501, "license_type": "no_license", "max_line_length": 137, "num_lines": 35, "path": "/七麦数据.py", "repo_name": "Weiyuchuan/qimaishuju", "src_encoding": "UTF-8", "text": "from lxml import etree\r\nimport requests\r\nimport re\r\nfrom selenium import webdriver\r\n\r\n# driver = webdriver.PhantomJS(executable_path=r'C:\\Users\\Administrator\\Desktop\\phantomjs-2.1.1-windows\\bin\\phantomjs.exe')\r\n# url ='https://www.qimai.cn/rank/release'\r\n# driver.get(url=url)\r\n# html=driver.page_source\r\n# print(html)\r\n# with open(\"text.html\",'w',encoding='utf-8')as f:\r\n# f.write(driver.page_source)\r\n\r\nheaders ={\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36\"\r\n\r\n}\r\nresponse =requests.get(url='https://api.qimai.cn/rank/release?analysis=cBMfQlFeWxcWUlQBVUUEcBMDCAQGBgtTDg1XAHZCAQ%3D%3D',headers=headers)\r\n\r\nhtml=response.content.decode('unicode=escape')\r\nprint(html)\r\n# with open(\"text.py\",'w',encoding='utf-8')as f:\r\n# f.write(html)\r\nname_list = re.findall('\"appName\":\"(.*?)\"',html)\r\nprice_list = re.findall('\"price\":\"(.*?)\"',html)\r\ngenre_list = re.findall('\"genre\":\"(.*?)\"',html)\r\nreleaseTime = re.findall('\"releaseTime\":\"(.*?)\"',html)\r\npublisher_list = re.findall('\"publisher\":\"(.*?)\"',html)\r\nwith open(\"信息.txt\",'a',encoding='utf-8')as fp:\r\n for name,price,genre,time,publisher in zip(name_list,price_list,genre_list,releaseTime,publisher_list):\r\n fp.write(\"应用:\"+ name +'\\n')\r\n fp.write(\"公司:\" + publisher + '\\n')\r\n fp.write(\"价格:\" + price + '\\n')\r\n fp.write(\"分类:\" + genre + '\\n')\r\n fp.write(\"时间:\" + time + '\\n'+'\\n')\r\n\r\n\r\n\r\n\r\n\r\n" } ]
1
LazeCode/Text-To-Blogger-Py
https://github.com/LazeCode/Text-To-Blogger-Py
1dd18d99266ec1ddcee8239ffbf25b84bd704499
999b5743c8307438a6b2924cb8a7367128a5df68
5e8e46fd350a35d704982d8b54f07a36b12487ea
refs/heads/master
2023-03-23T23:26:33.296025
2021-03-15T19:52:14
2021-03-15T19:52:14
288,841,175
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.6818181872367859, "alphanum_fraction": 0.6836007237434387, "avg_line_length": 26.769229888916016, "blob_id": "acb1c45e1f97eea9095aee42703a19c7e7d0d3b7", "content_id": "0fe69cbc3510b486b0176340497eae6b82af9b52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1122, "license_type": "no_license", "max_line_length": 83, "num_lines": 39, "path": "/BloggerAPIService.py", "repo_name": "LazeCode/Text-To-Blogger-Py", "src_encoding": "UTF-8", "text": "\r\nimport pickle\r\nimport os\r\nfrom pprint import pprint as pp\r\nfrom google_auth_oauthlib.flow import Flow, InstalledAppFlow\r\nfrom googleapiclient.discovery import build\r\nfrom google.auth.transport.requests import Request\r\nimport google_auth_httplib2\r\n\r\nCLIENT_SECRET_FILE = 'client_secret.json'\r\nAPI_SERVICE_NAME = 'blogger'\r\nAPI_VERSION = 'v3'\r\nSCOPE = ['https://www.googleapis.com/auth/blogger']\r\n\r\ncred = None\r\n\r\nif os.path.exists('token.pickle'):\r\n with open('token.pickle', 'rb') as token:\r\n cred = pickle.load(token)\r\n\r\nif not cred or not cred.valid:\r\n if cred and cred.expired and cred.refresh_token:\r\n cred.refresh(Request())\r\n\r\n else:\r\n flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRET_FILE, SCOPE)\r\n cred = flow.run_local_server()\r\n\r\n with open('token.pickle', 'wb') as token:\r\n pickle.dump(cred, token)\r\n\r\ntry:\r\n Blog = build(API_SERVICE_NAME, API_VERSION, credentials=cred)\r\n print('Service created successfully')\r\n # print(service)\r\nexcept Exception as e:\r\n print(e)\r\n\r\n# resp = service.blogs().get(blogId=BlogId).execute()\r\n# print(resp)" }, { "alpha_fraction": 0.5750805735588074, "alphanum_fraction": 0.5838882923126221, "avg_line_length": 39.1769905090332, "blob_id": "1cf5ce7c8b57e32e2bbd492512a6ae3796e40352", "content_id": "3c35e2ab9289c4cf3cc9fb04cc15ca7aa897cd1a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4655, "license_type": "no_license", "max_line_length": 207, "num_lines": 113, "path": "/Publisher.py", "repo_name": "LazeCode/Text-To-Blogger-Py", "src_encoding": "UTF-8", "text": "\r\n# Scan Databases and Publish them to Blogger.\r\n# Created by https://github.com/LazeCode/\r\n\r\nimport time, sqlite3, io, os, gzip, math\r\nfrom Config import BlogId, linesToUpload\r\n\r\n# --------------------- Google API Functions ------------------------ #\r\nfrom BloggerAPIService import Blog\r\nfrom googleapiclient.discovery import build, MediaFileUpload\r\n\r\n# ------------------ Establish Database Connection ------------------ #\r\ntry:\r\n conn = sqlite3.connect('ProjectDatabase.db')\r\nexcept Error as e:\r\n print(e)\r\nc = conn.cursor()\r\n\r\n# ----------------------- Start time measure ------------------------ #\r\nstartTime = time.time()\r\n\r\n# ----------------------- Define file methods ----------------------- #\r\ndef fileLength(filename):\r\n if filename.endswith(\".gz\"):\r\n with gzip.open(FileLocation, 'rb') as dbFile:\r\n for lineNo, lines in enumerate(dbFile):\r\n pass\r\n else:\r\n with open(filename, errors='replace') as dbFile:\r\n for lineNo, lines in enumerate(dbFile):\r\n pass\r\n return lineNo + 1\r\n\r\ndef fileSizeDefine(fileSize):\r\n if fileSize > 1024000:\r\n fileSizeType = f\"{round(fileSize/102400):,}\" + 'MB'\r\n elif fileSize < 1023990:\r\n fileSizeType = f\"{round(fileSize/1024):,}\" + 'KB'\r\n return fileSizeType\r\n# ------------------------------------------------------------------- 
#\r\n\r\n# Attempt to create database and tables if not exists.\r\nwith conn:\r\n try:\r\n c.execute(\"\"\" CREATE TABLE FilesUploaded (DatabaseID INTEGER PRIMARY KEY, FileName TEXT, FileSize INTEGER, DateCreated TEXT, NumberOfLines INTEGER, LinesUploaded INTEGER, NumberOfParts INTEGER) \"\"\" )\r\n except:\r\n pass\r\n\r\n# Check if Database file exists in Leaked DB List. If not then add.\r\nfor DatabaseFile in os.listdir('FileToPublish/'):\r\n with conn:\r\n c.execute(\"SELECT count(*) FROM FilesUploaded WHERE FileName = ?\", (DatabaseFile,))\r\n if c.fetchone()[0] == 0:\r\n FileLocation = 'FileToPublish/' + DatabaseFile\r\n size = os.path.getsize(FileLocation)\r\n ctime = time.ctime(os.path.getmtime(FileLocation))\r\n length = fileLength(FileLocation)\r\n print(DatabaseFile + ' - Lines: ' + str(length))\r\n\r\n # Insert scanned database files and metadata to SQL Database\r\n with conn:\r\n c.execute(\"INSERT INTO FilesUploaded (FileName, FileSize, DateCreated, NumberOfLines, LinesUploaded, NumberOfParts) VALUES (?,?,?,?,?,?)\", (DatabaseFile, size, ctime, length, 0, 0,))\r\n print('\\nDatabase File Added to DB. \\n')\r\n else:\r\n pass\r\n print('File Already Exists in DB. \\n')\r\n\r\n# Add line breaks to each line and post to blogger\r\nSelectRows = c.execute(\"SELECT * FROM FilesUploaded WHERE NumberOfLines <> LinesUploaded\").fetchall()\r\nfor EachDatabase in SelectRows:\r\n DatabaseID = EachDatabase[0]\r\n FileName = EachDatabase[1]\r\n FileSize = EachDatabase[2]\r\n DateCreated = EachDatabase[3]\r\n NumberOfLines = EachDatabase[4]\r\n LinesUploaded = EachDatabase[5]\r\n NumberOfParts = EachDatabase[6]\r\n\r\n while LinesUploaded != NumberOfLines:\r\n with open('FileToPublish/' + FileName, errors='replace') as dbFile:\r\n lineCountsInLoop = 0\r\n contentForBlogger = ''\r\n for lineNo, linesInDatabase in enumerate(dbFile):\r\n if lineNo in range(LinesUploaded,LinesUploaded + linesToUpload):\r\n contentForBlogger += '<br>' + linesInDatabase\r\n lineCountsInLoop += 1\r\n pass\r\n pass\r\n \r\n # Set number of parts counter\r\n NumberOfParts += 1\r\n\r\n # Blogger API Post Request\r\n body = {\r\n \"kind\": \"blogger#post\",\r\n \"id\": BlogId,\r\n \"title\": FileName + ' - Size:' + fileSizeDefine(FileSize) + ' - Lines:' + f\"{NumberOfLines:,}\" + ' - Part:' + str(NumberOfParts) + '/' + str(math.ceil(NumberOfLines/linesToUpload)),\r\n \"content\": contentForBlogger\r\n }\r\n PostToBlog = Blog.posts().insert(blogId=BlogId, isDraft=False, body=body).execute()\r\n\r\n # Set number of parts count to database\r\n with conn:\r\n c.execute(\"UPDATE FilesUploaded SET NumberOfParts = ? WHERE DatabaseID = ?\",(NumberOfParts, DatabaseID,))\r\n\r\n print('Set Complete')\r\n LinesUploaded += lineCountsInLoop\r\n\r\n with conn:\r\n c.execute(\"UPDATE FilesUploaded SET LinesUploaded = ? 
WHERE DatabaseID = ?\",(LinesUploaded, DatabaseID,))\r\n\r\n\r\n# Print time taken for the task\r\nprint(\"\\n--- %s Seconds ---\" % (time.time() - startTime))\r\n" }, { "alpha_fraction": 0.6761904954910278, "alphanum_fraction": 0.723809540271759, "avg_line_length": 32.66666793823242, "blob_id": "57d8e634c8e48393262bef5eb107f8fab3357c5e", "content_id": "319a6ce6fa1b3dda39518e725115a8f4386ffec5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 210, "license_type": "no_license", "max_line_length": 60, "num_lines": 6, "path": "/Config.py", "repo_name": "LazeCode/Text-To-Blogger-Py", "src_encoding": "UTF-8", "text": "\r\n\r\n# Insert your blog ID here\r\nBlogId = \" INSERT YOUR BLOG ID HERE \"\r\n\r\n# This is the maximum line limit to upload per post.\r\n# Recommend around 10,000 lines per post without any issues.\r\nlinesToUpload = 10000" }, { "alpha_fraction": 0.7427117228507996, "alphanum_fraction": 0.7589079141616821, "avg_line_length": 49.4523811340332, "blob_id": "a244b2ad6613e36e68f52473553de663020d31f7", "content_id": "bcd1228b9f02de8979d624bee4c4d2bc120d9fd8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2161, "license_type": "no_license", "max_line_length": 199, "num_lines": 42, "path": "/readme.md", "repo_name": "LazeCode/Text-To-Blogger-Py", "src_encoding": "UTF-8", "text": "# Text to Blogger Python Script\r\n\r\nThis Python script allows you to upload text from multiple files (txt format) within a folder to blogger.\r\nAutomatically uploades a set number of (10,000 default) lines per post from any text document to a blogger blog.\r\nAutomatically calculates the lines uploaded previously using an SQL database.\r\nAlso calculates how many parts are there for each post (if the lines in the text file are larger than set 10,000 amount).\r\n\r\n# Requirements:\r\n- Python 3.8\r\n### Python packages\r\n- pickle\r\n- google_auth_oauthlib.flow\r\n- googleapiclient.discovery\r\n- google.auth.transport.requests\r\n- google_auth_httplib2\r\n\r\n# Setup\r\n## Installing dependencies and running the script\r\n1. Make sure you have [Python 3.7](https://www.python.org/downloads/) installed on your system\r\n2. If needed, install [pipenv](https://pypi.org/project/pipenv/) via `pip install pipenv`\r\n3. Change to the directory where you installed this script\r\n4. Run `pipenv install` to download and install all the dependencies\r\n5. Run `pipenv shell` to open a shell with all the dependencies available (you'll need to do this every time you want to run the script)\r\n6. Now run the script.\r\n\r\n## Setting up the Script\r\n1. Download the script\r\n2. Install all the necessary packages from pip or by downloading them.\r\n3. Create an API with Google for BLogger API.\r\n4. Download the API keys (client.json) and put it in the local folder.\r\n5. Open Config.py and insert the Blog ID from blogger\r\n This is the blog the script is going to post to.\r\n6. Add the files to be uploaded to the folder \"FileToPublish\".\r\n7. To run on windows just run the RunPublisher.bat file or on mac run the \"Publisher.py\"\r\n\r\n## Obtaining a Google Blogger API key\r\n\r\n1. 
Obtain a Google Blogger API key (Client ID and Client Secret) by following the instructions on [Getting started with Google Blogger REST APIs](https://developers.google.com/blogger/docs/3.0/using)\r\n\r\n**NOTE** When selecting your application type in Step 4 of \"Request an OAuth 2.0 client ID\", please select \"Other\".\r\n\r\n2. Download the credentials as a client_secret.json file and save it in the local folder containing this script.\r\n" } ]
4
duowen1/learngit
https://github.com/duowen1/learngit
d46290872195f876f3f5f74ef8857d21bfbd1b64
9c1503b4f548fbda96c099527571fd92331c81b9
64b174db21392520ab2532189207103d6ee840ed
refs/heads/master
2020-12-21T02:01:13.601149
2020-11-09T14:47:52
2020-11-09T14:47:52
236,272,577
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6064516305923462, "alphanum_fraction": 0.6156681776046753, "avg_line_length": 22.106382369995117, "blob_id": "8ddb67d5f6ec774939d015c85e55a3accc210241", "content_id": "ceaf953bc5e11f95dac16e90b2c27f4f0fdcf627", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1085, "license_type": "no_license", "max_line_length": 81, "num_lines": 47, "path": "/39.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\nusing namespace std;\n\nclass Solution{\n vector<vector<int>> answer;\n vector<int> solutions;\n vector<int> global;\npublic:\n vector<vector<int>> combinationSum(vector<int>& candidates, int target);\n void turtle(int,int);\n};\n\nvoid Solution::turtle(int start,int target){\n if(target==0){\n answer.push_back(solutions);\n }\n\n for(int i=start;i<global.size();i++){\n if(global[i]>target) break;\n solutions.push_back(global[i]);\n turtle(i,target-global[i]);\n solutions.pop_back();\n }\n\n}\n\nvector<vector<int>> Solution::combinationSum(vector<int>& candidates,int target){\n sort(candidates.begin(),candidates.end());\n global=candidates;\n turtle(0,target);\n return answer;\n}\n\nint main(){\n Solution so;\n vector<int> candidates={2,3,6,7};\n int target=7;\n auto solution=so.combinationSum(candidates,target);\n for(int i=0;i<solution.size();i++){\n for(int j=0;j<solution[i].size();j++){\n cout<<solution[i][j];\n }\n cout<<endl;\n }\n return 0;\n}" }, { "alpha_fraction": 0.4332755506038666, "alphanum_fraction": 0.45753899216651917, "avg_line_length": 24.130434036254883, "blob_id": "017b57df2a16df9b294fb82b00ec49414ea73d50", "content_id": "dced4626fb1258ab9915c2701b71bd257b9783fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 577, "license_type": "no_license", "max_line_length": 46, "num_lines": 23, "path": "/11.盛最多水的容器.py", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "from typing import List\nclass Solution:\n def maxArea(self,height:List[int]) -> int:\n left=0\n right=len(height)-1\n max_sum=0\n while left<right:\n d=right-left\n if height[left]<height[right]:\n s=d*height[left]\n max_sum=max(s,max_sum)\n left+=1\n else:\n s=d*height[right]\n right-=1\n max_sum=max(s,max_sum)\n return max_sum\n \n\n\nif __name__=='__main__':\n so=Solution()\n print(so.maxArea([1,8,6,2,5,4,8,3,7]))" }, { "alpha_fraction": 0.5168350338935852, "alphanum_fraction": 0.5218855142593384, "avg_line_length": 19.517240524291992, "blob_id": "81c3963aba43139af4482799c45270e4771a402e", "content_id": "66bec0e16ebbc0c5e0a370cba7d63b312e7c92d8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 594, "license_type": "no_license", "max_line_length": 76, "num_lines": 29, "path": "/20.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <stack>\nusing namespace std;\n\nclass Solution{\npublic:\n bool isValid(string s);\n};\n\nbool Solution::isValid(string s){\n stack<char> mystack;\n for(int i=0;i<s.length();i++){\n if(s[i]=='('||s[i]=='['||s[i]=='{') \n mystack.push(s[i]);\n else{\n if((!mystack.empty())&&abs(mystack.top()-s[i])<3) mystack.pop();\n else return false;\n }\n }\n return mystack.empty();\n}\n\nint main(){\n string s=\"()[]{}\";\n Solution so;\n if(so.isValid(s)) cout<<\"True\"<<endl;\n else cout<<\"False\"<<endl;\n return 0;\n}" }, { "alpha_fraction": 0.4343731701374054, "alphanum_fraction": 
0.4585050046443939, "avg_line_length": 33, "blob_id": "64bbc80b90eaae2c66d98eb6ff8c1176de852191", "content_id": "4b6abf541a1ea063b84c54087f45bdeb751bdd29", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1953, "license_type": "no_license", "max_line_length": 79, "num_lines": 50, "path": "/5.最长回文子串.py", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "class Solution:\n def longestPalindrome(self, s: str) -> str:\n #dynamic programming solution\n #time complexity O(n^2)\n #space complexity O(n^2)\n #build an n*n matrix whose element (i,j) marks whether s[i:j] is a palindrome\n #clearly (i,i)=True, and (i,j) with j<i is False\n #(i,i+1) is True if and only if s[i]==s[i+1]\n #(i,j) is True if and only if (i+1,j-1) is True and s[i]==s[j]\n n=len(s)\n answer=\"\"\n A=[[False]*n for _ in range(0,n)]#build an n*n matrix with a list comprehension\n for l in range(0,n):#substring length is l+1\n for i in range(0,n):#start index; every start index must be tried\n j=i+l#end index\n if j>=n:#index out of range\n break\n if l==0:\n A[i][j]=True\n elif l==1:\n A[i][j]=(s[i]==s[j])\n else:\n A[i][j]=(A[i+1][j-1]) and (s[i]==s[j])\n if A[i][j] and l+1>len(answer):\n answer=s[i:j+1]\n for i in range(0,n):\n print(A[i])\n return answer\n\n def expandAroundCenter(self, s, left, right):\n while left >= 0 and right < len(s) and s[left] == s[right]:#indices in range and expanded characters equal\n left -= 1\n right += 1\n return left + 1, right - 1\n\n def longestPalindrome2(self, s: str) -> str:\n #center expansion method\n start, end = 0, 0\n for i in range(len(s)):\n left1, right1 = self.expandAroundCenter(s, i, i)#base case: center of length 1\n left2, right2 = self.expandAroundCenter(s, i, i + 1)#base case: center of length 2\n if right1 - left1 > end - start:\n start, end = left1, right1\n if right2 - left2 > end - start:\n start, end = left2, right2\n return s[start: end + 1]\n\nif __name__ =='__main__':\n so=Solution()\n print(so.longestPalindrome('cbbad'))" }, { "alpha_fraction": 0.49439775943756104, "alphanum_fraction": 0.5438265800476074, "avg_line_length": 17.8157901763916, "blob_id": "45e2c2c9669616aee29ff2ad6992411606bfcb20", "content_id": "3f1ffe15704760547f8affad07d49e7d7754bed0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 714, "license_type": "no_license", "max_line_length": 49, "num_lines": 38, "path": "/6.Ztransform.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <string>\nusing namespace std;\n\nclass Solution{\npublic:\n string convert(string s, int numRows); \n};\n\nint main(){\n Solution so;\n string s=\"leetcodeishiring\";\n cout<<so.convert(s,3)<<endl;\n return 0;\n}\n\nstring Solution :: convert(string s,int numRows){\n string answer;\n int i=0;\n while(i<s.length()){\n answer+=s[i];\n i+=(numRows*2-2);\n }\n for(i=1;i<numRows-1;i++){\n int j=i;\n while(j<s.length()){\n answer+=s[j];\n if(j+numRows*2-2-2*i<s.length()) answer+=s[j+numRows*2-2-2*i];\n j+=(numRows*2-2);\n }\n }\n i=numRows-1;\n while(i<s.length()){\n answer+=s[i];\n i+=(numRows*2-2);\n }\n return answer;\n}" }, { "alpha_fraction": 0.45859214663505554, "alphanum_fraction": 0.48447203636169434, "avg_line_length": 23.174999237060547, "blob_id": "e61acbd7a133e9ec256e60a912f1872322ca9ecf", "content_id": "c6f1a03e9b56dbaf46f6ae4c091a533508dd0006", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 966, "license_type": "no_license", "max_line_length": 68, "num_lines": 40, "path": "/2.两数相加.py", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n def makelist(self,num:int) -> ListNode:\n if num==0:\n list=ListNode(0)\n return list\n first=ListNode(0)\n temp=first\n while not num==0:\n toadd=num%10\n num=num//10\n list=ListNode(toadd)\n temp.next=list\n temp=list\n return first.next\n\n\n\n def calnum(self,l1:ListNode) -> int:\n num=0\n base=1\n while True:\n num+=l1.val*base\n base*=10\n if not l1.next==None:\n l1=l1.next\n else:\n print(num)\n return num\n \n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n num1=self.calnum(l1)\n num2=self.calnum(l2)\n sum=num1+num2\n return self.makelist(sum)" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 6.5714287757873535, "blob_id": "7c9a6f960970071b3e165da1751265e35c9f9a21", "content_id": "89df7806633113c87bee691857bde556acdf8943", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 23, "license_type": "no_license", "max_line_length": 15, "num_lines": 2, "path": "/readme.md", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "# Leetcode problem log\nA record of Leetcode solutions, updated as I go\n\n# Languages\npython\nC++\nC" }, { "alpha_fraction": 0.4952789843082428, "alphanum_fraction": 0.5150214433670044, "avg_line_length": 21.86274528503418, "blob_id": "636bbdb71c1ef42bfd406bd1fc490276eb90bbb1", "content_id": "4b1b691bddbb2f4062bbee65ad5493376669a953", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1177, "license_type": "no_license", "max_line_length": 51, "num_lines": 51, "path": "/33.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\nusing namespace std;\n\nclass Solution{\npublic:\n int search(vector<int>& nums, int target);\n};\n\nint Solution::search(vector<int> &nums,int target){\n if(!nums.size()) return -1;\n int left=0;\n int right=nums.size()-1;\n int mid;\n while(left<=right){\n mid=(left+right)/2;\n if((mid>0&&nums[mid]<nums[mid-1])) break;\n if(nums[mid]<*(--nums.end())) right=mid-1;\n else left=mid+1;\n }\n cout<<mid<<endl;\n if(target>*(--nums.end())){//search in the left half\n left=0;\n right=mid-1;\n while(left<=right){\n mid=(left+right)/2;\n if(nums[mid]==target) return mid;\n if(nums[mid]<target) left=mid+1;\n else right=mid-1;\n }\n }else{\n left=mid;\n right=nums.size()-1;\n while(left<=right){\n mid=(left+right)/2;\n if(nums[mid]==target) return mid;\n if(nums[mid]<target) left=mid+1;\n else right=mid-1;\n } \n }\n return -1;\n\n}\n\nint main(){\n vector<int> nums={0,1,2};\n int target=1;\n Solution so;\n cout<<so.search(nums,target)<<endl;\n return 0;\n}" }, { 
if num==0:\n list=ListNode(0)\n return list\n first=ListNode(0)\n temp=first\n while not num==0:\n toadd=num%10\n num=num//10\n list=ListNode(toadd)\n temp.next=list\n temp=list\n return first.next\n\n\n\n def calnum(self,l1:ListNode) -> int:\n num=0\n base=1\n while True:\n num+=l1.val*base\n base*=10\n if not l1.next==None:\n l1=l1.next\n else:\n print(num)\n return num\n \n def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:\n num1=self.calnum(l1)\n num2=self.calnum(l2)\n sum=num1+num2\n return self.makelist(sum)" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 6.5714287757873535, "blob_id": "1f97f867538bd379232c3b936b8c8093e081145b", "content_id": "15d6968d2626984a5a7d874f14fbd8bcb231b985", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 84, "license_type": "no_license", "max_line_length": 18, "num_lines": 7, "path": "/readme.md", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "# Leetcode刷题记录\nLeetcode的刷题记录,随时更新\n\n# 语言\npython\nC++\nC" }, { "alpha_fraction": 0.4952789843082428, "alphanum_fraction": 0.5150214433670044, "avg_line_length": 21.86274528503418, "blob_id": "636bbdb71c1ef42bfd406bd1fc490276eb90bbb1", "content_id": "4b1b691bddbb2f4062bbee65ad5493376669a953", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1177, "license_type": "no_license", "max_line_length": 51, "num_lines": 51, "path": "/33.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\nusing namespace std;\n\nclass Solution{\npublic:\n int search(vector<int>& nums, int target);\n};\n\nint Solution::search(vector<int> &nums,int target){\n if(!nums.size()) return -1;\n int left=0;\n int right=nums.size()-1;\n int mid;\n while(left<=right){\n mid=(left+right)/2;\n if((mid>0&&nums[mid]<nums[mid-1])) break;\n if(nums[mid]<*(--nums.end())) right=mid-1;\n else left=mid+1;;\n }\n cout<<mid<<endl;\n if(target>*(--nums.end())){//在左半区里找\n left=0;\n right=mid-1;\n while(left<=right){\n mid=(left+right)/2;\n if(nums[mid]==target) return mid;\n if(nums[mid]<target) left=mid+1;\n else right=mid-1;\n }\n }else{\n left=mid;\n right=nums.size()-1;\n while(left<=right){\n mid=(left+right)/2;\n if(nums[mid]==target) return mid;\n if(nums[mid]<target) left=mid+1;\n else right=mid-1;\n } \n }\n return -1;\n\n}\n\nint main(){\n vector<int> nums={0,1,2};\n int target=1;\n Solution so;\n cout<<so.search(nums,target)<<endl;\n return 0;\n}" }, { "alpha_fraction": 0.5491803288459778, "alphanum_fraction": 0.5625931620597839, "avg_line_length": 25.860000610351562, "blob_id": "7f7542f7e0ba71309762a81325fc3f7a58e9de75", "content_id": "36bf3af7c942ca08280ad542692f5133d894637d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1388, "license_type": "no_license", "max_line_length": 75, "num_lines": 50, "path": "/15.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nclass Solution{\npublic:\n vector<vector<int>> threeSum(vector<int>& nums);\n int print(vector<int> & nums);\n};\n\nvector<vector<int>> Solution :: threeSum(vector<int> & nums){\n vector<vector<int>> answer;\n int n=nums.size();\n sort(nums.begin(),nums.end());\n for(int first=0;first<n;first++){\n if(first>0&&nums[first]==nums[first-1]) continue;//如果和上一个重复\n for(int second=first+1;second<n;second++){\n 
if(second>0&&nums[second]==nums[second-1]) continue;\n int third=n-1;//此时第三个指针指向末尾\n while(third>second&&(nums[first]+nums[second]+nums[third])>0){\n third--;\n }\n if(second==third) break;\n if((nums[first]+nums[second]+nums[third])==0){//满足条件\n vector<int> element={nums[first],nums[second],nums[third]};\n answer.push_back(element);\n }\n }\n }\n return answer;\n}\n\n\nint Solution :: print(vector<int> &nums){\n vector<vector<int>> answers=this->threeSum(nums);\n int n=answers.size();\n for(int i=0;i<n;i++){\n for(int j=0;j<3;j++) cout<<answers[i][j]<<',';\n cout<<endl;\n }\n return n;\n}\n\n\nint main(){\n Solution so;\n vector<int> nums={-1,0,1,2,-1,-4};\n return so.print(nums);\n}" }, { "alpha_fraction": 0.5991082191467285, "alphanum_fraction": 0.6059991717338562, "avg_line_length": 24.183673858642578, "blob_id": "7076e1c340a0baabe16d132e3a73d86d9287eb7c", "content_id": "0eba966ee41ff36f9313ee8c001da15d92dcfe51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2801, "license_type": "no_license", "max_line_length": 96, "num_lines": 98, "path": "/51.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\n#include <string>\n#include <set>\nusing namespace std;\n\nclass Solution{\nprivate:\n int N;\n vector<bool> columns;//当前列是否可以放置\n vector<bool> left_diagonals;//左向对角线是否可以放置\n vector<bool> right_diagonals;//右向对角线是否可以放置\n set<pair<int,int>> qipan;//保存皇后的位置\n vector<vector<string>> answers;//最后的结果\n\n\npublic:\n vector<vector<string>> solveNQueens(int n);\n void backtrack(int);\n bool is_place(int,int);\n void place(int,int);\n void displace(int,int);\n void addanswer();\n};\n\nvoid Solution::addanswer(){//将当前set集合中的点转化为\n vector<string> possible_answer(N);\n set<pair<int,int>>:: iterator iter=qipan.begin();\n while(iter!=qipan.end()){\n string s(N,'.');\n int rows=(*iter).first;\n int cols=(*iter).second;\n s[cols]='Q';\n possible_answer[rows]=s;\n iter++;\n }\n answers.push_back(possible_answer);\n}\n\nbool Solution::is_place(int row,int col){\n return columns[col]&&left_diagonals[row+col]&&right_diagonals[N+row-col-1];//必须都是true\n}\n\nvoid Solution::displace(int row,int col){\n pair<int,int> queen(row,col);\n qipan.erase(queen);\n columns[col]=true;//标记不可放置区域\n left_diagonals[row+col]=true;\n right_diagonals[N+row-col-1]=true; \n}\n\nvoid Solution::place(int row,int col){\n pair<int,int> queen(row,col);\n qipan.insert(queen);//放置在棋盘上\n columns[col]=false;//标记不可放置区域\n left_diagonals[row+col]=false;\n right_diagonals[N+row-col-1]=false;\n\n}\n\nvector<vector<string>> Solution:: solveNQueens(int n){\n columns.resize(n);\n left_diagonals.resize(2*n-1);\n right_diagonals.resize(2*n-1);\n N=n;\n for(int i=0;i<columns.size();i++) columns[i]=true;//初始化为可放置\n for(int i=0;i<left_diagonals.size();i++) left_diagonals[i]=right_diagonals[i]=true;//初始化为可放置\n backtrack(0);\n return answers;\n}\n\nvoid Solution::backtrack(int row){//回溯法,从第n行开始\n for(int col=0;col<N;col++){//判断第row行,第col列是否可以放置棋子\n if(is_place(row,col)){//可以放置\n place(row,col);\n if(row==N-1){//向结果集合中加入本结果【同时也是递归的退出条件】\n addanswer();\n }else{//递归调用回溯法\n backtrack(row+1);\n }\n displace(row,col);\n }//不可以放置直接探索下一个位置\n }\n}\n\n\nint main(){\n int n=5;\n Solution so;\n vector<vector<string>> ans=so.solveNQueens(n);\n for(int i=0;i<ans.size();i++){\n for(int j=0;j<n;j++){\n cout<<ans[i][j]<<endl;\n }\n cout<<endl;\n }\n return 0;\n}" }, { "alpha_fraction": 0.5548281669616699, "alphanum_fraction": 0.579378068447113, 
"avg_line_length": 20.85714340209961, "blob_id": "0a4292113e1d74ce3c38f44492bf9baf46b73338", "content_id": "642f4ea65a759b161f40eb32f5f888041eb7f6f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 611, "license_type": "no_license", "max_line_length": 59, "num_lines": 28, "path": "/53.max_sum.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\n#include <algorithm>\nusing namespace std;\n\nclass Solution{\npublic:\n int maxSubArray(vector<int>& nums);\n};\n\nint main(){\n Solution so;\n vector<int> nums={-2,1,-3,4,-1,2,1,-5,4};\n cout<<so.maxSubArray(nums)<<endl;\n return 0;\n}\n\nint Solution::maxSubArray(vector<int> & nums){\n vector<int> max_until(nums.size());\n for(int i=0;i<nums.size();i++){\n if(i==0){\n max_until[0]=nums[0];\n continue;\n }\n max_until[i]=max(max_until[i-1]+nums[i],nums[i]);\n }\n return *max_element(max_until.begin(),max_until.end());\n}" }, { "alpha_fraction": 0.6145161390304565, "alphanum_fraction": 0.6231182813644409, "avg_line_length": 23.16883087158203, "blob_id": "a41904fafae708bb5348c19ba69e80e222cc970d", "content_id": "7d52c4359b490fa01ad6d657712fedb4a233bff8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2162, "license_type": "no_license", "max_line_length": 96, "num_lines": 77, "path": "/52.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\n#include <string>\n#include <set>\nusing namespace std;\n\nclass Solution{\nprivate:\n int N;\n int answers;\n vector<bool> columns;//当前列是否可以放置\n vector<bool> left_diagonals;//左向对角线是否可以放置\n vector<bool> right_diagonals;//右向对角线是否可以放置\n set<pair<int,int>> qipan;//保存皇后的位置\n\npublic:\n int totalNQueens(int n);\n void backtrack(int);\n bool is_place(int,int);\n void place(int,int);\n void displace(int,int);\n};\n\nbool Solution::is_place(int row,int col){\n return columns[col]&&left_diagonals[row+col]&&right_diagonals[N+row-col-1];//必须都是true\n}\n\nvoid Solution::displace(int row,int col){\n pair<int,int> queen(row,col);\n qipan.erase(queen);\n columns[col]=true;//标记不可放置区域\n left_diagonals[row+col]=true;\n right_diagonals[N+row-col-1]=true; \n}\n\nvoid Solution::place(int row,int col){\n pair<int,int> queen(row,col);\n qipan.insert(queen);//放置在棋盘上\n columns[col]=false;//标记不可放置区域\n left_diagonals[row+col]=false;\n right_diagonals[N+row-col-1]=false;\n\n}\n\nint Solution:: totalNQueens(int n){\n columns.resize(n);\n left_diagonals.resize(2*n-1);\n right_diagonals.resize(2*n-1);\n N=n;\n answers=0;\n for(int i=0;i<columns.size();i++) columns[i]=true;//初始化为可放置\n for(int i=0;i<left_diagonals.size();i++) left_diagonals[i]=right_diagonals[i]=true;//初始化为可放置\n backtrack(0);\n return answers;\n}\n\nvoid Solution::backtrack(int row){//回溯法,从第n行开始\n for(int col=0;col<N;col++){//判断第row行,第col列是否可以放置棋子\n if(is_place(row,col)){//可以放置\n place(row,col);\n if(row==N-1){//向结果集合中加入本结果【同时也是递归的退出条件】\n answers++;\n }else{//递归调用回溯法\n backtrack(row+1);\n }\n displace(row,col);\n }//不可以放置直接探索下一个位置\n }\n}\n\n\nint main(){\n int n=5;\n Solution so;\n cout<<so.totalNQueens(n);\n return 0;\n}" }, { "alpha_fraction": 0.6801801919937134, "alphanum_fraction": 0.684684693813324, "avg_line_length": 18.34782600402832, "blob_id": "c045962bc2373baed0e506847eaafb249d3bd87e", "content_id": "e1aeaaf030c859a4baa9c1d7d93ea83fca20cd6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 
480, "license_type": "no_license", "max_line_length": 52, "num_lines": 23, "path": "/28.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <string>\n#include <cmath>\nusing namespace std;\n\nclass Solution{\npublic:\n int strstr(string haystack,string needle);\n};\n\nint Solution::strstr(string haystack,string needle){\n //算了,直接调用库函数吧,研究什么KMP算法\n if(needle.empty()) return 0;\n return haystack.find(needle);\n}\n\nint main(){\n string haystack=\"hello\";\n string needle=\"ll\";\n Solution so;\n cout<<so.strstr(haystack,needle)<<endl;\n return 0;\n}" }, { "alpha_fraction": 0.4457142949104309, "alphanum_fraction": 0.47428572177886963, "avg_line_length": 20.040000915527344, "blob_id": "6754dd3e869e210c5d3623ff650f643b464aee3f", "content_id": "5fd347483586ef25dae86912a206f8d943b8c90a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 573, "license_type": "no_license", "max_line_length": 47, "num_lines": 25, "path": "/9.回文数.py", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "##To do\n##可以优化,只需要翻转一半的数字\nclass Solution:\n def isPalindrome(self,x:int)->bool:\n if x<0:\n return False\n ##要求不转换为字符串\n mylist=[]\n while x>0:\n mylist.append(x%10)\n x//=10\n left=0\n right=len(mylist)-1\n while left<right:\n if not mylist[right]==mylist[left]:\n return False\n right-=1\n left+=1\n return True \n\n\n\nif __name__=='__main__':\n so=Solution()\n print(so.isPalindrome(12321))" }, { "alpha_fraction": 0.42073169350624084, "alphanum_fraction": 0.4359756112098694, "avg_line_length": 24.269229888916016, "blob_id": "985d9ed184ba97269a05a4ac4102d93e8a850a8f", "content_id": "7c4831165ec9d8b5844cce4915ef17274a56f16d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 678, "license_type": "no_license", "max_line_length": 54, "num_lines": 26, "path": "/3.无重复字符的最长子串.py", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "class Solution:\n def lengthOfLongestSubstring(self, s: str) -> int:\n if not s:\n return 0\n left=0\n right=1\n myset=set()\n myset.add(s[0])\n lenth=1\n max=1\n while right<len(s):\n if s[right] not in myset:#没有重复字符,窗口变长\n myset.add(s[right])\n lenth+=1\n if lenth>max:\n max=lenth\n right+=1\n else:\n myset.remove(s[left])\n lenth-=1\n left+=1\n return max\n\nif __name__ =='__main__':\n so=Solution()\n print(so.lengthOfLongestSubstring('pwwkew'))" }, { "alpha_fraction": 0.4112820625305176, "alphanum_fraction": 0.43282049894332886, "avg_line_length": 24.657894134521484, "blob_id": "c74fbf614a2176fff40f2bbad9b344818d85cd14", "content_id": "58170d1e7e9668af72140251dfdf0ac426e1e8e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 975, "license_type": "no_license", "max_line_length": 68, "num_lines": 38, "path": "/21.合并两个有序链表.py", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "\nclass ListNode:\n def __init__(self, val=0, next=None):\n self.val = val\n self.next = next\n\nclass Solution:\n def printList(self,l:ListNode) -> ListNode:\n while not l == None:\n print(l.val)\n l=l.next\n print('end')\n\n def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n l=ListNode()\n start=l\n while (not l1==None) and (not l2==None):\n if l1.val<l2.val:\n l.next=l1\n l1=l1.next\n l=l.next\n l.next=None\n else:\n l.next=l2\n l2=l2.next\n l=l.next\n l.next=None\n self.printList(l)\n while not l1==None:\n l.next=l1\n l1=l1.next\n 
l=l.next\n            l.next=None\n        while not l2==None:\n            l.next=l2\n            l2=l2.next\n            l=l.next\n            l.next=None\n        return start.next" }, { "alpha_fraction": 0.5234042406082153, "alphanum_fraction": 0.5446808338165283, "avg_line_length": 18.115385055541992, "blob_id": "0cd62e2b96c4fa012328f3a98d984f63303e0ce4", "content_id": "00e230336f0f048fcac76084d41d42348217be34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 470, "license_type": "no_license", "max_line_length": 78, "num_lines": 26, "path": "/64.minpath.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\nusing namespace std;\n\nclass Solution{\npublic:\n    int minPathSum(vector<vector<int>>& grid);\n};\n\nint Solution::minPathSum(vector<vector<int>>& grid){\n    int m=grid.size();\n    int n=grid[0].size();\n    int i,j;\n    for(j=1;j<n;j++) grid[0][j]+=grid[0][j-1];//row 0: only path is from the left\n    for(i=1;i<m;i++) grid[i][0]+=grid[i-1][0];//col 0: only path is from above\n    for(i=1;i<m;i++){\n        for(j=1;j<n;j++){\n            grid[i][j]+=(grid[i-1][j]<grid[i][j-1])?grid[i-1][j]:grid[i][j-1];\n        }\n    }\n    return grid[m-1][n-1];\n}\n\nint main(){\n    Solution mySo;\n\n    return 0;\n}" }, { "alpha_fraction": 0.45132744312286377, "alphanum_fraction": 0.47345131635665894, "avg_line_length": 31.314285278320312, "blob_id": "04f3476f5fa353302a2fbcf5a1400c4f2dd1d19a", "content_id": "577b3f41ebae92ebfdcef990d210560701894642", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1160, "license_type": "no_license", "max_line_length": 66, "num_lines": 35, "path": "/42.接雨水.py", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "from typing import List\nclass Solution:\n    def trap_stack(self,height:List[int]) -> int:\n        stack=[]\n        sum=0\n        for i,num in enumerate(height):\n            while (not stack==[]) and (num>height[stack[-1]]):\n                top=stack.pop()\n                if stack==[]:\n                    break\n                distance=i-1-stack[-1]\n                size_height=min(num,height[stack[-1]])-height[top]\n                sum+=distance*size_height\n            stack.append(i)\n        return sum\n\n    def trap(self, height: List[int]) -> int:\n        max_layer=max(height)\n        sum=0##总雨量\n        for layers in range(1,max_layer+1):#分层计算可以容纳的雨水数\n            for left in range(0,len(height)):\n                if height[left]>=layers:\n                    break\n            for right in range(len(height)-1,-1,-1):\n                if height[right]>=layers:\n                    break\n            lay=height[left:right]\n            for num in lay:\n                if num<layers:\n                    sum+=1\n        return sum \n\nif __name__=='__main__':\n    so=Solution()\n    print(so.trap_stack([0,1,0,2,1,0,1,3,2,1,2,1]))" }, { "alpha_fraction": 0.38247862458229065, "alphanum_fraction": 0.4316239356994629, "avg_line_length": 21.33333396911621, "blob_id": "c53e46d39c0b82b6ab59c2c7e956246192fb0e25", "content_id": "6235fbfa315d3e91caf78830227dfe57b7c70c8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 468, "license_type": "no_license", "max_line_length": 49, "num_lines": 21, "path": "/7.整数反转.py", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "class Solution:\n    def reverse(self, x: int) -> int:\n        reverse=0\n        flag=True\n        if x<0:\n            x=-1*x\n            flag=False\n        while not x==0:\n            reverse*=10\n            r=x%10\n            x=x//10\n            reverse+=r\n        if not flag:\n            reverse*=-1\n        if reverse>(2**31)-1 or reverse<-1*2**31:\n            reverse=0\n        return reverse\n\nif __name__=='__main__':\n    so=Solution()\n    print(so.reverse(125))" }, { "alpha_fraction": 0.494923859834671, "alphanum_fraction": 0.5126903653144836, "avg_line_length": 29.384614944458008, "blob_id": "0b249694447539042c327c554876947d665953a", "content_id": "306cd3d7e03ead57472007196d01b086ba72d9c0", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 394, "license_type": "no_license", "max_line_length": 64, "num_lines": 13, "path": "/1.两数之和.py", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "from typing import List\nclass Solution:\n def twoSum(self, nums: List[int], target: int) -> List[int]:\n dict={}\n for i in range(0,len(nums)):\n if (target-nums[i]) in dict:\n return (dict[target-nums[i]],i)\n if nums[i] not in dict:\n dict[nums[i]]=i\n\nif __name__=='__main__':\n so=Solution()\n print(so.twoSum([1,2,3,4,6],5))" }, { "alpha_fraction": 0.4580310881137848, "alphanum_fraction": 0.4797927439212799, "avg_line_length": 18.714284896850586, "blob_id": "8075321ab92db4a6afd21a4fe800986077409375", "content_id": "0c94274dd824c74d52685df1412868823dcaa782", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 977, "license_type": "no_license", "max_line_length": 72, "num_lines": 49, "path": "/8.字符串转换整数(atoi).c", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "/*\nTo do list:没有处理溢出\n*/\n#include <stdio.h>\n#include <limits.h>\nchar * delspace(char * str){\n char * p=str;\n while(*p==' ') p++;\n return p;\n}\n\nint myAtoi(char *str){\n char * newstr=delspace(str);\n if(*newstr!='+'&&*newstr!='-'&&(*newstr<'0'||*newstr>'9')) return 0;\n int negetive_flag=0;\n if(*newstr=='-'){\n negetive_flag=1;\n newstr++;\n }else if(*newstr=='+') newstr++;\n\n int num=0;\n int overflow_flag=0;\n while(*newstr>='0'&&*newstr<='9'){\n if(INT_MAX/10<num){\n overflow_flag=1;\n break;\n }\n num*=10;\n if(INT_MAX-(*newstr-'0')<num){\n overflow_flag=1;\n break;\n }\n num+=*newstr-'0';\n newstr++;\n }\n \n if(negetive_flag){\n if(overflow_flag) return INT_MIN;\n num*=-1;\n }\n if(overflow_flag) return INT_MAX;\n return num;\n}\n\nint main(){\n char * str=\"42\";\n printf(\"%d\",myAtoi(str));\n return 0;\n}" }, { "alpha_fraction": 0.4092009663581848, "alphanum_fraction": 0.433817595243454, "avg_line_length": 24.275510787963867, "blob_id": "4b4d35191195d31f2a54d3dc3ac61df63b713bb4", "content_id": "e9e34c2ae6fb2269c093954848268bc9066b0337", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2596, "license_type": "no_license", "max_line_length": 102, "num_lines": 98, "path": "/37.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\n#include <set>\nusing namespace std;\n\nclass Solution{\nprivate:\n //行、列、9宫格的不可选集\n vector<set<int>> rows;\n vector<set<int>> cols;\n vector<set<int>> tabs;\n vector<vector<char>> boards;\n bool flag;\npublic:\n void solveSudoku(vector<vector<char>>& board);\n int tabnum(int,int);\n void backtrack(int,int);\n void place(int,int,int);\n void displace(int,int,int);\n};\n\nvoid Solution::displace(int i,int j,int m){\n rows[i].erase(m);\n cols[j].erase(m);\n tabs[tabnum(i,j)].erase(m);\n boards[i][j]='.';\n}\n\nvoid Solution::place(int i,int j,int m){\n rows[i].insert(m);\n cols[j].insert(m);\n tabs[tabnum(i,j)].insert(m);\n boards[i][j]=m+'0';\n}\n\nint Solution::tabnum(int i,int j){\n return 3*(i/3)+j/3;\n}\n\nvoid Solution::backtrack(int i,int j){\n if(boards[i][j]!='.'){//如果已经预置,开启下一次回溯\n if(i==8&&j==8){\n flag=true;\n return;\n }\n if(j!=8) backtrack(i,j+1);\n else backtrack(i+1,0);\n }else{\n for(int m=1;m<=9;m++){//试探可以为几\n if(rows[i].count(m)+cols[j].count(m)+tabs[tabnum(i,j)].count(m)) continue;//是否冲突,冲突直接测试下一个\n place(i,j,m);//放置\n if((i==8&&j==8)||flag){//终止回溯\n flag=true;\n return;\n 
}\n            else{\n                if(j!=8) backtrack(i,j+1);\n                else backtrack(i+1,0);\n                if(flag) return;\n                displace(i,j,m);\n            }\n        }\n    }\n}\n\nvoid Solution :: solveSudoku(vector<vector<char>> & board){\n    boards=board;\n    flag=false;\n    //初始化各个可选集\n    rows.resize(9);\n    cols.resize(9);\n    tabs.resize(9);\n    for(int i=0;i<9;i++){\n        for(int j=0;j<9;j++){\n            if(board[i][j]=='.') continue;\n            rows[i].insert(board[i][j]-'0');\n            cols[j].insert(board[i][j]-'0');\n            tabs[tabnum(i,j)].insert(board[i][j]-'0');\n        }\n    }\n    backtrack(0,0);\n    board=boards;\n}\n\nint main(){\n    Solution s;\n    vector<vector<char>> board={\n        {'5','3','.','.','7','.','.','.','.'},\n        {'6','.','.','1','9','5','.','.','.'},\n        {'.','9','8','.','.','.','.','6','.'},\n        {'8','.','.','.','6','.','.','.','3'},\n        {'4','.','.','8','.','3','.','.','1'},\n        {'7','.','.','.','2','.','.','.','6'},\n        {'.','6','.','.','.','.','2','8','.'},\n        {'.','.','.','4','1','9','.','.','5'},\n        {'.','.','.','.','8','.','.','7','9'}};\n    s.solveSudoku(board);\n}\n\n" }, { "alpha_fraction": 0.30550283193588257, "alphanum_fraction": 0.33776092529296875, "avg_line_length": 24.119047164916992, "blob_id": "267dfac0092c9c6835b15c713f0c8b5729ab2e5a", "content_id": "3c91e65d8674709201116b2c661564d18168c7f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1054, "license_type": "no_license", "max_line_length": 51, "num_lines": 42, "path": "/12.py", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "class Solution:\n    def intToRoman(self,num:int)->str:\n        s=''\n        Roman=[['I','V'],['X','L'],['C','D'],['M']]\n        Thousand=num//1000\n        Hundred=(num%1000)//100\n        Ten=(num%100)//10\n        Inter=num%10\n        if Thousand:\n            s+=(Thousand*'M')\n        if Hundred:\n            if Hundred<=3:\n                s+=(Hundred*'C')\n            elif Hundred==4:\n                s+='CD'\n            elif Hundred<9:\n                s+='D'+(Hundred-5)*'C'\n            else:\n                s+='CM'\n        if Ten:\n            if Ten<=3:\n                s+='X'*Ten\n            elif Ten==4:\n                s+='XL'\n            elif Ten<9:\n                s+='L'+(Ten-5)*'X'\n            else:\n                s+='XC'\n        if Inter:\n            if Inter<=3:\n                s+='I'*Inter\n            elif Inter==4:\n                s+='IV'\n            elif Inter<9:\n                s+='V'+(Inter-5)*'I'\n            else:\n                s+='IX'\n        return s\n\nif __name__=='__main__':\n    so=Solution()\n    print(so.intToRoman(1000))" }, { "alpha_fraction": 0.42181816697120667, "alphanum_fraction": 0.43272727727890015, "avg_line_length": 21.933332443237305, "blob_id": "7e6f6e0b9bc8806d9140efd974d6597c253a393b", "content_id": "81baf3d84d835a799b368548c515fe6eb69e9ad2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1375, "license_type": "no_license", "max_line_length": 53, "num_lines": 60, "path": "/150.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\n#include <stack>\nusing namespace std;\n\nclass Solution{\npublic:\n    int evalRPN(vector<string>& tokens);\n    int symbol(string str); \n};\n\nint Solution :: symbol(string str){\n    if(str==\"+\") return 1;\n    if(str==\"-\") return 2;\n    if(str==\"*\") return 3;\n    if(str==\"/\") return 4;\n    return 0;\n}\n\nint Solution :: evalRPN(vector<string>& tockens){\n    stack<int> mystack;\n    int n=tockens.size();\n    int op;\n    for(int i=0;i<n;i++){\n        if(op=symbol(tockens[i])){\n            int right=mystack.top();\n            mystack.pop();\n            int left=mystack.top();\n            mystack.pop();\n            switch(op){\n                case 1:{\n                    mystack.push(left+right);\n                    break;\n                }\n                case 2:{\n                    mystack.push(left-right);\n                    break;\n                }\n                case 3:{\n                    mystack.push(left*right);\n                    break;\n                }\n                case 4:{\n                    mystack.push(left/right);\n                    break;\n                }\n            }\n        }else{\n            mystack.push(stoi(tockens[i]));\n        } \n    }\n    return 
mystack.top();\n}\n\nint main(){\n Solution so;\n vector<string> tocken={\"4\", \"13\", \"5\", \"/\", \"+\"};\n cout<<so.evalRPN(tocken);\n return 0;\n}" }, { "alpha_fraction": 0.5915697813034058, "alphanum_fraction": 0.6104651093482971, "avg_line_length": 21.225807189941406, "blob_id": "0cdcf4d69868ad163250e8fe1f633635f60c8982", "content_id": "4cc01a7b124f21eba0d8ef3287851f9b89c5e583", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 688, "license_type": "no_license", "max_line_length": 57, "num_lines": 31, "path": "/35.findposition.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\nusing namespace std;\n\nclass Solution{\npublic:\n int searchInsert(vector<int>& nums,int target);\n};\n\nint main(){\n Solution so;\n vector<int> nums={1,3,5,6};\n int answer=so.searchInsert(nums,5);\n cout<<answer<<endl;\n}\n\nint Solution::searchInsert(vector<int>& nums,int target){\n if(nums.empty()) return 0;\n int left=0;\n int right=nums.size()-1;\n int mid;\n while(left<=right){\n if(target<=nums[left]) return left;\n if(target>nums[right]) return right+1;\n mid=(left+right)/2;\n if(target==nums[mid]) return mid;\n if(target>nums[mid]) left=mid+1;\n else right=mid-1;\n }\n return 0;\n}" }, { "alpha_fraction": 0.4785478413105011, "alphanum_fraction": 0.5467546582221985, "avg_line_length": 26.280000686645508, "blob_id": "6d748f940b4d8dc85b686ce3d0ae73c057865768", "content_id": "352c75ead6a038dca3f3fe6d9ff551f68966f737", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3199, "license_type": "no_license", "max_line_length": 83, "num_lines": 100, "path": "/4.mid.cpp", "repo_name": "duowen1/learngit", "src_encoding": "UTF-8", "text": "#include <iostream>\n#include <vector>\nusing namespace std;\n\n/*\n实际上遍历的是空格\n*/\n\nclass Solution{\npublic:\n double findMedianSortedArrays(vector<int>& nums1,vector <int>& nums2);\n};\n\ndouble Solution :: findMedianSortedArrays(vector<int>& nums1, vector <int>& nums2){\n /*\n 如果合并两个数组,再求中位数,复杂度为O(m+n)\n 要求复杂度为O(log(m+n)),又是有序数组,故想到二分查找\n 假设两个数组已经被合并成一个大数组,可以得知,中位数前面的共有(m+n)/2个元素\n 这些元素都来自于两个数组,不妨设nums1贡献了l1个,nums2贡献了l2个,显然l1+l2=(m+n)/2\n 根据中位数的特点可以得知,nums1[l1]<=mid<=nums2[l2+1],且nums2[l2]<=mid<=nums1[l1+1]\n 故可以通过二分查找l1或者l2获得中位数\n */\n int L1max;\n int R1min;\n int L2max;\n int R2min;\n int nums1_n=nums1.size();\n int nums2_n=nums2.size();\n int length=nums1_n+nums2_n;//总数\n if(nums1_n>nums2_n) return findMedianSortedArrays(nums2,nums1);//保证了第一个数组的长度较小\n if(nums1_n==0){//返回nums2的中位数\n if(length%2) return nums2[length/2];\n else return (nums2[length/2]+nums2[length/2-1])/2.0;\n }\n if(nums1[nums1_n-1]<=nums2[0]){//nums1整体小于nums2\n if(length%2){//odd\n int left=length/2-nums1_n;\n return nums2[left];\n }else{\n if(nums1_n==nums2_n) return (nums1[nums1_n-1]+nums2[0])/2.0;\n else{\n int left=length/2-nums1_n;\n return (nums2[left]+nums2[left-1])/2.0;\n }\n }\n }\n if(nums1[0]>=nums2[nums2_n-1]){//nums1整体大于nums2\n if(length%2){\n return nums2[length/2];\n }else{\n if(nums1_n==nums2_n) return (nums1[0]+nums2[nums2_n-1])/2.0;\n else{\n return (nums2[length/2]+nums2[length/2-1])/2.0;\n }\n }\n }\n\n int left=0;\n int right=nums1_n-1;\n int mid,mid_2;\n while(left<=right){\n /*\n L1=nums1[0:mid],R1=nums1[mid+1:nums1_n-1]\n L2=nums1[0:mid_2],R2=nums2[mid_2+1,nums2_n-1]\n */\n mid=(left+right)/2;//有mid+1个\n mid_2=(length+1)/2-(mid+1)-1;\n /*\n 总数是偶数时,L1、L2数量和为总数的一半\n 总数时奇数时,L1、L2数量和为总数+1的一半\n */\n 
L1max=(mid==-1?INT_MIN:nums1[mid]);\n R1min=(mid==nums1_n-1?INT_MAX:nums1[mid+1]);\n L2max=(mid_2==-1?INT_MIN:nums2[mid_2]);\n R2min=(mid_2==nums2_n-1?INT_MAX:nums2[mid_2+1]);\n if(L1max<=R2min){\n /*\n 此时满足条件1,满足条件2时,退出\n */\n if(R1min>=L2max) break;\n left=mid+1;\n }else{\n right=mid-1;\n }\n }\n //奇数返回中值,偶数返回平均数\n\n if(length%2){//old\n return max(nums1[mid],nums2[mid_2]);\n }else{\n return (max(nums1[mid],nums2[mid_2])+min(R1min,R2min))/2.0;\n }\n}\n\nint main(){\n vector<int> nums1={4};\n vector<int> nums2={1,2,3,5};\n Solution so;\n cout<<so.findMedianSortedArrays(nums1,nums2)<<endl;\n}" } ]
26
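The learngit record above bundles LeetCode solutions; its 51.cpp/52.cpp N-Queens files test conflicts in O(1) by tracking occupied columns plus the row+col and N+row-col-1 diagonals. As an illustrative sketch only (not a file from the repo), the same bookkeeping in Python looks like this:

```python
def total_n_queens(n):
    # Same conflict test as the repo's 51.cpp/52.cpp: a queen at (row, col)
    # occupies column col, "left" diagonal row+col, "right" diagonal n+row-col-1.
    cols, left, right = set(), set(), set()
    count = 0

    def backtrack(row):
        nonlocal count
        if row == n:
            count += 1
            return
        for col in range(n):
            if col in cols or row + col in left or n + row - col - 1 in right:
                continue  # square is attacked; try the next column
            cols.add(col); left.add(row + col); right.add(n + row - col - 1)
            backtrack(row + 1)
            cols.discard(col); left.discard(row + col); right.discard(n + row - col - 1)

    backtrack(0)
    return count

print(total_n_queens(5))  # expected: 10
```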
DevHyung/Foursqure-Scrapping
https://github.com/DevHyung/Foursqure-Scrapping
678201dcb06921682b9bb4b878953e824d2a669d
d0093ad30e6a476dbf52f8d19bd0d9b8b269ffce
68400ca0c49d0f6140dc5f3a0d62abe377db8280
refs/heads/master
2020-06-24T23:13:22.455944
2017-07-12T00:23:52
2017-07-12T00:23:52
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5227599143981934, "alphanum_fraction": 0.533642053604126, "avg_line_length": 43.60123825073242, "blob_id": "f0f7abb456ab86a0064585eb7b4e0a6f534aabf3", "content_id": "c729cd0f698c357a004547da9b4a52d0ca762527", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23401, "license_type": "no_license", "max_line_length": 117, "num_lines": 484, "path": "/main.py", "repo_name": "DevHyung/Foursqure-Scrapping", "src_encoding": "UTF-8", "text": "#_*_ coding:utf-8 _*_\nimport re\nfrom bs4 import BeautifulSoup\nimport xlsxwriter\nimport lxml\nimport math\nimport urllib\nfrom six.moves import urllib\nfrom urllib import quote\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nclass restaurant:\n def __init__(self, bs,menubs):\n self.r_rating = 0 #rating\n self.r_name = \"\" #음식점이름\n self.r_address = \"\" #음식점주소\n self.r_menulist = [] #메뉴리스트\n self.r_reviewtotalcnt = 0 #총리뷰수\n self.r_bs = bs #해당 레스토랑 bs파싱내용\n self.r_menubs = menubs#menu bs파싱내용\n self.r_time = \"\"\n def get_info_by_bs(self):\n try:\n self.r_name = self.r_bs.find(itemprop='name').get_text()\n except:\n self.r_name = \"open error\"\n try:\n self.r_rating = self.r_bs.find(itemprop ='ratingValue').get_text()\n except:\n self.r_rating = 0\n try:\n self.r_address = self.r_bs.find(itemprop ='address').get_text()\n except:\n self.r_address = \"open error\"\n try:\n venuekey = self.r_bs.find_all(\"div\", class_=\"venueRowKey\")\n venueall = self.r_bs.find_all(\"div\", class_=\"venueRowValue\")\n for idx in range(len(venuekey)):\n tmptxt =venuekey[idx].get_text().encode(\"utf-8\")\n if tmptxt == \"메뉴\":\n self.r_time = venueall[idx].get_text().encode(\"utf-8\")\n break;\n except:\n self.r_time = \"\"\n if self.r_menubs != None: #메뉴판있음\n self.r_menulist = self.r_menubs.find_all(\"div\", class_=\"menuHeader\")\n try:\n self.r_reviewtotalcnt = self.r_bs.find(\"span\", class_=\"sectionCount\").get_text()\n self.r_reviewtotalcnt = self.r_reviewtotalcnt.replace(',', '')\n except:\n self.r_reviewtotalcnt = 49\n\n for idx in range(len(self.r_menulist)):\n self.r_menulist[idx] = self.r_menulist[idx].get_text().encode(\"utf-8\")\n#print self.r_reviewtotalcnt\n#print self.r_menulist, len(self.r_menulist)\n#print self.r_name\n#print self.r_rating\n#print self.r_address\nclass human:\n def __init__(self, link, name, date):\n self.name = name#name\n self.date = date#날짜\n self.link = link # 아이뒤링크\n self.tip = 0 #팁개수\n self.flink = \"\"#페이스북링크\n def printinfo(self):\n print self.name, self.date, self.tip, self.flink\n def set_tip_and_flink(self,_tip,_flink):\n self.tip =_tip\n self.flink = _flink\nclass collection:\n def __init__(self, url,totalcnt,firstbs):\n self.c_menulist = [] # 언급된 메뉴리스트\n self.c_reviewlist = [] # 키워드가 언급된 리뷰리스트 1차원@\n self.c_menu_scorelist_2d = [] #각메뉴의 5등급 스코어 점수가 저장된 2차원배열\n self.c_menu_categorylist_2d = [] # 각메뉴의 카테고리가 저장되어 있는 2차원배열\n self.c_menu_foodchar = [] #음식특징\n self.c_imglist = [] # 이미지경로저장할 배열@\n self.c_humanlist = [] # humaninfo를 저장할 클래스 리스트\n self.c_totalreviewcnt = totalcnt\n self.c_url = url\n self.c_bs = firstbs\n self.c_foodidxlist = [] # 실행속도를 높이기위해 food가 있는 review 리스트의 idx\n def extract_userinfo_byurl(self):\n errorlist = []\n for human in self.c_humanlist:\n url = \"https://ko.foursquare.com\" + human.link\n bs = get_bs_by_url(url)\n if bs is not None:\n try:\n tip = bs.find(\"span\", class_=\"stat\").find(\"strong\").get_text()\n except:\n print \"지금오류걸림\"\n print human.link\n tip = 0\n try:# 페북링크없으면\n flink = bs.find(\"a\", 
class_=\"fbLink iconLink\")\n flink = flink['href']\n except:\n flink = \"\"\n human.set_tip_and_flink(tip,flink)\n #human.printinfo()\n else:\n print \"userinfo_byurl error 3->\", human.link\n errorlist.append(human)\n def extract_food(self, menulist):\n global c_keword\n global G_categorylist\n global G_footcharlist\n if not menulist == []: #메뉴가 있으면\n for food in menulist:\n foodscore = [0 for i in range(c_keword.gradecnt)] # 푸드마다 스코어 리스트생성\n idx = 0\n foodtxt = food.lower()\n for review in self.c_reviewlist:\n foodchartxt = \"\" # 음식char생성\n foodcategory = \"\" # 푸드카테고리생성\n reviewtxt = review.lower()\n if not reviewtxt.find(foodtxt) == -1: # 리뷰속에서 음식을 찾으면\n for category in G_categorylist:\n categorytxt =category.lower()\n if not foodtxt.count(categorytxt) == 0: # 하나라도 있다, 추가시켜주기\n foodcategory = foodcategory + categorytxt + \",\"\n IsExist = False\n try:\n foodidx = self.c_menulist.index(food) # 이미 수집음식목록에 있나본다\n foodscore = self.c_menu_scorelist_2d[foodidx] #있으면 음 음 음 기존의 푸드스코어 배열을 주고\n self.c_foodidxlist.append(idx) # ㄱ속도향상을위해 review인덱스를 넘겨준다\n IsExist = True\n except: #없음\n self.c_menulist.append(food) # 음식을 추가하고\n self.c_foodidxlist.append(idx)\n #자이제 등급을매기자\n for gradeidx in range(0, c_keword.gradecnt): # 등급을 매기는 키워드 idx 끝까지돌면서\n for keyword in c_keword.keyword[gradeidx]: # 반환값은 해당 idx의 키워드 리스트\n if not reviewtxt.count(keyword) == 0: # 하나라도 있다, 추가시켜주기\n foodscore[gradeidx] = foodscore[gradeidx] + reviewtxt.count(keyword) # 스코어 값늘려주기\n foodchartxt = foodchartxt + keyword + \",\"\n for foodchar in G_footcharlist:\n chartxt = foodchar.lower()\n if not reviewtxt.count(chartxt) == 0: # 하나라도 있다, 추가시켜주기\n foodchartxt = foodchartxt + chartxt + \",\"\n\n if IsExist: #이미있던거\n self.c_menu_scorelist_2d[foodidx] = foodscore #갱신\n self.c_menu_categorylist_2d[foodidx] = foodcategory\n self.c_menu_foodchar[foodidx] = foodchartxt\n else: #없었으면추가\n self.c_menu_scorelist_2d.append(foodscore)\n self.c_menu_categorylist_2d.append(foodcategory)\n self.c_menu_foodchar.append(foodchartxt)\n idx = idx + 1\n#print self.c_menulist, len(self.c_menulist)\n#print self.c_menu_scorelist_2d, len(self.c_menu_scorelist_2d)\n#print self.c_menu_categorylist_2d, len(self.c_menu_categorylist_2d)\n def get_fillter_review(self):\n global c_keword\n query = \"tipsSort=popular&tipsPage=\" #?해서넘어온다\n try:\n pagecnt = int(math.ceil(int(self.c_totalreviewcnt) / 50.0))\n except:\n pagecnt = 1\n #우선첫번째꺼기입\n tmpreviewlist = self.c_bs.find_all(\"div\",class_=\"tipText\")\n imglist = self.c_bs.find_all(\"img\", class_=\"tipPhoto\")\n namelist = self.c_bs.find_all(\"span\",class_=\"userName\") # 이름 리스트 여기안에 href도 존재\n for remove in namelist[50:]: #50개이후는지움\n namelist.remove(remove)\n idlinklist =[]\n datelist = self.c_bs.find_all(\"span\",class_=\"tipDate\") #날짜리스트\n #날짜\n for i in range(len(datelist)):\n datelist[i] = datelist[i].get_text().encode('utf-8')\n #이름및 링크\n for i in range(len(namelist)):\n if namelist[i].find('a') is not None:\n idlinklist.append( namelist[i].find('a')['href'] )\n namelist[i] = namelist[i].get_text().encode('utf-8')\n#print idlinklist, len(idlinklist)\n#print namelist, len(namelist)\n #이미지는 다필요\n\n for i in imglist:\n self.c_imglist.append( i['src'] )\n idx = 0\n for tmpreview in tmpreviewlist:\n IsSearch = False\n tmpreview = tmpreview.get_text().encode('utf-8')\n tmpreview = tmpreview.lower() # 모든 문자를 소문자로 변환\n for gradeidx in range(0, c_keword.gradecnt): # 등급을 매기는 키워드 idx 끝까지돌면서\n if IsSearch is True:\n break;\n for keyword in c_keword.keyword[gradeidx]: # 반환값은 해당 idx의 키워드 리스트\n if not 
tmpreview.find(keyword) == -1: # 하나라도 찾았다\n self.c_reviewlist.append(tmpreview)\n try:\n self.c_humanlist.append(human(idlinklist[idx],namelist[idx],datelist[idx]))\n except:\n pass\n IsSearch = True\n break;\n idx +=1\n# print self.c_reviewlist, len(self.c_reviewlist)\n# for i in self.c_humanlist:\n# i.printinfo()\n# 나머지꺼 2개이상의 페이지를 가질때만 돌린다\n if pagecnt > 1:\n for i in range(2, pagecnt + 1):\n roofcnt = 0\n html = self.c_url + query + str(i)\n while (roofcnt < 2):\n try:\n f = urllib.request.urlopen(html)\n break;\n except:\n urllib.request.urlcleanup()\n print \"get_fillter_review error 2 : urlopen재시도중\"\n roofcnt += 1\n if not roofcnt == 2:\n resultXML = f.read()\n bs = BeautifulSoup(resultXML, \"lxml\")\n tmpreviewlist = bs.find_all(\"div\", class_=\"tipText\")\n imglist = bs.find_all(\"img\", class_=\"tipPhoto\")\n namelist = bs.find_all(\"span\", class_=\"userName\") # 이름 리스트 여기안에 href도 존재\n if len(namelist) > 50:\n for remove in namelist[50:]: # 50개이후는지움\n namelist.remove(remove)\n idlinklist = []\n datelist = bs.find_all(\"span\", class_=\"tipDate\") # 날짜리스트\n # 날짜\n for i in range(len(datelist)):\n datelist[i] = datelist[i].get_text().encode('utf-8')\n # 이름및 링크\n for i in range(len(namelist)):\n if namelist[i].find('a') is not None:\n idlinklist.append(namelist[i].find('a')['href'])\n namelist[i] = namelist[i].get_text().encode('utf-8')\n # print idlinklist, len(idlinklist)\n # print namelist, len(namelist)\n # 이미지는 다필요\n for i in imglist:\n self.c_imglist.append(i['src'])\n idx = 0\n for tmpreview in tmpreviewlist:\n IsSearch = False\n tmpreview = tmpreview.get_text().encode('utf-8')\n tmpreview = tmpreview.lower() # 모든 문자를 소문자로 변환\n for gradeidx in range(0, c_keword.gradecnt): # 등급을 매기는 키워드 idx 끝까지돌면서\n if IsSearch is True:\n break;\n for keyword in c_keword.keyword[gradeidx]: # 반환값은 해당 idx의 키워드 리스트\n if not tmpreview.find(keyword) == -1: # 하나라도 찾았다\n self.c_reviewlist.append(tmpreview)\n self.c_humanlist.append(human(idlinklist[idx], namelist[idx], datelist[idx]))\n IsSearch = True\n break;\n idx += 1\n\n#print self.c_reviewlist, len(self.c_reviewlist)\n#print self.c_imglist, len(self.c_imglist)\n#print len(self.c_humanlist#)\n#for i in self.c_humanlist:\n# i.printinfo()\n\ndef get_bs_by_url(_url):\n #KJVAVYPHOHNYVHKGOVBOQGAC5322EFAZILJE0DW3IX1DBPMW\n #SPUY1SE0AK2JEYC4NAXINQZPRMVGOJJRZ0DMHRWJ5OTXB3MM\n html = _url\n cnt = 0\n while( cnt < 3):\n try:\n f = urllib.request.urlopen(html)\n break;\n except urllib.error.HTTPError as e:\n print(e.reason)\n urllib.request.urlcleanup()\n print \"get_bs_by_url error 1 : urlopen재시도중\"\n try:\n f = urllib.request.urlopen(html)\n break;\n except:\n urllib.request.urlcleanup()\n print \"리셋후재연결중\"\n cnt += 1\n if cnt == 3: return None\n resultXML = f.read()\n bs = BeautifulSoup(resultXML, \"lxml\")\n#print bs.prettify()\n return bs\nclass get_keword_by_txt:\n def __init__(self,filename):\n self.keyword = [] # 등급별 키워드 2차원배열\n self.gradecnt = 0 #등급이 몇분류 까지 되어있는지\n with open(filename, \"r\") as f:\n line = f.read().decode(\"utf-8-sig\").encode(\"utf-8\")\n content = line.split('@')\n grade_list = []\n for i in range(0, len(content)):\n if not content[i] == '':\n grade_list.append(content[i].strip())#공백제거\n self.gradecnt = len(grade_list) #몇등급 분류되어있는지 가져온다\n\n for keyword in grade_list:\n key = keyword.split(\":\")[1]\n #print key\n self.keyword.append(list(key.split(','))) # 2차원 배열로 정리\n #print self.keyword , len(self.keyword), (self.gradecnt)\ndef get_category_by_txt(filename):\n with open(filename, \"r\") as f:\n line = 
f.read().decode(\"utf-8-sig\").encode(\"utf-8\")\n content = line.split('/')\n for idx in range(len(content)):\n content[idx] = content[idx].strip()\n return content\ndef make_excel(collectclass,restaurantinfoclass, num, workbook):\n sheetname = '(' + str(num) + ').'+restaurantinfoclass.r_name\n try:\n worksheet = workbook.add_worksheet(sheetname)\n except:\n a = ['[', ']', ':', '*', '?', '/']\n b = ' \\ '\n a.append(b.strip())\n print \"워크시트이름 오류 :\",sheetname\n sheetname = sheetname [:10] + \"...\"\n print sheetname\n try:\n worksheet = workbook.add_worksheet(sheetname)\n except:\n print \"워크시트 이름최종오류2\"\n sheetname = sheetname[:3]+\"...\"\n for i in a:\n sheetname = sheetname.replace(i, '')\n worksheet = workbook.add_worksheet(sheetname)\n worksheet.set_column('A:A', 6) # 레이팅\n worksheet.set_column('B:B', 25) # 주소\n worksheet.set_column('C:C', 20) # 업체이름\n col_list = ['D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O','P','Q','R']\n for j in range(0, 5): # 레이팅적는곧\n worksheet.set_column(col_list[j] + \":\" + col_list[j], 6)\n sangidx = 5\n worksheet.set_column(col_list[sangidx] + \":\" + col_list[sangidx], 15) # 상품특징\n snagjong = sangidx+1\n worksheet.set_column(col_list[snagjong] + \":\" + col_list[snagjong], 15) # 음식종류\n sangtime = snagjong+1\n worksheet.set_column(col_list[sangtime] + \":\" + col_list[sangtime], 15) # 음식종류\n blankidx = sangtime + 1\n worksheet.set_column(col_list[blankidx] + \":\" + col_list[blankidx], 2) # 빈칸\n reidx = blankidx + 1\n worksheet.set_column(col_list[reidx] + \":\" + col_list[reidx], 100) # 리뷰\n humanidx = reidx+1\n worksheet.set_column(col_list[humanidx] + \":\" + col_list[humanidx], 20) # 이름\n humanidx2 =humanidx+1\n worksheet.set_column(col_list[humanidx2] + \":\" + col_list[humanidx2], 20) # 날짜\n humanidx3 =humanidx2+ 1\n worksheet.set_column(col_list[humanidx3] + \":\" + col_list[humanidx3], 20) # flink\n humanidx4 =humanidx3+ 1\n worksheet.set_column(col_list[humanidx4] + \":\" + col_list[humanidx4], 5) # tip\n humanidx5 =humanidx4+ 1\n worksheet.set_column(col_list[humanidx5] + \":\" + col_list[humanidx5], 50) # img\n # 첫줄\n format = workbook.add_format()\n format.set_font_size(8)\n format.set_bold()\n format.set_align('center')\n format.set_bg_color('yellow')\n format.set_border(True)\n # 한글은 앞에 u자붙여라\n worksheet.write('A1', \"Rating\", format)\n worksheet.write('B1', u\"업체명(위) / 주소(아래)\", format)\n worksheet.write('C1', u\"음식명\", format)\n for k in range(0, 5): # 레이팅적는곧\n worksheet.write(col_list[k] + \"1\", \"[\" + str(k + 1) + \"]\", format)\n worksheet.write(col_list[sangidx] + \"1\", u\"음식특징\", format)\n worksheet.write(col_list[snagjong] + \"1\", u\"음식종류\", format)\n worksheet.write(col_list[sangtime] + \"1\", u\"시간대\", format)\n worksheet.write(col_list[blankidx] + \"1\", \"\", format)\n worksheet.write(col_list[reidx] + \"1\", \"filltered review\", format)\n worksheet.write(col_list[humanidx] + \"1\", \"user id\", format)\n worksheet.write(col_list[humanidx2] + \"1\", \"date\", format)\n worksheet.write(col_list[humanidx3] + \"1\", \"facebook link\", format)\n worksheet.write(col_list[humanidx4] + \"1\", \"Tip\", format)\n worksheet.write(col_list[humanidx5] + \"1\", \"img url\", format)\n #입력시작\n format = workbook.add_format()\n format.set_font_size(8)\n format.set_align('center')\n spformat = workbook.add_format()\n spformat.set_font_size(8)\n spformat.set_align('center')\n spformat.set_bg_color('red')\n #기본적인 정보 입력\n row_start = 1 # 시작할 행넘버\n worksheet.write(row_start, 0, restaurantinfoclass.r_rating, format)\n 
worksheet.write(row_start, 1, restaurantinfoclass.r_name, format)\n worksheet.write(row_start + 1, 1, restaurantinfoclass.r_address, format)\n #음식입력\n for idx in range(0, len(collectclass.c_menulist)): # 수집된 음식 개수만큼돈다\n worksheet.write(row_start, 2, collectclass.c_menulist[idx], format)\n for q in range(0, 5): # 레이팅적는곧\n worksheet.write(row_start, 3 + q, collectclass.c_menu_scorelist_2d[idx][q], format)\n worksheet.write(row_start, 2, collectclass.c_menulist[idx], format)\n worksheet.write(row_start, 3 + 5, collectclass.c_menu_foodchar[idx], format) # 음식특징\n worksheet.write(row_start, 3 + 6, collectclass.c_menu_categorylist_2d[idx], format)#음식카테고리\n worksheet.write(row_start, 3 + 7, restaurantinfoclass.r_time, format) # 음식카테고리\n row_start += 1\n # Write some numbers, with row/column notation.\n row_start = 1 # 시작할 행넘버\n blankidx = blankidx + 3\n reidx = reidx + 3 # 레이팅, 주소, 음식명이 3개니까\n for idx in range(0, len(collectclass.c_reviewlist)): # 리뷰 를 출력할것\n try:\n worksheet.write(row_start, humanidx+3, collectclass.c_humanlist[idx].name, format)\n worksheet.write(row_start, humanidx2+3, collectclass.c_humanlist[idx].date, format)\n worksheet.write(row_start, humanidx3+3, collectclass.c_humanlist[idx].flink, format)\n worksheet.write(row_start, humanidx4+3, collectclass.c_humanlist[idx].tip, format)\n collectclass.c_foodidxlist.index(idx) # 찾았는데 있다 음식키워드가 있던글\n worksheet.write(row_start, blankidx, idx + 1, spformat)\n worksheet.write(row_start, reidx, collectclass.c_reviewlist[idx], format)\n except: # 없다\n worksheet.write(row_start, blankidx, idx + 1, format)\n try:\n worksheet.write(row_start, reidx, (collectclass.c_reviewlist[idx]), format)\n except:\n print(\"인코딩변환 시도\")\n try:\n worksheet.write(row_start, reidx, (collectclass.c_reviewlist[idx].decode('utf-8')), format)\n except:\n print(\"인코딩오류\")\n worksheet.write(row_start, reidx, (\"encoding error\"), format)\n row_start += 1\n row_start = 1 # 시작할 행넘버\n #print collectclass.c_imglist\n for idx in range(0, len(collectclass.c_imglist)): # 리뷰 를 출력할것\n worksheet.write(row_start, humanidx5+3, collectclass.c_imglist[idx], format)\n row_start +=1\n\nc_keword = get_keword_by_txt(\"grade.txt\") #키워드 등급정보가 있는 파일이름을 적으면됨 클래스 객체 반환\nG_categorylist = get_category_by_txt(\"category.txt\")\nG_footcharlist = get_category_by_txt(\"food.txt\")\ndef get_urllist():\n url = \"https://ko.foursquare.com\"\n urllist = []\n with open(\"test2.txt\", \"r\") as f:\n resultXML = f.read().decode(\"utf-8-sig\").encode(\"utf-8\")\n bs = BeautifulSoup(resultXML, 'html.parser')\n venue = bs.find_all(\"div\", class_=\"venueName\")\n for i in venue:\n urllist.append( url+i.find('a')['href'] )\n return urllist\ndef main():\n urllist = get_urllist()\n print urllist, len(urllist)\n #startidx = raw_input(\"시작:\")\n #endidx = raw_input(\"끝:\")\n startidx = 81\n endidx = 90\n name = \"boston\"\n idx = 0\n with xlsxwriter.Workbook(name+ str(startidx) + \"-\" + str(endidx) + \".xlsx\") as workbook:\n print name,startidx, '~' ,endidx,'분석시작'\n for url in urllist[int(startidx)-1:int(endidx)]:\n menuurl = url + '/menu'\n url = url + '?'\n print idx+1, \" 개 진행중...\"\n print url\n bs = get_bs_by_url(url)\n if bs is None:\n idx += 1\n continue\n menu_bs = get_bs_by_url(menuurl)\n print \"분석중...\"\n c_restaurant = restaurant(bs,menu_bs) # bs 데이터 넘겨주고\n c_restaurant.get_info_by_bs() # bs 로 레스토랑의 info를 자체클래스에 입력\n c_collection = collection(url,c_restaurant.r_reviewtotalcnt,bs) # basic url과 review total전달\n c_collection.get_fillter_review()\n 
c_collection.extract_food(c_restaurant.r_menulist)\n c_collection.extract_userinfo_byurl()\n make_excel(c_collection,c_restaurant, idx+int(startidx), workbook)\n idx += 1\n\nmain()\n\n\n\n\n\n\n\n\n" }, { "alpha_fraction": 0.6597937941551208, "alphanum_fraction": 0.6597937941551208, "avg_line_length": 17.600000381469727, "blob_id": "0eb2b31af31897f72ca2b6bfb5ba77b1196cd929", "content_id": "b8312088935e8153913783f5d43f19e1c07423f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 143, "license_type": "no_license", "max_line_length": 35, "num_lines": 5, "path": "/README.md", "repo_name": "DevHyung/Foursqure-Scrapping", "src_encoding": "UTF-8", "text": "# Foursqure_Scrapping\n___\n# summary : 포스퀘어 사이트의 리뷰분석\n# category, grade 등의txt 파일을이용 리뷰분석\n___\n\n\n\n\n" } ]
2
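The Foursqure-Scrapping record above is Python 2 code whose get_bs_by_url helper retries urllib's urlopen a few times before giving up and returning None, so callers can skip a venue instead of crashing. A minimal Python 3 sketch of that retry-then-parse pattern (illustrative, not code from the repo; the three-attempt limit and lxml parser mirror its defaults):

```python
from urllib.request import urlopen, urlcleanup
from urllib.error import URLError

from bs4 import BeautifulSoup

def get_bs_by_url(url, retries=3):
    # On failure, clear urllib's cached handlers and retry; after `retries`
    # consecutive failures return None, as the original helper does.
    for attempt in range(1, retries + 1):
        try:
            with urlopen(url) as response:
                return BeautifulSoup(response.read(), "lxml")
        except URLError as err:
            print("urlopen failed ({}), retry {}/{}".format(err.reason, attempt, retries))
            urlcleanup()
    return None
```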
mfa-darx/WWE-Network-Downloader
https://github.com/mfa-darx/WWE-Network-Downloader
e6f302117d331f6ee5c5fd2ed09d82b2cdb7e996
29eaf90f1fa96094b38d9d4e93899684cf248515
a9f15710a7f258ef7971e50596b15b176398f363
refs/heads/master
2023-06-12T01:42:37.491320
2021-07-07T06:44:30
2021-07-07T06:44:30
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6849694848060608, "alphanum_fraction": 0.7260122299194336, "avg_line_length": 37.36170196533203, "blob_id": "601981c73758f288df9c7919f5467f4bf611cb64", "content_id": "d4cdc991ce94cd13b1ec90fe1d885fb368d89eb8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1803, "license_type": "no_license", "max_line_length": 148, "num_lines": 47, "path": "/README.md", "repo_name": "mfa-darx/WWE-Network-Downloader", "src_encoding": "UTF-8", "text": "WWE Network 2.0 Downloader (Improved)\n\nFreyta's WWE Network 2.0 Downloader using Python3. This was coded by me from scratch, ideas were taken from youtube-dl.\n\nFeatures include the following:\n- Downloading from set start times\n- Ending at certain times (i.e. only downloading certain matches)\n- Qualtiy selection (1 being 1080p, 6 being 288p)\n- Kodi NFO file creations (TV episode and Series only at the moment - PPV needs to be added)\n- Part downloading of files.\n\n### Prerequisites\n\nYou must have at least version 4.2 ffmpeg installed and in your PATH.\n\nUsing pip3 install the required modules:\n\n`pip3 install --user -r requirements.txt`\n\nEdit the username and password variables in CONSTANTS.py to include your subscription email and password\n\n### Usage instructions\n\n###### Basic video download:\n\n`python3 main.py -t https://watch.wwe.com/episode/SmackDown-130268`\n\n###### Download with start and end times, using custom file name:\n\n`python3 main.py -st 1619.934 -et 1712.834 -of 'Tucker confronts Mandy Rose Smackdown 02-21-2020' -t https://watch.wwe.com/episode/SmackDown-130268`\n\n###### Download chapterised 720p video with Kodi series and episode NFO files:\n\n`python3 main.py -c -q 3 -s -e -t https://watch.wwe.com/episode/Bret-Hart-132278`\n\n\n### Options\n\n> **-t** - Link to the video you want to download\\\n> **-q** - Quality of the video you want to download. 
1 is 1080p high (default) 6 being 288p (lowest)\\\n> **-c** - Add milestone chapters to the video.\\\n> **-k** - Keep temporary aac and ts files\\\n> **-e** - Write a Kodi episode NFO file\\\n> **-s** - Write a Kodi series NFO file with poster and fanart\\\n> **-st** - Start time in seconds from where you want to start downloading\\\n> **-et** - End time in seconds from where you want to finish downloading\\\n> **-of** - Custom output filename\n" }, { "alpha_fraction": 0.5037383437156677, "alphanum_fraction": 0.5178905129432678, "avg_line_length": 31.004405975341797, "blob_id": "aa4f66aec7cec30f6419a6225fe23e308cb261c4", "content_id": "feb0d73fb81c59021594b5898fc26f194b860f5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7490, "license_type": "no_license", "max_line_length": 163, "num_lines": 227, "path": "/kodi_nfo.py", "repo_name": "mfa-darx/WWE-Network-Downloader", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\r\n\r\nimport CONSTANTS, os\r\nimport json, random\r\nimport requests\r\nimport arrow\r\n\r\n\r\ndef make_safe_filename(s):\r\n def safe_char(c):\r\n if c.isalnum():\r\n return c\r\n else:\r\n return \"_\"\r\n return \"\".join(safe_char(c) for c in s).rstrip(\"_\")\r\n\r\ndef create_multi_ep_nfo(show_list):\r\n print(show_list)\r\n id = []\r\n for i in show_list:\r\n try:\r\n # Get the show JSON file\r\n show_json = requests.get('https://cdn.watch.wwe.com/api/page?path={}'.format(i)).json()\r\n\r\n entry = show_json['entries'][0]['item']\r\n\r\n #try:\r\n # .title() makes JUL turn into Jul - camelcase.\r\n title = '{} {} - {}'.format(entry['customFields']['Franchise'], entry['episodeName'], entry['metadataLines'][0]['lines'][1].title())\r\n try:\r\n plot = entry['description']\r\n except:\r\n plot = \"\"\r\n description = entry['shortDescription']\r\n episode = entry['episodeNumber']\r\n mpaa = entry['classification']['name']\r\n aired = entry['firstBroadcastDate'][:10]\r\n season = entry['releaseYear']\r\n\r\n # WWE Raw - S20E01 - 1-02-2012.avi\r\n file_date = arrow.get(entry['firstBroadcastDate'],'YYYY-MM-DDTHH:mm:ss')\r\n file_date = file_date.format('M-DD-YYYY')\r\n if(entry['episodeNumber'] < 10):\r\n ep_num = \"0\" + str(entry['episodeNumber'])\r\n else:\r\n ep_num = entry['episodeNumber']\r\n file_name = '{} {} - S{}E{} - {}'.format(entry['customFields']['Franchise'],\r\n entry['episodeName'],\r\n entry['releaseYear'],\r\n ep_num,\r\n file_date)\r\n\r\n # FORMAT:\r\n # 0 = title\r\n # 1 - Year + Season\r\n # 2 - Episode\r\n # 3 - Outline\r\n # 4 - Plot\r\n # 5 - aired\r\n\r\n nfo_text = \"<episodedetails>\\n\\\r\n <title>{0}</title>\\n\\\r\n <season>{1}</season>\\n\\\r\n <episode>{2}</episode>\\n\\\r\n <outline>{3}</outline>\\n\\\r\n <plot>{4}</plot>\\n\\\r\n <id></id>\\n\\\r\n <genre>Sport</genre>\\n\\\r\n <year>{1}</year>\\n\\\r\n <aired>{5}</aired>\\n\\\r\n <studio>WWE Network</studio>\\n\\\r\n <trailer></trailer>\\n\\\r\n </episodedetails>\".format(title,season,episode,description,plot,aired)\r\n\r\n #f= open(\"{}.nfo\".format(file_name),\"w+\")\r\n #f.write(nfo_text)\r\n #print(\"{} is done\".format(file_name))\r\n except:\r\n print(\"failed\")\r\n pass\r\n return nfo_text, file_name\r\n\r\ndef create_episode_nfo(url, series_folder, file_name = None):\r\n id = []\r\n # Get the show JSON file\r\n show_json = requests.get('https://cdn.watch.wwe.com/api/page?path={}'.format(url)).json()\r\n\r\n entry = show_json['entries'][0]['item']\r\n\r\n #try:\r\n # .title() makes JUL turn into Jul - camelcase.\r\n 
title = entry['title']\r\n try:\r\n plot = entry['description']\r\n except:\r\n plot = \"\"\r\n\r\n description = entry['shortDescription']\r\n if(description == \"-1\"):\r\n description = plot\r\n try:\r\n episode = entry['episodeNumber']\r\n except KeyError:\r\n episode = 0\r\n # Some episodes are specials which haven't got an episode number\r\n # i.e. https://watch.wwe.com/episode/Best-Stunner-reactions-WWE-Top-10-March-15-2020-132341\r\n if episode <= 0:\r\n episode = 0\r\n\r\n mpaa = entry['classification']['name']\r\n aired = entry['firstBroadcastDate'][:10]\r\n season = entry['releaseYear']\r\n\r\n # WWE Raw - S20E01 - 1-02-2012.avi\r\n file_date = arrow.get(entry['firstBroadcastDate'],'YYYY-MM-DDTHH:mm:ssZ')\r\n file_date = file_date.format('M-DD-YYYY')\r\n try:\r\n if(entry['episodeNumber'] < 10):\r\n ep_num = \"0\" + str(entry['episodeNumber'])\r\n else:\r\n ep_num = entry['episodeNumber']\r\n except KeyError:\r\n ep_num = 0\r\n\r\n if (entry.get('episodeNumber') == '0'):\r\n ep_num = \"00\"\r\n\r\n if not file_name:\r\n file_name = '{} {} - S{}E{} - {}'.format(entry['customFields']['Franchise'],\r\n entry['episodeName'],\r\n entry['releaseYear'],\r\n ep_num,\r\n file_date)\r\n\r\n # FORMAT:\r\n # 0 = title\r\n # 1 - Year + Season\r\n # 2 - Episode\r\n # 3 - Outline\r\n # 4 - Plot\r\n # 5 - aired\r\n\r\n nfo_text = \"<episodedetails>\\n\\\r\n <title>{0}</title>\\n\\\r\n <season>{1}</season>\\n\\\r\n <episode>{2}</episode>\\n\\\r\n <outline>{3}</outline>\\n\\\r\n <plot>{4}</plot>\\n\\\r\n <id></id>\\n\\\r\n <genre>Sport</genre>\\n\\\r\n <year>{1}</year>\\n\\\r\n <aired>{5}</aired>\\n\\\r\n <studio>WWE Network</studio>\\n\\\r\n <trailer></trailer>\\n\\\r\n</episodedetails>\".format(title,season,episode,description,plot,aired)\r\n\r\n\r\n if not os.path.isdir(\"./{}/{}\".format(CONSTANTS.OUTPUT_FOLDER,series_folder)):\r\n os.mkdir(\"./{}/{}\".format(CONSTANTS.OUTPUT_FOLDER,series_folder))\r\n\r\n f= open(CONSTANTS.OUTPUT_FOLDER + \"/\" + series_folder + \"/{}.nfo\".format(file_name),\"w+\")\r\n f.write(nfo_text)\r\n\r\n\r\n# Create a Kodi compliant NFO file\r\ndef create_show_nfo(nfo_text, title, wallpaper, poster):\r\n print(title)\r\n f= open(CONSTANTS.OUTPUT_FOLDER + \"/\" + title + \"/tvshow.nfo\", \"w+\")\r\n f.write(nfo_text)\r\n\r\n # Download the fanart\r\n image = requests.get(wallpaper)\r\n open(CONSTANTS.OUTPUT_FOLDER + \"/\" + title + \"/fanart.png\", \"wb\").write(image.content)\r\n print(\"Saved fanart\")\r\n\r\n # Download the poster\r\n image = requests.get(poster)\r\n open(CONSTANTS.OUTPUT_FOLDER + \"/\" + title + \"/poster.png\", \"wb\").write(image.content)\r\n print(\"Saved poster\")\r\n\r\n# Get the basics for our Kodi NFO and the series name\r\ndef get_show_info(link):\r\n URL = \"https://cdn.watch.wwe.com/api/page?list_page_size=100&path={}&item_detail_expand=all\".format(link)\r\n show_json = requests.get(URL).json()\r\n\r\n #try:\r\n # NEW_URL = \"https://cdn.watch.wwe.com/api/page?list_page_size=100&path={}&item_detail_expand=all\".format(show_json['entries'][0]['item']['season']['path'])\r\n # print(NEW_URL)\r\n # show_json = requests.get(NEW_URL).json()\r\n #except KeyError:\r\n # pass\r\n\r\n #f = open(\"test.txt\",\"w\")\r\n #f.write(json.dumps(show_json, indent=4))\r\n #exit()\r\n\r\n i = show_json['entries'][0]['item']\r\n franchise = i['customFields']['Franchise']\r\n title = i['title']\r\n if franchise not in title[:len(franchise)]:\r\n title = \"{} {}\".format(i['customFields']['Franchise'], i['title'])\r\n description = i['description']\r\n mpaa = 
i['classification']['name']\r\n # FORMAT:\r\n # 0 = title\r\n # 1 - description\r\n # 2 - mpaa\r\n\r\n nfo_text = \"<tvshow>\\n\\\r\n <title>{0}</title>\\n\\\r\n <showtitle>{0}</showtitle>\\n\\\r\n <userrating>{3}</userrating>\\n\\\r\n <outline>{1}</outline>\\n\\\r\n <plot>{1}</plot>\\n\\\r\n <mpaa>{2}</mpaa>\\n\\\r\n <genre>Sports</genre>\\n\\\r\n <studio>WWE Network</studio>\\n\\\r\n</tvshow>\".format(title,description,mpaa,random.randint(5,10))\r\n\r\n wallpaper = i['images']['wallpaper']\r\n poster = i['images']['poster']\r\n return title, nfo_text, wallpaper, poster\r\n\r\n\r\nif __name__ == \"__main__\":\r\n print(\"Please run python main.py instead.\")\r\n pass" }, { "alpha_fraction": 0.6919554471969604, "alphanum_fraction": 0.7068069577217102, "avg_line_length": 35.39639663696289, "blob_id": "b258bb148c0b5dbc92604c7e68cad8d98e6b417f", "content_id": "6fe217bc91d127a43fffb4002cc85a9434891483", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8080, "license_type": "no_license", "max_line_length": 165, "num_lines": 222, "path": "/main.py", "repo_name": "mfa-darx/WWE-Network-Downloader", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nimport wwe\nimport json\nimport subprocess\nimport m3u8, os,re\nimport argparse\nimport download_util, kodi_nfo, CONSTANTS, db_util\nimport time\n\ndef clean_text(text):\n # Thanks to https://stackoverflow.com/a/27647173\n return re.sub(r'[\\\\\\\\\\\\\\'/*?:\"<>|]',\"\",text)\n\n# GET ARGS FOR EPISODE TO DOWNLOAD\nparser = argparse.ArgumentParser(description='Download videos off the WWE Network.')\nparser.add_argument('-t','--title', help='Link of the video you want to download. Example: /episode/Prime-Time-Wrestling-9283', required=True)\nparser.add_argument('-q','--quality', help='Quality of the video you wish to download. Value between 1 (highest) and 6 (lowest). Defaults to 1080p.', required=False)\nparser.add_argument('-c','--chapter', help='Add chapter \"milestones\" to the video.', required=False, action='store_true')\nparser.add_argument('-k','--keep_files', help='Keep the temporary download files.', required=False, action='store_true')\nparser.add_argument('-e','--episode_nfo', help='Create a Kodi format NFO TV episode file.', required=False, action='store_true')\nparser.add_argument('-s','--series_nfo', help='Create a Kodi format NFO TV show file.', required=False, action='store_true')\nparser.add_argument('-st','--start_time', help='How far into the video you want to start, in seconds. Note: Will overide other start points.', required=False)\nparser.add_argument('-et','--end_time', help='How far into the video you want to stop, in seconds.', required=False)\nparser.add_argument('-of','--output_filename', help='Custom output file name.', required=False)\nparser.add_argument('-f','--force', help='Overwrite previously downloaded files.', required=False, action='store_true')\n\nargs = vars(parser.parse_args())\n\ncreate_episode_nfo = False\ncreate_series_nfo = False\nkeep_files = False\nforce_download = False\n# Set the default video quality to 1080p\nQUALITY = CONSTANTS.VIDEO_QUALITY[0]\n\n# Get the episode title\nif args['title']:\n EPISODE = args['title']\n if \"https://watch.wwe.com\" in EPISODE:\n EPISODE = EPISODE.replace(\"https://watch.wwe.com\", \"\")\n\n# If the title wasn't set\nif not EPISODE:\n\tprint(\"No episode found. 
Use the --title or -t parameter.\")\n\texit()\n\n# Some links have a starting point in their link, i.e https://watch.wwe.com/episode/Expect-The-Unexpected-9842?startPoint=371.701\nif \"?startPoint=\" in EPISODE:\n START_FROM = EPISODE.split(\"?startPoint=\")[1]\n # Remove the startPoint from our EPISODE - probably isn't needed\n EPISODE = EPISODE.split(\"?startPoint=\")[0]\n\n# Prefix a / if we haven't included it in our title otherwise we get an error\nif not EPISODE.startswith(\"/\"):\n EPISODE = \"/\" + EPISODE\n\n# Do we want to keep the downloaded files?\n#keep_files = False\nif args['keep_files']:\n keep_files = True\n\n# Do we want to create an episode or series nfo file as well?\nif args['episode_nfo']:\n create_episode_nfo = True\nif args['series_nfo']:\n create_series_nfo = True\n\n# Set the start and end times\nif args['start_time']:\n START_FROM = args['start_time']\nif args['end_time']:\n END_TIME = args['end_time']\n\n# Set the custom output title\nCUSTOM_FILENAME = \"\"\nif args['output_filename']:\n CUSTOM_FILENAME = args['output_filename']\n\n# Set the quality of the video we want\nif args['quality']:\n if int(args['quality']) < 0 or int(args['quality']) >= len(CONSTANTS.VIDEO_QUALITY):\n print(\"Invalid quality choice. It must be between 0 (1080p) and {} (288p)\".format(len(CONSTANTS.VIDEO_QUALITY)))\n exit()\n\n QUALITY = CONSTANTS.VIDEO_QUALITY[int(args['quality'])]\n\nif args['force']:\n force_download = True\n\n# Login\nif CONSTANTS.USERNAME == \"\" or CONSTANTS.PASSWORD == \"\":\n print(\"Please enter a username and/or password.\")\n exit()\n\naccount = wwe.wwe_network(CONSTANTS.USERNAME,CONSTANTS.PASSWORD)\naccount.login()\n\n# Get the video JSON which tells us the hls url link\nvideo_link = account.get_video_info(EPISODE)\n\n# Quit if the video information is empty\nif not video_link:\n exit()\n\n# Grab the m3u8\nstream_url = account.m3u8_stream(video_link[0])\nif not CUSTOM_FILENAME:\n title = video_link[1]\nelse:\n title = CUSTOM_FILENAME\n\nprint(\"Got the video information\")\n\n# Connect to the database where we store which videos we have downloaded\ndatabase = db_util.database()\ndatabase.db_connect()\n\n# Check if we have already downloaded the video before.\ndb_q = database.db_query(video_link[2])\n\n# If we haven't forced the download, then we will display an error and quit\nif not force_download and db_q:\n print(\"You have already downloaded this video.\")\n print(\"If you want to download this file anyway, please use --force or -f\")\n print(\"Quitting.\")\n exit()\n\n# Get the base url of our video\nbase_url = stream_url.split(\".m3u8\")[0].rsplit(\"/\", 1)\n\n# Initialise the downloader\ndownload = download_util.download()\nindex_m3u8 = download.get_index_m3u8(stream_url)\n\nindex_m3u8_obj = m3u8.loads(index_m3u8.data.decode('utf-8'))\n\n# Get our audio playlist\naudio_qualities = []\nfor i in index_m3u8_obj.media:\n # We want English audio, so any files with eng as it's language is added to our list\n if \"eng\" in i.language:\n audio_qualities.append((int(i.group_id.split('audio-')[1]), base_url[0]+\"/\"+ i.uri))\n\n# Sort the audio quality from high to low\naudio_qualities.sort(reverse=True)\n# Choose the playlist we want\naudio_playlist = download.get_playlist_object(audio_qualities[0][1])\n\n# The kwargs we will pass to the downloader\nkwargs = {\"playlist\":audio_playlist,\n \"base_url\":audio_qualities[0][1].split(\"index.m3u8\")[0],\n \"title\":clean_text(title)\n }\n\n# If we have a start_time then add the set start time, otherwise default to 
0\ntry:\n if START_FROM:\n kwargs.update({\"start_from\":START_FROM})\nexcept NameError:\n kwargs.update({\"start_from\":0})\n\n# If we have an end_time then add the set end time, otherwise default to 0\ntry:\n if END_TIME:\n kwargs.update({\"end_time\":END_TIME})\nexcept NameError:\n kwargs.update({\"end_time\":0})\n\n# Download the audio file\ndownload.download_playlist(**kwargs)\n\n#Get our playlist. We want 1080p\nvideo_selections = []\n\nfor i in index_m3u8_obj.playlists:\n\n if (i.stream_info.average_bandwidth <= QUALITY * 1000):\n # Create a list of potential URIs\n video_selections.append((i.stream_info.bandwidth, base_url[0] + \"/\" + i.uri))\n\n# Select the first one which has the highest bitrate\nvideo_selections.sort(reverse=True)\n# Get the playlist m3u8 we want to download\nvideo_playlist = download.get_playlist_object(video_selections[0][1])\n\n# Update the kwargs that we will send to the downloader\nkwargs.update({\"playlist\":video_playlist})\nkwargs.update({\"base_url\":video_selections[0][1].split(\"index.m3u8\")[0]})\n\n# Download the playlist\ndownload.download_playlist(**kwargs)\n\n# Download the chapter information\naccount.get_chapter_information(EPISODE, clean_text(title), args['chapter'])\n\nseries_info = kodi_nfo.get_show_info(EPISODE)\n\n# Create output folder if it doesn't exist\nif not os.path.exists(CONSTANTS.OUTPUT_FOLDER + \"/\" + clean_text(series_info[0])):\n os.makedirs(CONSTANTS.OUTPUT_FOLDER + \"/\" + clean_text(series_info[0]))\n\nif(create_series_nfo):\n print(\"Creating Kodi series NFO file\")\n kodi_nfo.create_show_nfo(series_info[1], clean_text(series_info[0]), series_info[2], series_info[3])\n print(\"Created Kodi series NFO file\")\n\nif(create_episode_nfo):\n print(\"Creating Kodi episode NFO file\")\n kodi_nfo.create_episode_nfo(EPISODE, clean_text(series_info[0]), clean_text(title))\n print(\"Created Kodi episode NFO file\")\n\n# Finally we want to combine our audio and video files\ndownload.combine_videos(clean_text(title), clean_text(series_info[0]), keep_files=keep_files)\n\n# Insert the downloaded video into our database\nif db_q:\n database.db_upd(video_link[2], video_link[1], str(video_selections[0][0]), int(time.time()))\n print(\"Updated database with the new video information\")\nelse:\n print(\"Inserted the video into the database\")\n database.db_ins(video_link[2], video_link[1], str(video_selections[0][0]), int(time.time()))\n" }, { "alpha_fraction": 0.3636363744735718, "alphanum_fraction": 0.6753246784210205, "avg_line_length": 14.399999618530273, "blob_id": "010851eedb88a989345e4a4b3a3e9135436f57cc", "content_id": "d5ddaa045f288e464634e35d6d8e47066707bf3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 77, "license_type": "no_license", "max_line_length": 17, "num_lines": 5, "path": "/requirements.txt", "repo_name": "mfa-darx/WWE-Network-Downloader", "src_encoding": "UTF-8", "text": "requests==2.20.0\nm3u8==0.3.8\narrow==0.15.2\nurllib3==1.26.5\ncertifi==2019.3.9\n" }, { "alpha_fraction": 0.4223918616771698, "alphanum_fraction": 0.5216284990310669, "avg_line_length": 21.117647171020508, "blob_id": "3d24bfc5a819c837ca506f599471bed6867ffd9f", "content_id": "9131cfa27fe29223be810663cc7dfd0c00f57457", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 786, "license_type": "no_license", "max_line_length": 105, "num_lines": 34, "path": "/CONSTANTS.py", "repo_name": "mfa-darx/WWE-Network-Downloader", "src_encoding": "UTF-8", 
"text": "#!/usr/bin/python3\r\n\r\n# USERNAME AND PASSWORD\r\nUSERNAME = \"\"\r\nPASSWORD = \"\"\r\n\r\n# TO GET THE EPISODE JSON INFORMATION\r\nHEADERS = {\r\n 'User-Agent': 'okhttp/3.12.1'\r\n}\r\n\r\nREALM_HEADERS = {\r\n 'x-api-key': '640a69fb-68b1-472c-ba4b-36f50288c984',\r\n 'realm': 'dce.wwe'\r\n}\r\n\r\n# FOR THE DOWNLOADER\r\nDOWNLOAD_HEADERS = {'user-agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0'}\r\nOUTPUT_FOLDER = \"output\"\r\nTEMP_FOLDER = \"temp\"\r\n\r\n\r\n# VIDEO SIZE INFORMATION\r\nVIDEO_QUALITY = [10000, # 1080p high\r\n 6500, # 1080p low\r\n 4500, # 720p high\r\n 2100, # 720p low\r\n 1500, # 504p\r\n 1000, # 360p\r\n 600] # 288p\r\n\r\nif __name__ == \"__main__\":\r\n print(\"Please run python main.py instead.\")\r\n pass\r\n" }, { "alpha_fraction": 0.6004255414009094, "alphanum_fraction": 0.6051063537597656, "avg_line_length": 39.534481048583984, "blob_id": "c236ba8a350d02b5b617331ed454655f709adacd", "content_id": "c6bf70fd9b9f0b2f3a32840f4904ae8d1121d96d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2350, "license_type": "no_license", "max_line_length": 153, "num_lines": 58, "path": "/db_util.py", "repo_name": "mfa-darx/WWE-Network-Downloader", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\nimport sqlite3\nimport os\n\nclass database:\n\n # Initialise the database\n def __init__(self):\n self.conn = sqlite3.connect(\"events.db\")\n self.c = self.conn.cursor()\n\n\n # Connect to the database. If the file doesn't exist, it creates it.\n def db_connect(self):\n \n # Thanks to stackoverflow for help with this one.\n # https://stackoverflow.com/a/1604121\n database_exists = self.c.execute(\"SELECT name FROM sqlite_master WHERE name='downloads'\").fetchone()\n \n if not database_exists:\n # id = DiceVideoId\n # name = the name of event/episode\n # quality = bitrate\n # date = timestamp\n self.c.execute(\"CREATE TABLE downloads (id integer unique, name text, quality text, date integer)\")\n self.conn.commit()\n\n # Insert download into the database\n def db_ins(self, video_id, video_name, video_qual, timestamp):\n try:\n self.c.execute(\"INSERT INTO downloads VALUES ('{}', '{}', '{}', '{}')\".format(video_id, video_name, video_qual, timestamp))\n self.conn.commit()\n except sqlite3.IntegrityError: \n print(\"Error: Couldn't add {} to the database. 
ID already exists.\".format(video_id))\n\n # Update download information in the database\n def db_upd(self, video_id, video_name, video_qual, timestamp):\n self.c.execute(\"UPDATE downloads SET name = '{}', quality = '{}', date = '{}' WHERE id = {}\".format(video_name, video_qual, timestamp, video_id))\n print(\"UPDATE downloads SET name = '{}', quality = '{}', date = '{}' WHERE id = {}\".format(video_name, video_qual, timestamp, video_id))\n self.conn.commit()\n\n # Query the database for previously downloaded episode\n def db_query(self, video_id):\n #self.c.execute(\"INSERT INTO downloads VALUES ('{}', '{}', '{}', '{}')\".format(video_id, video_name, video_qual, timestamp))\n result = self.c.execute(\"SELECT date FROM downloads WHERE id = '{}'\".format(video_id))\n if result.fetchone():\n return True\n else:\n return False\n\n # Close the database, and commit any final changes\n def db_close(self):\n self.conn.commit()\n self.conn.close()\n\nif __name__ == \"__main__\":\n print(\"Please run python main.py instead.\")\n pass" }, { "alpha_fraction": 0.5723425149917603, "alphanum_fraction": 0.5767716765403748, "avg_line_length": 44.1629638671875, "blob_id": "1823857659a4559a99b4c242a6cc122d2877b1ce", "content_id": "afd8b2dd96133c8a1199576bd09d7d77669ade35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6096, "license_type": "no_license", "max_line_length": 122, "num_lines": 135, "path": "/download_util.py", "repo_name": "mfa-darx/WWE-Network-Downloader", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nimport urllib3, certifi\nimport m3u8, json\nimport datetime\nimport sys, subprocess, os\nimport CONSTANTS\n\nclass download:\n\n def __init__(self):\n self.http = urllib3.PoolManager(headers=CONSTANTS.DOWNLOAD_HEADERS,\n cert_reqs='CERT_REQUIRED',\n ca_certs=certifi.where())\n\n def create_dirs(self):\n # If the download directory doesn't exist, we need to create it\n if not os.path.isdir(\"./{}\".format(CONSTANTS.OUTPUT_FOLDER)):\n os.mkdir(\"./{}\".format(CONSTANTS.OUTPUT_FOLDER))\n if not os.path.isdir(\"./{}\".format(CONSTANTS.TEMP_FOLDER)):\n os.mkdir(\"./{}\".format(CONSTANTS.TEMP_FOLDER))\n\n def get_index_m3u8(self, link):\n # Return the contents of the m3u8 playlist\n return(self.http.request('GET', link))\n\n def get_playlist_object(self, link):\n # Return the playlist as an object\n return(m3u8.loads(self.http.request('GET', link).data.decode('utf-8')))\n\n def write_data(self, data, location):\n # Append the data to the file. 
Data is in bytes\n f = open(location,\"ab+\")\n f.write(data)\n # Flush the buffer - hopefully this keeps our script from using up too much ram\n f.flush()\n\n def write_upto(self, part, name):\n # Write which file we just downloaded to a temporary file in JSON format\n f = open(name+\".part\",\"w\")\n json.dump({\"current_time\":part}, f)\n\n def read_part_file(self, filename):\n # Read which file we need to continue downloading from\n json_file = open(filename, \"r\")\n part = json.load(json_file)\n return float(part.get(\"current_time\"))\n\n def combine_videos(self, title, file_folder, keep_files=False):\n input_file = CONSTANTS.TEMP_FOLDER + \"/\" + title\n output_file = CONSTANTS.OUTPUT_FOLDER + \"/\" + file_folder + \"/\" + title\n metafile = CONSTANTS.TEMP_FOLDER + \"/\" + title + \"-metafile\"\n ffmpeg_command = ('ffmpeg \\\n -i \"{}.ts\"\\\n -i \"{}.aac\"\\\n -i \"{}\" -map_metadata 1\\\n -c copy\\\n \"{}.mp4\" -y'.format(input_file, input_file, metafile, output_file))\n \n subprocess.call(ffmpeg_command, shell=True)\n if not keep_files:\n os.remove(os.getcwd() + \"/\" + CONSTANTS.TEMP_FOLDER + \"/\" + title +\".aac\")\n os.remove(os.getcwd() + \"/\" + CONSTANTS.TEMP_FOLDER + \"/\" + title +\".aac.part\")\n os.remove(os.getcwd() + \"/\" + CONSTANTS.TEMP_FOLDER + \"/\" + title +\".ts\")\n os.remove(os.getcwd() + \"/\" + CONSTANTS.TEMP_FOLDER + \"/\" + title +\".ts.part\")\n os.remove(os.getcwd() + \"/\" + CONSTANTS.TEMP_FOLDER + \"/\" + title + \"-metafile\")\n\n def download_playlist(self, playlist, base_url, title, **kwargs):\n # Check if the download directory exists\n self.create_dirs()\n # Get the amount of files in the playlist\n files_to_download = len(playlist.segments)\n # set the title to our title and output file\n title = os.getcwd() + \"/\" + CONSTANTS.TEMP_FOLDER + \"/\" + title\n\n # Get the format of the file we are downloading.\n # Result should either be aac or ts\n format = playlist.segments[0].uri.split(\".\")[1].rsplit(\"?\",1)[0]\n\n # Timestamp of our current segment\n current_time = float(0)\n # Start and end times from the args that were passed\n end_time = float(kwargs['end_time'])\n start_from = float(kwargs['start_from'])\n # Total length of segments we want to download in seconds\n total_length = float(0)\n # Find the total length of a playlist\n for length in playlist.segments:\n total_length += length.duration\n\n try:\n # If we have already started to download a file and it still exists,\n # we will have a temp file called \".part\". 
We will open it to see\n # where we will continue downloading from\n if os.path.exists(\"{}.{}\".format(title, format)):\n start_from = self.read_part_file(\"{}.{}.part\".format(title, format))\n except:\n pass\n\n # Sometimes the inputted end time is too long or if we want to download the whole video,\n # we just set the end_time to when the video itself ends.\n if end_time > total_length or end_time == 0:\n end_time = total_length\n\n # For as long as we haven't tried to quit the program\n try:\n for i in playlist.segments:\n if current_time >= start_from and current_time <= end_time:\n current_file = i.uri.split('.{}'.format(format))[0]\n # Example: 66 ts files downloaded out of 121\n # Note: there is a bug where it will show all of the files in the playlist\n # even if we just want a portion of it.\n sys.stdout.write(\"\\r{} {} files downloaded out of {}\".format(current_file, format, files_to_download))\n\n # Get the base link for the audio files and then open the URL\n download_data = self.http.request('GET', base_url+i.uri)\n # Now we append the data to the download file and clear the programs buffer\n self.write_data(download_data.data, \"{}.{}\".format(title, format))\n # Clear the stdout internal buffer\n sys.stdout.flush()\n # After adding the downloaded data, we increment the duration\n current_time += i.duration\n # Save where we are upto in the download process\n self.write_upto(current_time, \"{}.{}\".format(title, format))\n # Since we downloaded the whole file we will delete out part\n #os.remove(title+\".\"+format+\".part\")\n except KeyboardInterrupt:\n # We want to cancel the current operation\n pass\n\n print(\"\\r{} files finished downloading\".format(format))\n\nif __name__ == \"__main__\":\n print(\"Please run python main.py instead.\")\n pass" }, { "alpha_fraction": 0.5169491767883301, "alphanum_fraction": 0.5349744558334351, "avg_line_length": 35.26829147338867, "blob_id": "0a3d81045fdf54ae3fa83a7d5416edbd214d2079", "content_id": "9eabcf96e0e5d3b14c472073c32f0c120b3a28a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7434, "license_type": "no_license", "max_line_length": 152, "num_lines": 205, "path": "/wwe.py", "repo_name": "mfa-darx/WWE-Network-Downloader", "src_encoding": "UTF-8", "text": "#!/usr/bin/python3\n\nimport os, sys\nimport subprocess\nfrom time import time\nimport arrow, datetime\nimport requests, json, m3u8\n\nimport CONSTANTS\n\n\nclass wwe_network:\n\n def __init__(self, user, password):\n\n with requests.Session() as self._session:\n self._session.headers.update(CONSTANTS.HEADERS)\n\n self.user = user\n self.password = password\n self.logged_in = False\n\n\n\n def _set_authentication(self):\n\n access_token = self.authorisationToken\n if not access_token:\n print(\"No access token found.\")\n return\n\n self._session.headers.update({'Authorization': 'Bearer {}'.format(access_token)})\n print(\"Succesfully logged in\")\n self.logged_in = True\n\n def login(self):\n\n payload = {\n \"id\": self.user,\n \"secret\": self.password\n }\n\n token_data = self._session.post('https://dce-frontoffice.imggaming.com/api/v2/login', json=payload, headers=CONSTANTS.REALM_HEADERS).json()\n if 'code' in token_data:\n print(\"Error while logging in. 
Possibly invalid username/password\")\n exit()\n\n\n self.authorisationToken = token_data['authorisationToken']\n self.refreshToken = token_data['refreshToken']\n\n self._set_authentication()\n\n # Get the m3u8 stream\n def m3u8_stream(self, stream_link):\n\n #https://dve-api.imggaming.com/v/70800?customerId=16&auth=1f7512c7c2b7474abf723188038b32c1&timestamp=1564126721496\n stream = self._session.get(stream_link, headers=CONSTANTS.REALM_HEADERS).json()\n\n return stream['hls']['url']\n\n def get_chapter_information(self, link, episode_title, chapterize=False):\n api_link = self._session.get('https://cdn.watch.wwe.com/api/page?path={}'.format(link)).json()\n\n entry = api_link[\"entries\"][0][\"item\"].get(\"relatedItems\")\n data = []\n for i in entry:\n if i.get(\"relationshipType\") == \"milestone\":\n start = int(i[\"item\"][\"customFields\"].get(\"StartPoint\") * 1000)\n end = int(i[\"item\"][\"customFields\"].get(\"EndPoint\") * 1000)\n\n title = i[\"item\"].get(\"title\")\n data.append([start, end, title])\n\n print(\"\\nStarting to write the metadata file\")\n meta_file = open(\"{}/{}-metafile\".format(CONSTANTS.TEMP_FOLDER, episode_title), \"w\")\n meta_file.write(\";FFMETADATA1\\n\\\ntitle={}\\n\".format(episode_title))\n print(\"Finished writing the metadata file\")\n\n if chapterize:\n print(\"\\nWriting chapter information\")\n for i in data:\n meta_file.write(\"[CHAPTER]\\n\\\nTIMEBASE=1/1000\\n\\\nSTART={}\\n\\\nEND={}\\n\\\ntitle={}\\n\\n\".format(str(i[0]), str(i[1]), i[2]))\n\n print(\"Finished writing chapter information\")\n\n print(\"\\nStarting to write the stream title\")\n meta_file.write(\"[STREAM]\\n\\\ntitle={}\".format(episode_title))\n print(\"Finished writing the stream title\\n\")\n meta_file.close()\n\n def _video_url(self, link):\n #playerUrlCallback=https://dve-api.imggaming.com/v/70800?customerId=16&auth=33d8c27ac15ff76b0af3f2fbfc77ba05&timestamp=1564125745670\n video_url = self._session.get('https://dce-frontoffice.imggaming.com/api/v2/stream/vod/{}'.format(link), headers=CONSTANTS.REALM_HEADERS).json()\n try:\n if video_url['status'] == 403:\n print(\"Your subscription is invalid. Quitting.\")\n exit()\n except:\n return video_url['playerUrlCallback'], video_url['videoId']\n\n def get_video_info(self, link):\n # Link: https://cdn.watch.wwe.com/api/page?path=/episode/This-Tuesday-in-Texas-1991-11831\n # We need DiceVideoId\n api_link = self._session.get('https://cdn.watch.wwe.com/api/page?path={}'.format(link)).json()\n\n # If we have an invalid link, quit\n try:\n if api_link[\"message\"]:\n print(\"Video link is invalid. 
Exiting now..\")\n return\n except:\n pass\n\n entry = api_link['entries'][0]['item']\n\n # If our event is a weekly/episodic show, add the date, season and episode number to the file name\n if entry[\"customFields\"].get(\"EventStyle\") == \"Episodic\":\n if entry[\"episodeNumber\"] < 10:\n ep_num = \"0\" + str(entry[\"episodeNumber\"])\n else:\n ep_num = entry[\"episodeNumber\"]\n\n file_date = arrow.get(\n entry[\"firstBroadcastDate\"], \"YYYY-MM-DDTHH:mm:ssZ\"\n )\n file_date = file_date.format(\"MM-DD-YYYY\")\n\n file_name = \"{} {} - S{}E{} - {}\".format(\n entry[\"customFields\"][\"Franchise\"],\n entry[\"episodeName\"]\n .replace(\"&\", \"and\")\n .replace(\":\", \"- \")\n .replace(\"'\", \"\")\n .replace(\"\\\"\", \"\")\n .replace(\"/\", \" \"),\n entry[\"releaseYear\"],\n ep_num,\n file_date,\n )\n elif entry[\"customFields\"].get(\"SeasonNumber\") and entry[\"customFields\"].get(\"EventStyle\") != \"PPV\":\n if entry[\"episodeNumber\"] < 10:\n ep_num = \"0\" + str(entry[\"episodeNumber\"])\n else:\n ep_num = entry[\"episodeNumber\"]\n\n file_date = arrow.get(\n entry[\"firstBroadcastDate\"], \"YYYY-MM-DDTHH:mm:ssZ\"\n )\n file_date = file_date.format(\"MM-DD-YYYY\")\n\n file_name = \"{} - S{}E{} - {}\".format(\n entry[\"customFields\"][\"SeriesName\"],\n entry[\"customFields\"].get(\"SeasonNumber\"),\n ep_num,\n entry[\"episodeName\"]\n .replace(\"&\", \"and\")\n .replace(\":\", \"- \")\n .replace(\"'\", \"\")\n .replace(\"\\\"\", \"\")\n .replace(\"/\", \" \"),\n )\n\n elif entry[\"customFields\"].get(\"EventStyle\") == \"PPV\":\n # If it is a PPV get the title and year into variables\n ppv_title = entry[\"episodeName\"]\n ppv_year = entry[\"releaseYear\"]\n # Check if the PPV already has the year in it. For example \"This Tuesday in Texas 1991\" has the year,\n # but \"WrestleMania 35\" doesn't. Since we don't want to have \"This Tuesday in Texas 1991 1991\" as\n # our filename we will just use the PPV title\n if str(ppv_year) in ppv_title:\n file_name = \"{} {}\".format(\n entry[\"customFields\"][\"Franchise\"], entry[\"episodeName\"]\n )\n else:\n file_name = \"{} {} {}\".format(\n entry[\"customFields\"][\"Franchise\"],\n entry[\"episodeName\"],\n entry[\"releaseYear\"],\n )\n else:\n if not entry.get('title'):\n raise Exception(\"Unrecognized event type\")\n file_name = (\n entry[\"title\"]\n .replace(\"&\", \"and\")\n .replace(\":\", \"- \")\n .replace(\"'\", \"\")\n .replace(\"\\\"\", \"\")\n .replace(\"/\", \" \")\n )\n\n video_url_resp = self._video_url(api_link['entries'][0]['item']['customFields']['DiceVideoId'])\n return video_url_resp[0], file_name, video_url_resp[1]\n\n\nif __name__ == \"__main__\":\n print(\"Please run python main.py instead.\")\n pass" } ]
8
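The WWE-Network-Downloader record above selects an HLS rendition by sorting variant playlists on bandwidth under a quality cap (see main.py). Below is a minimal, self-contained sketch of that idea, assuming only the m3u8 package pinned in its requirements.txt; MASTER and pick_variant are illustrative names, not code from the repo.

import m3u8

# A tiny master playlist stand-in; real manifests arrive over the network.
MASTER = """#EXTM3U
#EXT-X-STREAM-INF:BANDWIDTH=6500000,RESOLUTION=1920x1080
1080/index.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=2100000,RESOLUTION=1280x720
720/index.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=600000,RESOLUTION=512x288
288/index.m3u8
"""

def pick_variant(master_text, max_kbps):
    """Return (bandwidth, uri) of the best variant at or under max_kbps."""
    candidates = [(p.stream_info.bandwidth, p.uri)
                  for p in m3u8.loads(master_text).playlists
                  if p.stream_info.bandwidth <= max_kbps * 1000]
    if not candidates:
        raise ValueError("no variant fits the requested quality cap")
    return max(candidates)  # tuples compare on bandwidth first

print(pick_variant(MASTER, 4500))  # -> (2100000, '720/index.m3u8')

The repo applies the same sort-tuples-by-bitrate trick to both the English audio renditions and the video variants before downloading segments.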
ooxx5626/DataMining_HW1
https://github.com/ooxx5626/DataMining_HW1
c73759057dd328d6de9defd4402a0e7af14f4ea9
b9a34bbdc6bee963de56e9b58ca83df0ab4f0a67
d24b7b31444acd2418ffe16ad8825baaec700756
refs/heads/master
2018-12-26T14:31:35.526924
2018-10-24T02:02:49
2018-10-24T02:02:49
153280190
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5126892924308777, "alphanum_fraction": 0.5186607241630554, "avg_line_length": 33.77862548828125, "blob_id": "335bca33a2c276e91b464d98e59eecd353c00b80", "content_id": "ed3e57a9f65878757eb0b3cc0fc15bc7b3c8c68a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4689, "license_type": "no_license", "max_line_length": 123, "num_lines": 131, "path": "/start.py", "repo_name": "ooxx5626/DataMining_HW1", "src_encoding": "UTF-8", "text": "import node_class\r\nimport file_manger\r\nimport json\r\nfrom collections import defaultdict\r\ndef loadSimpDat():\r\n Bread =\"Bread\"\r\n Beer = \"Beer\"\r\n Milk = \"Milk\"\r\n Egg = \"Egg\"\r\n Coffee = \"Coffee\"\r\n simpDat = [[Bread, Milk, Beer], \r\n [Bread, Coffee], \r\n [Bread, Egg], \r\n [Bread, Milk, Coffee], \r\n [Milk, Egg], \r\n [Bread, Egg], \r\n [Milk, Egg], \r\n [Bread, Milk, Egg, Beer], \r\n [Bread, Milk, Egg]\r\n ]\r\n return simpDat\r\ndef newSimpDat(arr):\r\n newDat = []\r\n for a in arr:\r\n for i in range(0,arr[a]):\r\n newDat.append(a)\r\n return newDat\r\n\r\ndef get_table(datas):\r\n table=[]\r\n for tr in datas:\r\n for i in tr:\r\n if i not in table:\r\n table.append(i)\r\n return table\r\ndef getPattern(myHeaderTab, patterns):\r\n \r\n for top in myHeaderTab:\r\n pats = node_class.findPrefixPath(myHeaderTab[top][1])\r\n # pattern = {}\r\n nodeName = myHeaderTab[top][1].name\r\n node_findPre_list=[]\r\n for pat in pats:\r\n pat_context={\r\n \"pat\" : clear_frozenset(pat),\r\n \"count\" : pats[pat]\r\n }\r\n node_findPre_list.append(pat_context)\r\n # print(nodeName, pat) \r\n patterns[nodeName]=node_findPre_list\r\n\r\ndef clear_frozenset(string):\r\n return str(string).replace(\"frozenset({\",\"\").replace(\"})\",\"\").replace(\"'\",\"\").replace(',','').split(' ')\r\ndef checkIsexist(item, datas):\r\n exist_count = 0\r\n isexist = False\r\n for i in item:\r\n if i in datas :\r\n exist_count+=1\r\n if exist_count == len(item):\r\n isexist=True\r\n return isexist\r\n\r\ndef checkAnyexist(item_i, item_j):\r\n isexist = False\r\n for i in item_i:\r\n for j in item_j:\r\n if i == j:\r\n isexist = True\r\n return isexist\r\ndef associate(freqItems, simpDat):\r\n with open(\"save_data/associate.txt\", \"w+\") as f:\r\n rules_count = 0\r\n for item_i in freqItems:\r\n for item_j in freqItems:\r\n if item_i != item_j:\r\n item_i_count=0\r\n item_j_count=0\r\n if not (checkIsexist(item_i, item_j) or checkIsexist(item_j, item_i) or checkAnyexist(item_i, item_j)):\r\n for datas in simpDat:\r\n if checkIsexist(item_i, datas):\r\n item_i_count += 1\r\n \r\n if checkIsexist(item_i, datas) and checkIsexist(item_j, datas):\r\n item_j_count += 1\r\n if item_i_count != 0 and item_j_count != 0 and item_j_count/item_i_count>=0.5 and item_j_count!=1:\r\n print(\"{} >>> {}\".format(item_i, item_j))\r\n # print(\"item_i_count : {}\".format(item_i_count))\r\n # print(\"item_j_count : {}\".format(item_j_count))\r\n print(\"conf : {}\".format(item_j_count/item_i_count))\r\n f.write(\"{} >>> {}\\n\".format(item_i, item_j))\r\n # f.write(\"item_i_count : {}\\n\".format(item_i_count))\r\n # f.write(\"item_j_count : {}\\n\".format(item_j_count))\r\n f.write(\"conf : {}\\n\".format(item_j_count/item_i_count))\r\n rules_count +=1\r\n print(\"rules_count : {}\".format(rules_count))\r\n f.write(\"rules_count : {}\\n\".format(rules_count))\r\n\r\ndef do_IBMData():\r\n # simpDat = loadSimpDat()\r\n simpDat = file_manger.readAndParser(\"data.ntrans_1.ascii.tlen_5.nitems_1.npats_2\")\r\n 
table = get_table(simpDat)\r\n print(simpDat)\r\n file_manger.save_csv(table, simpDat, 'weka_IBM.csv') \r\n initSet = node_class.createInitSet(simpDat)\r\n myFPtree, myHeaderTab = node_class.createTree(initSet, 2) \r\n save = []\r\n myFPtree.disp(save = save)\r\n for s in save:\r\n print(s)\r\n file_manger.save_tree(save, \"save.txt\")\r\n freqItems = []\r\n node_class.mineTree(myFPtree, myHeaderTab, 2, set([]), freqItems)\r\n associate(freqItems, simpDat)\r\n\r\n print(freqItems)\r\ndef do_KaggleData():\r\n simpDat = file_manger.readKaggle()\r\n file_manger.save_csv_K(simpDat[0], simpDat, 'weka_K.csv') \r\n initSet = node_class.createInitSet(simpDat[1:])\r\n print(initSet)\r\n myFPtree, myHeaderTab = node_class.createTree(initSet, 10) \r\n save = []\r\n myFPtree.show()\r\n file_manger.save_tree(save, \"save.txt\")\r\n freqItems = []\r\n node_class.mineTree(myFPtree, myHeaderTab, 10, set([]), freqItems)\r\n associate(freqItems, simpDat)\r\n print(freqItems)\r\ndo_IBMData()\r\n# do_KaggleData()\r\n\r\n" }, { "alpha_fraction": 0.6032171845436096, "alphanum_fraction": 0.6129356622695923, "avg_line_length": 34.95783233642578, "blob_id": "fb74177685d6ed26d3a49b0bce92614d73b0b09f", "content_id": "f6890d4a9bcd307f1ec5bf9b066df18dc5f2e8bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6736, "license_type": "no_license", "max_line_length": 128, "num_lines": 166, "path": "/FP_Growth.py", "repo_name": "ooxx5626/DataMining_HW1", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 18 21:24:10 2018\n\n@author: Tony\n\"\"\"\n\nimport file_manger\nclass treeNode: #定義一個Tree\n def __init__(self,nameValue, numCount, parentNode):\n self.name = nameValue #紀錄item的名字\n self.count = numCount #計算次數\n self.parent = parentNode #存放節點的父節點\n self.children = {} #存放節點的子節點\n self.nodeLink = None #連接有相關的item \n \n def inc(self, numOccur): #對count變數增加給定值\n self.count += numOccur\n \n def show(self, ind = 1): #顯示Tree架構 \n print(\" \" * ind, self.name, \" \",self.count)\n for child in self.children.values():\n child.show(ind + 1)\n\ndef createTree(dataSet, minSup): #構建FP-tree\n headerTable = {}\n for trans in dataSet: #第一次scan,建item對應的次數之表格\n for item in trans:\n headerTable[item] = headerTable.get(item, 0) + dataSet[trans]\n for k in list(headerTable): #將不滿足min-support的item刪除\n if headerTable[k] < minSup:\n del(headerTable[k])\n freqItemSet = set(headerTable.keys()) \n #print ('freqItemSet: ',freqItemSet)\n \n if len(freqItemSet) == 0: #若是空item集,回傳None\n return None, None\n for k in sorted(headerTable.keys()): #建Table,存放指向有關聯的item\n headerTable[k] = [headerTable[k], None]\n #print ('headerTable: ',headerTable)\n retTree = treeNode('Null Set', 1, None) #初始化tree(Root)\n \n for tranSet, count in dataSet.items(): #第二次scan,建FP-tree\n localD = {} #對一個itemset做tranSet,紀錄每個item的次數\n for item in tranSet:\n if item in freqItemSet: #只對frequentItemset做排序\n localD[item] = headerTable[item][0] \n if len(localD) > 0:\n orderedItems = [v[0] for v in sorted(localD.items(), key=lambda p: p[1], reverse=True)] #排序\n updateTree(orderedItems, retTree, headerTable, count) #更新FP-tree\n return retTree, headerTable #回傳Tree跟表格\n\ndef updateTree(items, inTree, headerTable,count):\n if items[0] in inTree.children: # 檢查是否存在此節點\n inTree.children[items[0]].inc(count) # 存在則count增加\n \n else: #若不存在,則創建一個新的Treenoe並將其作為子節點加到Tree中 \n inTree.children[items[0]] = treeNode(items[0],count,inTree) #創建新節點\n if headerTable[items[0]][1]==None: 
#更新表格或前一個有關聯的item指向新節點,若原来不存在該類別,更新表格\n headerTable[items[0]][1] = inTree.children[items[0]]\n else:\n updateHeader(headerTable[items[0]][1],inTree.children[items[0]])\n \n if len(items) > 1: #對剩下的item迭代\n updateTree(items[1::], inTree.children[items[0]], headerTable, count) \n \ndef updateHeader(nodeToTest, targetNode): #獲得表格中該item對應的尾節點,然後將它指向新節點(targetNode)\n while (nodeToTest.nodeLink != None):\n nodeToTest = nodeToTest.nodeLink\n nodeToTest.nodeLink = targetNode \n\ndef loadSimpDat(): #生成itemsets\n simpDat = [['Bread', 'Milk', 'Beer'],\n ['Bread', 'Coffee'],\n ['Bread', 'Egg'],\n ['Bread', 'Milk', 'Coffee'],\n ['Milk', 'Egg'],\n ['Bread', 'Egg'],\n ['Milk', 'Egg'],\n ['Bread', 'Milk', 'Egg', 'Beer'],\n ['Bread', 'Milk', 'Egg']]\n return simpDat\n\ndef createInitSet(dataSet):\n retDict = {}\n for trans in dataSet: \n retDict[frozenset(trans)] = retDict.get(frozenset(trans), 0)+1\n return retDict\n\n#給定itemset生成一個條件模組(前綴path)\ndef findPrefixPath(basePat,treeNode): #basePat表示輸入的frequentitem,treeNode為當前FP-tree中對應的第一個節點(可在函數外部通過headerTable[basePat][1]獲得)\n condPats = {}\n while treeNode != None:\n prefixPath = [] \n ascendTree(treeNode, prefixPath)\n if len(prefixPath) > 1:\n condPats[frozenset(prefixPath[1:])] = treeNode.count\n treeNode = treeNode.nodeLink\n return condPats #回傳函数的條件模組\n\ndef ascendTree(leafNode, prefixPath): #輔助函數,直接修改prefixPath的值,將當前節點leafNode添加到prefixPath的尾端,然後遞歸添加其父節點\n if leafNode.parent != None:\n prefixPath.append(leafNode.name)\n ascendTree(leafNode.parent, prefixPath) \n \n#遞歸查找頻繁項集\n# inTree、headerTable:由createTree()函數生成的itemset的FP-tree\n# minSup:表示最小支持度\n# preFix:請傳入一個空集合(set([])),將在函數中用於保存當前前綴\n# freqItemList:請傳入一個空列表([]),將用來儲存生成的frequentitemset\ndef mineTree(inTree,headerTable,minSup,preFix,freqItemList):\n bigL = [v[0] for v in sorted(headerTable.items(), key=lambda p: str(p[1]))]\n for basePat in bigL:\n newFreqSet = preFix.copy()\n newFreqSet.add(basePat)\n freqItemList.append(newFreqSet)\n condPattBases = findPrefixPath(basePat, headerTable[basePat][1])\n myConTree,myHead = createTree(condPattBases, minSup)\n \n if myHead != None:\n # print('conditional tree for :', newFreqSet)\n # myConTree.show()\n mineTree(myConTree, myHead, minSup, newFreqSet, freqItemList)\n\ndef fpGrowth(dataSet, minSup=2):\n initSet = createInitSet(dataSet)\n myFPtree, myHeaderTab = createTree(initSet, minSup)\n freqItems = []\n mineTree(myFPtree, myHeaderTab, minSup, set([]), freqItems)\n return freqItems\n\nif __name__==\"__main__\":\n \n #測試itemset和creat-tree\n# result = {\n# 'a': lambda x: x * 5,\n# 'b': lambda x: x + 7,\n# 'c': lambda x: x - 2\n# }['a'](1)\n # x=float(\"1.78\")\n # x = round(x)\n # print(x)\n datas = file_manger.readKaggle()\n \n file_manger.save_file(datas)\n# simpDat = loadSimpDat()\n# initSet = createInitSet(simpDat)\n# myFPtree, myHeaderTab = createTree(initSet, 2)\n# myFPtree.show()\n# # \n# # #測試findPrefixPath\n# # \n# for index in myHeaderTab:\n# print(index,findPrefixPath('Bread', myHeaderTab[index][1]))\n# # print(\"z\",findPrefixPath('z', myHeaderTab['z'][1]))\n# # print(\"r\",findPrefixPath('r', myHeaderTab['r'][1]))\n# # \n# # #測試mineTree\n# # \n# # freqItems = []\n# # mineTree(myFPtree, myHeaderTab, 2, set([]), freqItems)\n# # print(freqItems)\n \n# dataSet = loadSimpDat()\n# freqItems = fpGrowth(dataSet)\n# print(freqItems)" }, { "alpha_fraction": 0.6128908395767212, "alphanum_fraction": 0.6205639839172363, "avg_line_length": 44.12389373779297, "blob_id": "e28ebb943fe9c4226d175bfc3ecdd5f220105650", "content_id": 
"1ccd28f36064b1cdf569680f44163d5d6b36e538", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5359, "license_type": "no_license", "max_line_length": 105, "num_lines": 113, "path": "/node_class.py", "repo_name": "ooxx5626/DataMining_HW1", "src_encoding": "UTF-8", "text": "\r\n# -*- coding: utf-8 -*-\r\nclass treeNode:\r\n def __init__(self, nameValue, numOccur, parentNode):\r\n self.name = nameValue\r\n self.count = numOccur\r\n self.nodeLink = None\r\n self.parent = parentNode #needs to be updated\r\n self.children = {} \r\n#increments the count variable with a given amount \r\n def inc(self, numOccur):\r\n self.count += numOccur\r\n#display tree in text. Useful for debugging \r\n def disp(self, ind=1, save=[]):\r\n save.append(' '*ind+ self.name+ ' '+ str(self.count))\r\n # print (' '*ind, self.name, ' ', self.count)\r\n for child in self.children.values():\r\n child.disp(ind+1, save = save)\r\n\r\n def show(self, ind=1):\r\n print (' '*ind, self.name, ' ', self.count)\r\n for child in self.children.values():\r\n child.show(ind+1)\r\ndef createTree(dataSet, minSup): #構建FP-tree\r\n headerTable = {}\r\n for trans in dataSet: #第一次scan,建item對應的次數之表格\r\n for item in trans:\r\n headerTable[item] = headerTable.get(item, 0) + dataSet[trans]\r\n for k in list(headerTable): #將不滿足min-support的item刪除\r\n if headerTable[k] < minSup:\r\n del(headerTable[k])\r\n freqItemSet = set(headerTable.keys()) \r\n #print ('freqItemSet: ',freqItemSet)\r\n \r\n if len(freqItemSet) == 0: #若是空item集,回傳None\r\n return None, None\r\n for k in headerTable: #建Table,存放指向有關聯的item\r\n headerTable[k] = [headerTable[k], None]\r\n #print ('headerTable: ',headerTable)\r\n retTree = treeNode('Null Set', 1, None) #初始化tree(Root)\r\n \r\n for tranSet, count in dataSet.items(): #第二次scan,建FP-tree\r\n localD = {} #對一個itemset做tranSet,紀錄每個item的次數\r\n for item in tranSet:\r\n if item in freqItemSet: #只對frequentItemset做排序\r\n localD[item] = headerTable[item][0] \r\n if len(localD) > 0:\r\n orderedItems = [v[0] for v in sorted(localD.items(), key=lambda p: p[1], reverse=True)] #排序\r\n updateTree(orderedItems, retTree, headerTable, count) #更新FP-tree\r\n return retTree, headerTable #回傳Tree跟表格\r\n\r\ndef updateTree(items, inTree, headerTable, count):\r\n if items[0] in inTree.children: #check if orderedItems[0] in retTree.children\r\n inTree.children[items[0]].inc(count) #incrament count\r\n else: #add items[0] to inTree.children\r\n inTree.children[items[0]] = treeNode(items[0], count, inTree)\r\n # print(\"headerTable[items[0] : \",items[0])\r\n if headerTable[items[0]][1] == None: #update header table \r\n headerTable[items[0]][1] = inTree.children[items[0]]\r\n else:\r\n updateHeader(headerTable[items[0]][1], inTree.children[items[0]])\r\n if len(items) > 1:#call updateTree() with remaining ordered items\r\n updateTree(items[1::], inTree.children[items[0]], headerTable, count)\r\ndef updateHeader(nodeToTest, targetNode): #this version does not use recursion\r\n while (nodeToTest.nodeLink != None): #Do not use recursion to traverse a linked list!\r\n nodeToTest = nodeToTest.nodeLink\r\n nodeToTest.nodeLink = targetNode\r\n\r\ndef createInitSet(dataSet):\r\n retDict = {}\r\n for trans in dataSet: \r\n retDict[frozenset(trans)] = retDict.get(frozenset(trans), 0)+1\r\n return retDict\r\n\r\ndef ascendTree(leafNode, prefixPath): #ascends from leaf node to root\r\n if leafNode.parent != None:\r\n prefixPath.append(leafNode.name)\r\n ascendTree(leafNode.parent, prefixPath)\r\ndef 
findPrefixPath(treeNode): #treeNode comes from header table\r\n condPats = {}\r\n while treeNode != None:\r\n prefixPath = []\r\n ascendTree(treeNode, prefixPath)\r\n if len(prefixPath) > 1: \r\n condPats[frozenset(prefixPath[1:])] = treeNode.count\r\n treeNode = treeNode.nodeLink\r\n return condPats\r\ndef mineTree(inTree,headerTable,minSup,preFix,freqItemList):\r\n bigL = [v[0] for v in sorted(headerTable.items(), key=lambda p: str(p[1]))]\r\n for basePat in bigL:\r\n newFreqSet = preFix.copy()\r\n newFreqSet.add(basePat)\r\n freqItemList.append(newFreqSet)\r\n condPattBases = findPrefixPath(headerTable[basePat][1])\r\n myConTree,myHead = createTree(condPattBases, minSup)\r\n \r\n if myHead != None:\r\n # print('conditional tree for :', newFreqSet)\r\n # myConTree.show()\r\n mineTree(myConTree, myHead, minSup, newFreqSet, freqItemList)\r\n# def updateHeaderTable(dataSet, minSup, headerTable):\r\n# for trans in dataSet:#first pass counts frequency of occurance\r\n# for item in trans:\r\n# headerTable[item] = headerTable.get(item, 0) + dataSet[trans]\r\n# print(\"headerTable : \", headerTable)\r\n# print(\"dataSet : \", dataSet)\r\n# for k in list(headerTable): #remove items not meeting minSup\r\n# if headerTable[k] < minSup: \r\n# del(headerTable[k])\r\n# freqItemSet = set(headerTable.keys())\r\n# # print('freqItemSet: {}'.format(freqItemSet))\r\n# if len(freqItemSet) == 0: return None, None #if no items meet min support -->get out\r\n# for k in headerTable:\r\n# headerTable[k] = [headerTable[k], None] #reformat headerTable to use Node link " } ]
3
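The DataMining_HW1 record above mines frequent itemsets with FP-Growth and then keeps association rules whose confidence is at least 0.5 (associate() in start.py). The following standard-library sketch shows that confidence test in isolation; the sample transactions and names are illustrative, not taken from the repo.

def support_count(itemset, transactions):
    # Number of transactions containing every item in itemset.
    return sum(1 for t in transactions if set(itemset) <= set(t))

transactions = [
    ["Bread", "Milk", "Beer"],
    ["Bread", "Coffee"],
    ["Bread", "Milk", "Coffee"],
    ["Milk", "Egg"],
    ["Bread", "Milk", "Egg"],
]

antecedent, consequent = ["Bread"], ["Milk"]
both = support_count(antecedent + consequent, transactions)
ante = support_count(antecedent, transactions)
confidence = both / ante
print(f"{antecedent} -> {consequent}: conf = {confidence:.2f}")  # 3/4 = 0.75

start.py computes the same ratio as item_j_count / item_i_count while scanning the transaction list, writing rules that clear the 0.5 threshold to associate.txt.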
Katya-A/Lesson5
https://github.com/Katya-A/Lesson5
b7ee4ebb5c37d7b5880d15387379defdf1c954c4
350d7ad84a3d0359d438c7319d45297aaebd21d8
18cf12836987d03fed27e160cbe1c0398aea5071
refs/heads/master
2023-07-11T03:55:54.574058
2021-08-27T21:42:09
2021-08-27T21:42:09
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5098901391029358, "alphanum_fraction": 0.5846154093742371, "avg_line_length": 20.714284896850586, "blob_id": "f631f969743c8ce12200d4ad855977a7711666b2", "content_id": "aec083ca2679ab8efe5885898166318e1ba0df65", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": "no_license", "max_line_length": 36, "num_lines": 21, "path": "/1.py", "repo_name": "Katya-A/Lesson5", "src_encoding": "UTF-8", "text": "def num_gen():\n for odd_nums in range(1, n + 1):\n if odd_nums % 2 != 0:\n yield odd_nums\n\n\nn = 15\nodd_to_15 = []\n# for odd_nums in range(1, n + 1):\n# if odd_nums % 2 == 0:\n# odd_to_15.append(odd_nums)\nodd_to_15 = num_gen()\nprint(odd_to_15)\nprint(next(odd_to_15))\nprint(next(odd_to_15))\nprint(next(odd_to_15))\nprint(next(odd_to_15))\nprint(next(odd_to_15))\nprint(next(odd_to_15))\nprint(next(odd_to_15))\nprint(next(odd_to_15))" }, { "alpha_fraction": 0.29411765933036804, "alphanum_fraction": 0.5176470875740051, "avg_line_length": 42, "blob_id": "65da4466bcf6c1e8a888b20f42eca24eb9a949fc", "content_id": "13c8e134cc0fde2ff822a316d0f6b546e329f8e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "no_license", "max_line_length": 53, "num_lines": 2, "path": "/5.py", "repo_name": "Katya-A/Lesson5", "src_encoding": "UTF-8", "text": "src = [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11]\nprint(list(map(lambda x: src)))" }, { "alpha_fraction": 0.36206895112991333, "alphanum_fraction": 0.5057471394538879, "avg_line_length": 23.85714340209961, "blob_id": "61aab40a01d44ab865c09b12fc558e94d3798e0e", "content_id": "5847c771fea2c8290a379262623cb7e4d7b503b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 174, "license_type": "no_license", "max_line_length": 54, "num_lines": 7, "path": "/4.py", "repo_name": "Katya-A/Lesson5", "src_encoding": "UTF-8", "text": "src = [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55]\nresult = []\nfor i in range(len(src) - 1):\n if src[i] < src[i + 1]:\n result.append(src[i + 1])\n\nprint(result)\n" }, { "alpha_fraction": 0.6330472230911255, "alphanum_fraction": 0.6545064449310303, "avg_line_length": 24.94444465637207, "blob_id": "57f5a374041418e0307aa4f7352d68e158de01ae", "content_id": "77b8a90a68d7b40f1254b9400546a7a6744fa7c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 514, "license_type": "no_license", "max_line_length": 77, "num_lines": 18, "path": "/3.py", "repo_name": "Katya-A/Lesson5", "src_encoding": "UTF-8", "text": "def tutors_klasses_gen():\n for tu, kl in zip(tutors, klasses):\n yield tu, kl\n\n\ntutors = ['Иван', 'Анастасия', 'Петр', 'Сергей', 'Дмитрий', 'Борис', 'Елена']\nklasses = ['9А', '7В', '9Б', '9В', '8Б', '10А', '10Б', '9А']\n\niterator = tutors_klasses_gen()\nprint(tutors_klasses_gen())\n# print(list(iterator))\nprint(next(iterator))\nprint(next(iterator))\nprint(next(iterator))\nprint(next(iterator))\nprint(next(iterator))\nprint(next(iterator))\nprint(next(iterator))" } ]
4
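Lesson5's 1.py builds a generator of odd numbers but advances it with eight hand-written next() calls. A compact variant of the same pattern, iterated with a for loop so exhaustion is handled implicitly, looks like this (illustrative code, not from the repo):

def odd_up_to(n):
    # step 2 skips the even numbers instead of testing n % 2 each time
    for value in range(1, n + 1, 2):
        yield value

for odd in odd_up_to(15):
    print(odd)  # prints 1, 3, 5, ..., 15, one per line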
huzengjian/RNN-LMP-Forecasting
https://github.com/huzengjian/RNN-LMP-Forecasting
e556648e7875110075e0a78207ee740fba41f564
a43195d95108ce21a82751869eec4c09cc64421f
1ccb26a15ed2bfd82028a9ac60b55ef0aecf3e12
refs/heads/master
2021-01-10T10:10:36.719147
2016-02-18T17:50:37
2016-02-18T17:50:37
51965763
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6404644250869751, "alphanum_fraction": 0.6572801470756531, "avg_line_length": 38.44210433959961, "blob_id": "42aa1d7eb36bf0b8d599e2396c00946a7f99d163", "content_id": "d70b40ea440af589f6f85f8f84c44fdf2d64d50e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7493, "license_type": "no_license", "max_line_length": 253, "num_lines": 190, "path": "/testRNN.py", "repo_name": "huzengjian/RNN-LMP-Forecasting", "src_encoding": "UTF-8", "text": "# python testRNN.py 0 100 100 10 1000 MISO\n\n# features: dim 1: hours (1-24) dim 2: days (1-1000+). dim 3: features (1-10+)\n# targets: dim 1: hours (1-24) dim 2: days (1-1000+). dim 3: features (1-10+)\n\n\nfrom rnn_minibatch import MetaRNN\nimport pandas as pd\nimport numpy as np\nfrom dateutil import parser\nimport time\nfrom datetime import date\nimport os,sys\nfrom os import listdir\nfrom os.path import isfile, join\nimport cPickle as pickle\n\nhours_in_day = 24\nWeekDayToInt = dict(zip(\n\t[\"Monday\",\"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\",\"Sunday\"], \n\trange(7)))\n\nWinterMonths = [11,12,1,2]\n\t\n\n# Market Study Parameters. 1. Name of the target column 2. list of columns to be dropped 3. True = use MAPE False = use MAE\nMarketStudyParams = dict(zip(\n\t['MISO', 'PJM', 'MISO2','PJM2','Malin-NP'],\n\t[['Price', ['Date','HE','Day.of.Week', 'Prev.Week.Price','Total.RTO.Load','Actual.Load','Wtd.Avg.Index'], True],\n\t ['Price', ['Date','HE','Day.of.Week', 'Prev.Week.Price','Total.RTO.Load','Actual.Load','Wtd.Avg.Index'], True],\n\t #['Price', ['Date','HE','Day.of.Week'], True],\n\t ['Price', ['Date','HE','Weekday'], True],\n\t ['Price', ['Date','HE','Weekday'], True],\n\t ['MalinNP15Spread', ['MalinPrice','NP15Price','Date','HourlyLaggedSpread'], False]])) \n\t \n# ['Price', ['Date','Total.RTO.Load','Actual.Load','Prev.Day.Avg.Price','Prev.Day.Price','Prev.Week.Price'], True],\n# ['Price', ['Date','HE','Prev.Week.Price','Total.RTO.Load','Actual.Load','Prev.Day.Avg.Load','Prev.Week.Load','Wtd.Avg.Index','Prev.Day.Gas.Price','Wind.Gen'], True],\t \ndef normalize(x):\n\tmaxX = max(x)\n\tminX = min(x)\n\treturn np.array([(float(i) - minX)/(maxX - minX) for i in x])\n\ndef load(file, market):\n\tdf = pd.read_csv(file)\n\tdf['Date'] = [parser.parse(d) for d in df.Date]\n\tdf['Month'] = [d.month for d in df.Date]\n\tdf['Day'] = [d.day for d in df.Date]\n\tdropped_columns = MarketStudyParams[market][1]\n\tdf.drop(dropped_columns, axis=1, inplace=True, errors='ignore')\n\t\n\tdf = df.apply(lambda x: x.fillna(x.mean()),axis=0)\n\t#df.dropna(inplace = True)\n\t\n\ttarget_column = MarketStudyParams[market][0]\n\tif market != 'Malin-NP':\n\t\tdf = df[df[target_column] > 5]\n\t\t#df['Day.of.Week'] = [WeekDayToInt[x] for x in df['Day.of.Week']]\n\t\t\n\t#train_columns = set(['Total.RTO.Load','Prev.Week.Load','Wind.Gen','is.Hol.Wknd','Day.of.Week','Prev.Day.Price','Price'])\n\t#columns = set(list(df))-set(['Prev.Day.Avg.Price','Prev.Day.Price','Prev.Week.Price'])\n\t#df = df[list(columns)]\n\t\n\t\n\t#df = df[~df['Month'].isin(WinterMonths)]\n\treturn df\n\t\ndef prepareAllData(df, target_column, n_steps = 24):\n\ttrain_columns = set(list(df))-set([target_column])\n\tprint train_columns\n\tnrows = len(df.index)\n\n\tallX = df.as_matrix(train_columns).transpose() \n\tallX = np.asmatrix(np.array([normalize(x) for x in allX])).transpose() # Normalize all columns\n\t\n\t#print allX\n\tallY = [[x] for x in np.asarray(df[target_column])]\t#target column\n\t\n\ttargets 
= []\n\tfeatures = []\n\t\n\tfor hour in xrange(n_steps):\n\t\ttraining_row = []\n\t\ttesting_row = []\n\t\tfor i in xrange(nrows/n_steps):\n\t\t\tidx = i*n_steps + hour\n\t\t\ttraining_row.append(np.squeeze(np.asarray(allX[idx])))\n\t\t\ttesting_row.append(allY[idx])\n\t\tfeatures.append(training_row)\n\t\ttargets.append(testing_row)\n\n\t#print features\n\t#print targets\n\t\t\n\treturn np.asarray(features),np.asarray(targets)\n\n# Test the trained model\ndef testRNN(test_start, test_end, model, features, targets, use_mape = True):\n\tmapes = []\n\tmaes = []\n\tall_predict =[]\n\tall_target = []\n\tfor idx in xrange(test_start, test_end):\n\t\tguess = model.predict(features[:, idx, :][:, np.newaxis, :])\n\t\tguess = [j for x in guess for j in x]\n\t\terror = abs(targets[:,idx,:] - guess)\n\t\tmaes.append(error)\n\t\tmapes.append(np.mean(error/abs(targets[:,idx,:])))\n\t\tall_predict += [j for x in guess for j in x]\n\t\tall_target += [j for x in targets[:,idx,:] for j in x]\n\n\td = {'target': all_target, 'predict': all_predict}\n\tavg_mape = np.mean(mapes)\n\tavg_mae = np.mean(maes)\n\tprint 'Avg {0} from Day {1} to {2} = {3}'.format(\"MAPE\" if use_mape else \"MAE\", test_start, test_end, avg_mape if use_mape else avg_mae)\n\treturn pd.DataFrame(data=d), avg_mape if use_mape else avg_mae\n\n# Perform whole round of training + testing for a particular market like PJM or MISO\ndef train_and_test(market, start_hour, training_days, testing_days, n_hidden, n_epochs):\n\tdir = \"data\\\\{0}\\\\\".format(market)\n\tfiles = [f for f in listdir(dir) if isfile(join(dir, f))]\t# all the testing files\n\n\tresult_dir = '{0}Result\\\\{1}'.format(dir, time.time()) # result dir with timestamp\n\tos.mkdir(result_dir)\n\tavg_errs = []\n\tuse_mape = MarketStudyParams[market][2]\n\ttarget_column = MarketStudyParams[market][0]\n\t\n\tfor file in files:\n\t\tprint \"Testing file {0}...\".format(file)\n\t\terrs = []\n\t\tresult_df = pd.DataFrame()\n\t\trawdata_df = load(dir+file, market)\n\t\t\n\t\tfeatures,targets = prepareAllData(rawdata_df, target_column, n_steps = hours_in_day)\n\t\ttotal_days = features.shape[1]\n\t\ttotal_features = len(rawdata_df.columns)-1\n\t\t\n\t\tprint 'Total days = {0}, # of features = {1}'.format(total_days,total_features)\n\t\tmodel = MetaRNN(n_in=total_features, n_hidden=n_hidden, n_out=1,\n\t\t\t\t\t\t\tlearning_rate=0.01, learning_rate_decay=0.99,\n\t\t\t\t\t\t\tn_epochs=n_epochs, batch_size=100, activation='sigmoid', L1_reg = 0,\n\t\t\t\t\t\t\tL2_reg=0)\n\t\t#Training\n\t\t\n\t\ttraining_start = start_hour\n\t\ttraining_end = training_start + training_days-1\n\t\tmodel.fit(features[:,training_start:training_end,:], targets[:,training_start:training_end,:], validate_every=100, optimizer='bfgs')\n\t\t#pickle.dump(model, open( \"{0}\\\\model.p\".format(result_dir), \"wb\" ) )\n\t\t\n\t\tdf, avg_mape = testRNN(training_start, training_end, model, features, targets, use_mape)\n\t\tdf.to_csv('{0}\\\\{1}_training_result.csv'.format(result_dir, file))\n\t\t\n\t\tfor testing_start in xrange(start_hour+training_days, total_days, testing_days):\n\t\t\t#training_start = testing_start - training_days\n\t\t\t#training_end = testing_start-1\n\t\t\t\n\t\t\t#print \"\\nTraining...\"\n\t\t\t#model.fit(features[:,training_start:training_end,:], targets[:,training_start:training_end,:], validate_every=100, optimizer='bfgs')\n\t\t\t#training_df, training_mae = testRNN(training_start, training_end, model, features, targets, use_mape)\n\n\t\t\ttesting_end = min(testing_start + testing_days - 1, 
total_days)\n\t\t\tprint \"Testing...\"\n\t\t\tdf, err = testRNN(testing_start, testing_end, model, features, targets, use_mape)\n\t\t\tresult_df = pd.concat([result_df, df])\n\t\t\terrs.append(err)\n\t\t\n\t\tavg_err = np.mean(errs)\n\t\tprint 'For {0}, avg {1} = {2}'.format(file, \"MAPE\" if use_mape else \"MAE\", avg_err)\n\t\t\n\t\tavg_errs.append(avg_err)\n\t\tresult_df.to_csv('{0}\\\\{1}_result.csv'.format(result_dir, file))\n\t\t\t\n\tprint 'Average {0} for {1} = {2}'.format(\"MAPE\" if use_mape else \"MAE\", market, np.mean(avg_errs))\n\n\t\ndef main(argv):\n\tif len(argv) < 7:\n\t\tprint '\\nSorry - invalid parameters.\\nUsage:\\n==========Params=========\\nStart Hour \\n# of training days \\n# of testing days \\n# of hidden nodes \\n# of iterations \\nMarket(PJM|MISO|SPP)\\n====================='\n\t\treturn\n\tstart_hour = int(argv[1])\n\ttraining_days = int(argv[2])\n\ttesting_days = int(argv[3])\n\tn_hidden = int(argv[4])\n\tn_epochs = int(argv[5])\n\tprint '\\n==========Params=========\\nStart Hour = {0}\\n# of training days = {1} \\n# of testing days = {2} \\n# of hidden nodes = {3} \\n# of iterations = {4}\\n=========================\\n'.format(start_hour, training_days, testing_days, n_hidden, n_epochs)\n\tfor market in argv[6:]:\n\t\ttrain_and_test(market, start_hour, training_days, testing_days, n_hidden, n_epochs)\n\nif __name__ == \"__main__\":\n main(sys.argv)" }, { "alpha_fraction": 0.6929824352264404, "alphanum_fraction": 0.7982456088066101, "avg_line_length": 21.799999237060547, "blob_id": "f3d17268b6dd72533c2214e0d996f0ba53eb6408", "content_id": "5dfb05b6597ba519dde93dbea4e53fdc90630277", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 114, "license_type": "no_license", "max_line_length": 38, "num_lines": 5, "path": "/README.md", "repo_name": "huzengjian/RNN-LMP-Forecasting", "src_encoding": "UTF-8", "text": "# RNN-LMP-Forecasting\nRNN implementation of LMP forecasting\n\nSample usage:\npython testRNN.py 0 100 100 10 500 PJM\n" } ]
2
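testRNN.py in the RNN-LMP-Forecasting record above min-max-normalises every feature column and scores forecasts with MAPE (mean absolute percentage error) or MAE. Here is a minimal numpy sketch of both transforms on made-up price data; it assumes a non-constant column so the normaliser's denominator is non-zero, and none of these names come from the repo.

import numpy as np

def min_max(x):
    # Scale a feature column into [0, 1], as normalize() does per column.
    x = np.asarray(x, dtype=float)
    return (x - x.min()) / (x.max() - x.min())

def mape(target, predicted):
    # Mean absolute percentage error between actual and forecast values.
    target = np.asarray(target, dtype=float)
    predicted = np.asarray(predicted, dtype=float)
    return np.mean(np.abs(target - predicted) / np.abs(target))

prices = [31.0, 28.5, 40.2, 35.9]
print(min_max(prices))                         # column rescaled into [0, 1]
print(mape(prices, [30.0, 29.0, 38.0, 36.5]))  # fractional error, ~0.03

The script switches between MAPE and MAE per market via the boolean flag in MarketStudyParams, since percentage error is undefined when the target (e.g. a price spread) can be near zero.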
gafton/iboto
https://github.com/gafton/iboto
a5a165e5795a0430992fe85df725c4e3fca4e59e
fb58fb3beae8fd596c8723e63d1849ec5b194021
c3c4481dcdd2adffdad82e8986c350a6bafaf895
refs/heads/master
2020-04-03T21:56:05.787778
2010-12-08T23:53:04
2010-12-08T23:55:42
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5691331624984741, "alphanum_fraction": 0.5773696899414062, "avg_line_length": 34.43540573120117, "blob_id": "2fb5d203979af9c560445abb6691d16695a3e4af", "content_id": "a435a4427a143a359edb3460237dc595029e66f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 22218, "license_type": "no_license", "max_line_length": 180, "num_lines": 627, "path": "/ipy_profile_ec2.py", "repo_name": "gafton/iboto", "src_encoding": "UTF-8", "text": "# ipython module\n\nimport os, re, time, optparse, ConfigParser\nimport IPython.ipapi\nfrom IPython.ipstruct import Struct\nimport boto.ec2\nimport config\nimport socket\n\n# TODO better exception handling in completers\n# TODO handle spaces in tags (completion)\n# TODO autogenerate ec2run docstring\n\nip = IPython.ipapi.get()\nregion = getattr(config, 'DEFAULT_REGION', 'us-east-1')\ncreds = dict(aws_access_key_id=config.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=config.AWS_SECRET_ACCESS_KEY)\nec2 = boto.ec2.connect_to_region(region, **creds)\nip.user_ns['ec2'] = ec2\n\n# Find out our user id - query for a security group\nsg = ec2.get_all_security_groups()[0]\nowner_id = sg.owner_id\n\n######################################################\n# Helper functions\n######################################################\n\nre_allowed_chars = re.compile(r'[^a-z0-9_]+')\ndef to_slug(n):\n n = n.lower()\n n = re_allowed_chars.sub('_', n)\n return n\n\ndef iter_instances(reservations):\n for r in reservations:\n for i in r.instances:\n yield i\n \ndef list_instances(reservations):\n return list(iter_instances(reservations))\n\ndef firstinstance(reservations):\n for r in reservations:\n for i in r.instances:\n return i\n return None\n\ndef build_ami_list():\n # AMI list\n ami = dict()\n for fname in os.listdir('ami'):\n fname = os.path.join('ami', fname)\n cfg = ConfigParser.ConfigParser()\n cfg.read(fname)\n s = str(ec2.region.name)\n if cfg.has_section(s):\n for o in cfg.options(s):\n ami[o] = cfg.get(s, o)\n\n # Add custom AMIs\n for img in ec2.get_all_images(owners=[owner_id]):\n n = img.location.split('/')[-1]\n n = re.sub(r'\\.manifest\\.xml$', '', n)\n n = to_slug(n)\n ami[n] = str(img.id)\n \n return ami\nami = build_ami_list()\n\ndef expose_magic(*args):\n def _d(fn):\n for arg in args:\n ip.expose_magic(arg, fn)\n return fn\n return _d\n\n######################################################\n# magic ec2run\n######################################################\n\ndef resolve_ami(arg):\n amiid = None\n if arg.startswith('ami-'):\n amiid = arg\n elif arg in ami:\n amiid = ami[arg]\n return amiid\n\nclass CustomOptionParser(optparse.OptionParser):\n def exit(self, status=0, msg=''):\n raise ValueError, msg\n\nec2run_parser = CustomOptionParser(prog='%ec2run', usage='%prog [options] AMI')\nec2run_parser.add_option('-k', '--key', metavar='KEYPAIR', help='Specifies the key pair to use when launching the instance(s).')\nec2run_parser.add_option('-t', '--instance-type', metavar='TYPE', help='Specifies the type of instance to be launched.')\nec2run_parser.add_option('-n', '--instance-count', metavar='MIN-MAX', help='The number of instances to attempt to launch.')\nec2run_parser.add_option('-g', '--group', metavar='GROUP', action='append', help='Specifies the security group.')\nec2run_parser.add_option('-d', '--user-data', metavar='DATA', help='Specifies the user data to be made available to the instance(s) in this reservation.')\nec2run_parser.add_option('-f', '--user-data-file', 
metavar='DATA-FILE', help='Specifies the file containing user data to be made available to the instance(s) in this reservation.')\nec2run_parser.add_option('-m', '--monitor', action='store_true', help='Enables monitoring of the specified instance(s).')\nec2run_parser.add_option('-z', '--availability-zone', metavar='ZONE', help='Specifies the availability zone to launch the instance(s) in.')\nec2run_parser.add_option('--disable-api-termination', action='store_true', help='Indicates that the instance(s) may not be terminated using the TerminateInstances API call.')\nec2run_parser.add_option('--instance-initiated-shutdown-behavior', metavar='BEHAVIOR', help='Indicates what the instance(s) should do if an on instance shutdown is issued.')\nec2run_parser.add_option('--placement-group', metavar='GROUP_NAME', help='Specifies the placement group into which the instances should be launched.')\nec2run_parser.add_option('--private-ip-address', metavar='IP_ADDRESS', help='Specifies the private IP address to use when launching an Amazon VPC instance.')\nec2run_parser.add_option('--kernel', metavar='KERNEL', help='Specifies the ID of the kernel to launch the instance(s) with.')\nec2run_parser.add_option('--ramdisk', metavar='RAMDISK', help='Specifies the ID of the ramdisk to launch the instance(s) with.')\nec2run_parser.add_option('--subnet', metavar='SUBNET', help='The ID of the Amazon VPC subnet in which to launch the instance(s).')\n# TODO block device mapping, client-token, addressing\n\nec2run_parameters = []\nfor o in ec2run_parser.option_list:\n ec2run_parameters.extend(o._short_opts + o._long_opts)\n\n@expose_magic('ec2run', 'ec2-run-instances')\ndef ec2run(self, parameter_s):\n \"\"\"Launch a number of instances of the specified AMI.\n\n Usage:\\\\\n %ec2run [options] AMI\n Almost all the options from the Amazon command line tool are supported:\n \n -d, --user-data DATA\n Specifies the user data to be made available to the instance(s) in\n this reservation.\n\n -f, --user-data-file DATA-FILE\n Specifies the file containing user data to be made available to the\n instance(s) in this reservation.\n\n -g, --group GROUP [--group GROUP...]\n Specifies the security group (or groups if specified multiple times)\n within which the instance(s) should be run. Determines the ingress\n firewall rules that will be applied to the launched instances.\n Defaults to the user's default group if not supplied.\n\n -k, --key KEYPAIR\n Specifies the key pair to use when launching the instance(s).\n\n -m, --monitor\n Enables monitoring of the specified instance(s).\n\n -n, --instance-count MIN[-MAX]\n The number of instances to attempt to launch. May be specified as a\n single integer or as a range (min-max). This specifies the minumum\n and maximum number of instances to attempt to launch. If a single\n integer is specified min and max are both set to that value.\n\n -s, --subnet SUBNET\n The ID of the Amazon VPC subnet in which to launch the instance(s).\n\n -t, --instance-type TYPE\n Specifies the type of instance to be launched. Refer to the latest\n Developer's Guide for valid values.\n\n -z, --availability-zone ZONE\n Specifies the availability zone to launch the instance(s) in. 
Run the\n 'ec2-describe-availability-zones' command for a list of values, and\n see the latest Developer's Guide for their meanings.\n\n --disable-api-termination\n Indicates that the instance(s) may not be terminated using the\n TerminateInstances API call.\n\n --instance-initiated-shutdown-behavior BEHAVIOR\n Indicates what the instance(s) should do if an on instance shutdown\n is issued. The following values are supported\n \n - 'stop': indicates that the instance should move into the stopped\n state and remain available to be restarted.\n \n - 'terminate': indicates that the instance should move into the\n terminated state.\n\n --kernel KERNEL\n Specifies the ID of the kernel to launch the instance(s) with.\n\n --ramdisk RAMDISK\n Specifies the ID of the ramdisk to launch the instance(s) with.\n\n --placement-group GROUP_NAME\n Specifies the placement group into which the instances \n should be launched.\n\n --private-ip-address IP_ADDRESS\n Specifies the private IP address to use when launching an \n Amazon VPC instance.\n \"\"\"\n try:\n opts,args = ec2run_parser.parse_args(parameter_s.split())\n except Exception, ex:\n raise IPython.ipapi.UsageError, str(ex)\n return\n\n if not args:\n raise IPython.ipapi.UsageError, '%ec2run needs an AMI specifying'\n return\n\n run_args = {}\n if opts.instance_type:\n run_args['instance_type'] = opts.instance_type\n if opts.key:\n run_args['key_name'] = opts.key\n if opts.instance_count:\n if '-' in opts.instance_count:\n a,b = opts.instance_count.split('-')\n run_args['min_count'] = int(a)\n run_args['max_count'] = int(b)\n else:\n a = int(opts.instance_count)\n run_args['min_count'] = a\n run_args['max_count'] = a\n if opts.group:\n run_args['security_groups'] = opts.group\n if opts.user_data:\n run_args['user_data'] = opts.user_data\n elif opts.user_data_file:\n run_args['user_data'] = file(opts.user_data_file, 'r')\n if opts.monitor:\n run_args['monitoring_enabled'] = True\n if opts.availability_zone:\n run_args['placement'] = opts.availability_zone\n if opts.disable_api_termination:\n run_args['disable_api_termination'] = opts.disable_api_termination\n if opts.instance_initiated_shutdown_behavior:\n run_args['instance_initiated_shutdown_behavior'] = opts.instance_initiated_shutdown_behavior\n if opts.placement_group:\n run_args['placement_group'] = opts.placement_group\n if opts.private_ip_address:\n run_args['private_ip_address'] = opts.private_ip_address\n if opts.kernel:\n run_args['kernel_id'] = opts.kernel\n if opts.ramdisk:\n run_args['ramdisk_id'] = opts.ramdisk\n if opts.subnet:\n run_args['subnet_id'] = opts.subnet\n \n run_args['image_id'] = resolve_ami(args[0])\n r = ec2.run_instances(**run_args)\n \n inst = firstinstance([r])\n return str(inst.id)\n\ndef ec2run_completers(self, event):\n cmd_param = event.line.split()\n if event.line.endswith(' '):\n cmd_param.append('')\n arg = cmd_param.pop()\n \n arg = cmd_param.pop()\n if arg in ('-t', '--instance-type'):\n return ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge', 'm2.xlarge', 'm2.2xlarge', 'm2.4xlarge', 'cc1.4xlarge', 't1.micro']\n elif arg in ('-k', '--keys'):\n return [k.name for k in ec2.get_all_key_pairs()]\n elif arg in ('-n', '--instance-count'):\n return ['1', '1-'] # just examples really\n elif arg in ('-g', '--group'):\n return [g.name for g in ec2.get_all_security_groups()]\n elif arg in ('-d', '--user-data'):\n return []\n elif arg in ('-f', '--user-data-file'):\n return [] # TODO hook normal file complete\n elif arg in ('-z', '--availability-zone'):\n 
return [z.name for z in ec2.get_all_zones()]\n elif arg in ('--instance-initiated-shutdown-behavior'):\n return ['stop', 'terminate']\n elif arg in ('--placement-group'):\n return [g.name for g in ec2.get_all_placement_groups()]\n elif arg in ('--private-ip-address'):\n return []\n elif arg in ('--kernel'):\n return [] # TODO\n elif arg in ('--ramdisk'):\n return [] # TODO\n elif arg in ('--subnet'):\n return [] # TODO\n else:\n params = ec2run_parameters[:]\n # drop from params any already used\n for c in cmd_param:\n o = ec2run_parser.get_option(c)\n if o:\n for v in o._short_opts + o._long_opts:\n if v in params: params.remove(v)\n return params + ami.keys()\n\nip.set_hook('complete_command', ec2run_completers, re_key = '%?ec2run')\nip.set_hook('complete_command', ec2run_completers, re_key = '%?ec2-run-instances')\n\nre_inst_id = re.compile(r'i-\\w+')\nre_tag = re.compile(r'(\\w+):(.+)')\ndef resolve_instances(arg, filters=None):\n inst = None\n if arg == 'latest':\n r = ec2.get_all_instances(filters=filters)\n li = sorted(list_instances(r), key=lambda i:i.launch_time)\n if li:\n return li[-1:]\n else:\n return []\n\n m = re_inst_id.match(arg)\n if m:\n if len(arg) == 10:\n r = ec2.get_all_instances(instance_ids=[arg])\n return list_instances(r)\n else:\n # partial id\n return [ i for i in iter_instances(ec2.get_all_instances()) if i.id.startswith(arg) ]\n\n m = re_tag.match(arg)\n if m:\n r = ec2.get_all_instances(filters={'tag:%s' % m.group(1): m.group(2)})\n return list_instances(r)\n\n return []\n\ndef resolve_instance(arg, filters=None):\n insts = resolve_instances(arg, filters)\n if insts:\n return insts[0]\n else:\n return None\n\ndef args_instances(args, default='error'):\n instances = []\n if args:\n # ensure all instances are found before we start them\n for qs in args:\n insts = resolve_instances(qs)\n if not insts:\n print 'Instance not found for %s' % qs\n return\n instances.extend(insts)\n elif default=='all':\n instances = list_instances(ec2.get_all_instances())\n else:\n raise IPython.ipapi.UsageError, 'Command needs an instance specifying'\n\n if not instances:\n raise IPython.ipapi.UsageError, 'No instances found'\n\n return instances\n\n######################################################\n# magic ec2ssh\n######################################################\n\nre_user = re.compile('^(\\w+@)')\n\ndef ssh_live(ip, port=22):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n try:\n s.connect((ip, port))\n s.shutdown(2)\n return True\n except:\n return False\n\n@expose_magic('ec2ssh')\ndef ec2ssh(self, parameter_s):\n \"\"\"SSH to a running instance.\n\n Usage:\\\\\n %ec2ssh [-i ...] [user@]i-xxxxxxx|Tag:Value|latest\n \n Extra parameters (-i, etc.) will be sent through verbatim to ssh.\n\n The last parameter is expanded into the public host name for the first \n instance matched. The instance may be specified a number of ways:\n - i-xxxxxx: specify an instance by instance id\n - Tag:Value: specify an instance by Tag (e.g. 
Name:myname)\n - latest: the last launched instance\n\n Note: tab-completion is available, and completes on currently running instances, so\n you can for example do:\n %ec2ssh i-1<TAB> - tab complete of instances with a instance id starting i-1.\n %ec2ssh Name:<TAB> - tab complete of instances with a tag 'Name'.\n \"\"\"\n \n args = parameter_s.split()\n qs = args.pop()\n ssh_args = ' '.join(args)\n username = ''\n m = re_user.match(qs)\n if m:\n username = m.group(1)\n qs = re_user.sub('', qs)\n \n if not qs:\n raise IPython.ipapi.UsageError, '%ec2ssh needs an instance specifying'\n\n inst = resolve_instance(qs)\n if not inst:\n print 'Instance not found for %s' % qs\n return\n \n if inst.state == 'pending':\n print 'Waiting for %s pending->running...' % inst.id\n while inst.update() == 'pending':\n time.sleep(1)\n\n if not ssh_live(inst.ip_address):\n count = 0\n print 'Waiting for %s SSH port...' % inst.id\n # must succeed 3 times to be sure SSH is alive\n while count < 3:\n if ssh_live(inst.ip_address):\n count += 1\n else:\n count = 0\n time.sleep(1)\n \n if inst.state == 'running':\n print 'Connecting to %s...' % inst.public_dns_name\n ip.system('ssh %s %s%s' % (ssh_args, username, inst.public_dns_name))\n else:\n print 'Failed, instance %s is not running (%s)' % (inst.id, inst.state)\n \n return str(inst.id)\n\ndef instance_completer_factory(filters):\n def _completer(self, event):\n try:\n instances = []\n r = list_instances(ec2.get_all_instances(filters=filters))\n instances.extend([i.id for i in r])\n for i in r:\n for k, v in i.tags.iteritems():\n instances.append('%s:%s' % (k, v))\n \n return instances\n except Exception, ex:\n print ex\n return _completer\n\nip.set_hook('complete_command',\n instance_completer_factory(filters={'instance-state-name': 'running'}),\n re_key = '%?ec2ssh')\n\n######################################################\n# generic methods for ec2start, ec2stop, ec2kill\n######################################################\n\ndef _define_ec2cmd(cmd, verb, method, state):\n filters = {'instance-state-name': state}\n \n def _ec2cmd(self, parameter_s):\n args = parameter_s.split()\n instances = args_instances(args)\n \n fn = getattr(ec2, method)\n fn([inst.id for inst in instances])\n return ' '.join( str(inst.id) for inst in instances )\n \n # create function with docstring\n fn = (lambda a,b: _ec2cmd(a,b))\n fn.__doc__ = \"\"\"%(uverb)s selected %(state)s instances.\n \n Usage:\\\\\n %%%(cmd)s i-xxxxxxx|Tag:Value|latest\n \n The last parameter selects the instance(s) to %(verb)s. The instance may be specified\n a number of ways:\n - i-xxxxxx: specify an instance by instance id\n - Tag:Value: specify an instance by Tag (e.g. 
Name:myname)\n - latest: the last launched instance\n\n Note: tab-completion is available, and completes on appropriate instances, so\n you can for example do:\n %%%(cmd)s i-1<TAB> - tab complete of instances with a instance id starting i-1.\n %%%(cmd)s Name:<TAB> - tab complete of instances with a tag 'Name'.\n \"\"\" % dict(verb=verb, cmd=cmd, uverb=verb.capitalize(), state=state)\n ip.expose_magic(cmd, fn)\n\n ip.set_hook('complete_command',\n instance_completer_factory(filters=filters),\n re_key = '%?'+cmd)\n\n######################################################\n# magic ec2start\n######################################################\n\n_define_ec2cmd('ec2start', 'start', 'start_instances', 'stopped')\n_define_ec2cmd('ec2-start-instances', 'start', 'start_instances', 'stopped')\n\n######################################################\n# magic ec2stop\n######################################################\n\n_define_ec2cmd('ec2stop', 'stop', 'stop_instances', 'running')\n_define_ec2cmd('ec2-stop-instances', 'stop', 'stop_instances', 'running')\n\n######################################################\n# magic ec2kill\n######################################################\n\n_define_ec2cmd('ec2kill', 'terminate', 'terminate_instances', 'running')\n_define_ec2cmd('ec2-terminate-instances', 'terminate', 'terminate_instances', 'running')\n\n######################################################\n# magic ec2din\n######################################################\n\n@expose_magic('ec2din', 'ec2-describe-instances')\ndef ec2din(self, parameter_s):\n \"\"\"List and describe your instances.\n\n Usage:\\\\\n %ec2din [instance ...]\n \"\"\"\n args = parameter_s.split()\n instances = args_instances(args, default='all')\n print '%-11s %-8s %-9s %-11s %-13s %-25s %s' % ('instance', 'state', 'type', 'zone', 'ami', 'launch time', 'name')\n print '='*95\n for i in instances:\n print '%-11s %-8s %-9s %-11s %-13s %-25s %s' % (i.id, i.state[0:8], i.instance_type, i.placement, i.image_id, i.launch_time, i.tags.get('Name',''))\n\n######################################################\n# magic ec2watch\n######################################################\n\ndef _watch_step(args, instances, monitor_fields):\n new_instances = args_instances(args, default='all')\n n_i = new_instances[:]\n id_i = [ i.id for i in n_i ]\n for inst in instances:\n if inst.id in id_i:\n n = id_i.index(inst.id)\n \n # compare properties\n changes = []\n for k in monitor_fields:\n v1 = getattr(inst, k)\n v2 = getattr(n_i[n], k)\n if v1 != v2:\n if v1:\n if v2:\n print ' %s %s: %s->%s' % (inst.id, k, v1, v2)\n else:\n print ' %s -%s: %s' % (inst.id, k, v1)\n else:\n print ' %s +%s: %s' % (inst.id, k, v2)\n\n del id_i[n]\n del n_i[n]\n else:\n # instance has gone\n print '-%s' % inst.id\n \n # new instances\n for i in n_i:\n print '+%s' % inst.id\n\n return new_instances\n\n@expose_magic('ec2watch')\ndef ec2watch(self, parameter_s):\n \"\"\"Watch for changes in any properties on instances.\n\n Usage:\\\\\n %ec2watch [instance ...]\n \"\"\"\n interval = 2\n monitor_fields = ['launch_time', 'instance_type', 'state', 'public_dns_name', 'private_ip_address']\n\n args = parameter_s.split()\n instances = args_instances(args, default='all')\n try:\n while True:\n time.sleep(interval)\n instances = _watch_step(args, instances, monitor_fields)\n except KeyboardInterrupt:\n pass\n\n######################################################\n# magic regions\n######################################################\n\nregions = [ r.name for r 
in boto.ec2.regions(**creds) ]\n\n@expose_magic('region')\ndef region(self, parameter_s):\n \"\"\"Switch the default region.\n \n Usage:\\\\\n %region <regionname>\n \"\"\"\n parameter_s = parameter_s.strip()\n if parameter_s not in regions:\n raise IPython.ipapi.UsageError, '%region should be one of %s' % ', '.join(regions)\n region = parameter_s\n\n global ec2, ami\n ec2 = boto.ec2.connect_to_region(region, **creds) \n ip.user_ns['ec2'] = ec2\n\n # update ami list\n ami = build_ami_list()\n\ndef region_completers(self, event):\n return regions\nip.set_hook('complete_command', region_completers, re_key = '%?region')\n\ndef set_region(self, region, args):\n print 'set_region: %s' % region\n\n######################################################\n# ipython environment\n######################################################\n\n# make boto available in shell \nip.ex('import boto.ec2')\n\n# set variables in ipython ns\n\n# set prompt to region name\no = ip.options\no.prompt_in1 = r'${ec2.region.name} <\\#>:'\no.prompt_in2 = r' .\\D.:'\no.prompt_out = r'Out<\\#>:'\n\n# remove blank lines between\no.separate_in = ''\no.separate_out = ''\no.separate_out2 = '\\n'\n" }, { "alpha_fraction": 0.6303501725196838, "alphanum_fraction": 0.6986597776412964, "avg_line_length": 30.9034481048584, "blob_id": "1b2569afee33307a84c264ca0f8fa1e1f41c50b0", "content_id": "b726eea862a0cb5273e11df4de46ea60e4c3be81", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4626, "license_type": "no_license", "max_line_length": 125, "num_lines": 145, "path": "/README.markdown", "repo_name": "gafton/iboto", "src_encoding": "UTF-8", "text": "iboto - an interactive Amazon webservices shell\n===============================================\n\nIntroduction\n------------\niboto offers an interactive shell with the basic set of ec2 commands from the Amazon\ncommand line tools, on steroids!\n\nIt adds:\n\n- full tab-completion on arguments:\n + amis\n + instance ids\n + tags\n + zones\n + instance types, etc.\n Saving much fiddly copy-pasting of ids around.\n \n- much snappier\n\n Without having to load all of Java up first before running a command you'll see it's\n much snappier controlling instances compared to the Amazon tools (as great as they are!).\n \n- extra functionality:\n\n + ec2ssh - waits for the instance to be running and SSH to be\n available before connecting; all without having to find and copy\n the public dns name, guess when it's booted fully or even open a\n new terminal for SSH.\n\n + ec2watch - closely monitor what is happening to your instances whilst you're waiting.\n \n- all the nice features of ipython\n\n History recall, python integration, session recording, configurability, etc.\n\nIt's probably best illustrated with a demo session...\n\nDemo\n----\n<pre><code>\n./iboto\niboto ready\n\nCommands available:\n%ec2ssh\n%ec2run (aka %ec2-run-instances)\n%ec2start (aka %ec2-start-instances)\n%ec2stop (aka %ec2-stop-instances)\n%ec2kill (aka %ec2-terminate-instances)\n%ec2din (aka %ec2-describe-instances)\n%ec2watch\n%region\n\n'%command?' 
for more information.\n\neu-west-1 <1>:ec2run -t t[TAB]\neu-west-1 <1>:ec2run -t t1.micro\neu-west-1 <1>:ec2run -t t1.micro -k m[TAB]\neu-west-1 <1>:ec2run -t t1.micro -k mykey\neu-west-1 <1>:ec2run -t t1.micro -k mykey u[TAB]\neu-west-1 <1>:ec2run -t t1.micro -k mykey ubuntu_lucid_\neu-west-1 <1>:ec2run -t t1.micro -k mykey ubuntu_lucid_32_ebs\n Out<1>:'i-e4310993'\n\neu-west-1 <2>:ec2ssh $_\nWaiting for i-e4310993 pending->running...\nWaiting for i-e4310993 SSH port...\nConnecting to ec2-46-51-139-156.eu-west-1.compute.amazonaws.com...\nThe authenticity of host 'ec2-46-51-139-156.eu-west-1.compute.amazonaws.com (46.51.139.156)' can't be established.\nRSA key fingerprint is c7:6a:f5:a7:38:16:5e:1f:4c:ca:cc:bf:4c:b6:d7:de.\nAre you sure you want to continue connecting (yes/no)? yes\nWarning: Permanently added 'ec2-46-51-139-156.eu-west-1.compute.amazonaws.com,46.51.139.156' (RSA) to the list of known hosts.\nLinux ip-10-235-54-107 2.6.32-309-ec2 #18-Ubuntu SMP Mon Oct 18 21:00:20 UTC 2010 i686 GNU/Linux\nUbuntu 10.04.1 LTS\n\nWelcome to Ubuntu!\n...\nubuntu@ip-10-235-54-107:~$ logout\nConnection to ec2-46-51-139-156.eu-west-1.compute.amazonaws.com closed.\n Out<2>:'i-e4310993'\n\neu-west-1 <3>:ec2din \ninstance state type zone ami launch time name\n===============================================================================================\ni-e4310993 running t1.micro eu-west-1a ami-f4340180 2010-12-08T19:30:09.000Z \neu-west-1 <4>:ec2kill i-e43\n Out<4>:'i-e4310993'\n\neu-west-1 <5>:ec2watch i-e43\n i-e4310993 state: shutting-down->terminated\n i-e4310993 -public_dns_name: ec2-46-51-139-156.eu-west-1.compute.amazonaws.com\n i-e4310993 -private_ip_address: 10.235.54.107\n^Ceu-west-1 <6>:^D\nLeaving iboto\n\n</code></pre>\n\nInstallation\n------------\nThere is no need to install, but you will need python, the ipython library and the boto library.\nYou can install the two libraries with easy_install:\n\n $ easy_install boto==2.0b3\n $ easy_install ipython\n\nTo configure:\n\n $ cp config.py.template config.py\n\nThen edit config.py.\n\nYou need to either have your Amazon credentials in the environment as AWS_ACCESS_KEY_ID\nand AWS_SECRET_ACCESS_KEY, or you can configure your details in config.py.\n\nHelp\n----\nThe best documentation is the command documentation accessed by entering '%command?' 
at the\nshell prompt, e.g.:\n\n '%ec2start?'\n\nboto\n----\nYou can access the boto ec2 connection object from the shell as the variable 'ec2'.\nIf you need to script more advanced steps at any point you have the full boto API\navailable through this, with the niceties of ipython.\n\nAMI tab-completion\n-------------------\nAny custom AMIs in your account will be added to tab-completion based on the name of\nthe snapshot it was generated from.\n\nYou can also add lists of public pre-built amis.\n\nThere are example ami ids for Ubuntu Lucid us-east-1/eu-west-1 under ami/ubuntu-lucid.cfg.\nAny .cfg files added to this directory providing sections for your regions will be added to\nthe tab-completion dictionary.\n\nFuture plans\n------------\n- Add the full set of ec2 tools\n- Add further AWS apis.\n- Parallel ec2ssh execution for more than one host.\n- The sky is the limit!\n" }, { "alpha_fraction": 0.597597599029541, "alphanum_fraction": 0.6163663864135742, "avg_line_length": 25.117647171020508, "blob_id": "73c4ffa040dea42534600405b75b5b43bfadef8b", "content_id": "8ec8f8911fc0267798620afe33379898d997135b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1332, "license_type": "no_license", "max_line_length": 117, "num_lines": 51, "path": "/iboto", "repo_name": "gafton/iboto", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport sys\n\n# check boto installed and new enough\ntry:\n import boto\n major, minor = boto.Version.split('.', 1)\n if 'b' in minor:\n minor, beta = minor.split('b')\n if beta:\n beta = int(beta)\n else:\n beta = 1\n else:\n beta = sys.maxint\n major = int(major)\n minor = int(minor)\n if major < 2 or (major == 2 and minor == 0 and beta < 3):\n print >>sys.stderr, \"You need at least boto 2.0b3 installed to run iboto (found %s).\" % boto.Version\n sys.exit(-1)\nexcept:\n print >>sys.stderr, \"You need boto installed to run iboto:\\n$ easy_install boto\\nor\\n$ pip install boto\"\n sys.exit(-1)\n\ntry:\n from IPython.Shell import IPShellEmbed\nexcept:\n print >>sys.stderr, \"You need ipython installed to run iboto:\\n$ easy_install ipython\\nor\\n$ pip install ipython\"\n sys.exit(-1)\nargs = ['-p','ec2']\n\nbanner = \"\"\"iboto ready\n\nCommands available:\n%ec2ssh\n%ec2run (aka %ec2-run-instances)\n%ec2start (aka %ec2-start-instances)\n%ec2stop (aka %ec2-stop-instances)\n%ec2kill (aka %ec2-terminate-instances)\n%ec2din (aka %ec2-describe-instances)\n%ec2watch\n%region\n\n'%command?' for more information.\n\"\"\"\n\nipshell = IPShellEmbed(args,\n banner = banner,\n exit_msg = 'Leaving iboto')\nipshell()\n" } ]
3
klhong124/Google-Translate-Browser
https://github.com/klhong124/Google-Translate-Browser
2cb8e938ba51e2b2484702c07571c015a5d2a107
56e4ea1a6421595c70cbbf05d0a811f4e5210c28
f727ace87da4f6332aa524dc5642f4f78afe7944
refs/heads/master
2020-06-23T21:41:43.742469
2019-07-30T08:25:02
2019-07-30T08:25:02
198,761,077
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8571428656578064, "alphanum_fraction": 0.8571428656578064, "avg_line_length": 28, "blob_id": "8946dc9bd154121a0bbadc9a1847ee9a74f96e31", "content_id": "070fa2e6918dfbc9d66d47c6e7cbcafec159a921", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 28, "license_type": "no_license", "max_line_length": 28, "num_lines": 1, "path": "/README.md", "repo_name": "klhong124/Google-Translate-Browser", "src_encoding": "UTF-8", "text": "# simplified-chinese-browser" }, { "alpha_fraction": 0.6636136770248413, "alphanum_fraction": 0.6711074113845825, "avg_line_length": 24.553192138671875, "blob_id": "c698e1d9419dfe176267b0bbcb229f8c0d4b7ab7", "content_id": "60665fd5710ea08d8f27c6a181953eb154f6f9b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1201, "license_type": "no_license", "max_line_length": 90, "num_lines": 47, "path": "/init.py", "repo_name": "klhong124/Google-Translate-Browser", "src_encoding": "UTF-8", "text": "import sys\nimport requests\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtCore import QUrl\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\nfrom opencc import OpenCC\ncc = OpenCC('t2s')\n\nSCRIPT = '''\n var s = document.createElement(\"script\");\n var d = document.createElement(\"div\");\n d.id = \"google_translate_element\";\n s.src = \"//translate.google.com/translate_a/element.js?cb=googleTranslateElementInit\";\n document.body.appendChild(s);\n document.body.appendChild(d);\n function googleTranslateElementInit() { \n new google.translate.TranslateElement(\n {\n pageLanguage: 'en',\n layout: google.translate.TranslateElement.FloatPosition.TOP_LEFT,\n autoDisplay: true\n },\n 'google_translate_element'\n ); \n }\n googleTranslateElementInit();\n\n'''\n\n\ndef mainPyQt5():\n url = 'http://gatherleg.klhong124.online/'\n html = requests.get(url)\n # print(cc.convert(html.text))\n\n app = QApplication(sys.argv)\n\n browser = QWebEngineView()\n browser.setWindowTitle(\"Xero\")\n browser.load(QUrl(url))\n browser.page().runJavaScript(SCRIPT)\n browser.show()\n\n sys.exit(app.exec_())\n\n\nmainPyQt5()\n" } ]
2
Adarsh-Shrivastava-001/ReinforcementLearning
https://github.com/Adarsh-Shrivastava-001/ReinforcementLearning
8f26fa246101e2d659194f07e896e5d3ad8db738
18736c66d2ae8ee62059e83f97aad42f07194288
3310ea80b98d6d5488f37ff28f3759f233949fd0
refs/heads/master
2020-05-02T07:23:51.146306
2019-03-26T15:31:11
2019-03-26T15:31:11
177,816,969
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5290833115577698, "alphanum_fraction": 0.5570032596588135, "avg_line_length": 26.076923370361328, "blob_id": "1318be2d46a7831d6517d96ece352da60fa6e247", "content_id": "9fa0f85b284c2c41f61628d442e59dbf7fa5818c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2149, "license_type": "no_license", "max_line_length": 100, "num_lines": 78, "path": "/UCB.py", "repo_name": "Adarsh-Shrivastava-001/ReinforcementLearning", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 26 17:11:49 2019\n\n@author: adarsh\n\"\"\"\n\nimport numpy as np\nimport math\n\ndef data_gen(n_classes, prob):\n d=np.zeros((n_classes, 10000))\n for i in range(n_classes):\n d[i]=np.random.choice([0,1], size=10000, p=[1-prob[i], prob[i]])\n return d\n\n\n\ndef UCB(truth):\n n_classes=len(truth)\n test_cases=len(truth[0])\n init_trials=50\n expect=[1/len(truth)]*n_classes\n conf=[0]*n_classes\n n_step=[init_trials]*n_classes\n step=0\n rewards=0\n \n \n for i in range(n_classes):\n expect[i]=np.sum(truth[i][step:step+init_trials])/init_trials\n conf[i]=math.sqrt(math.log(2*init_trials*(i+1))/init_trials)\n rewards=rewards+np.sum(truth[i][step:step+init_trials])\n step=step+init_trials\n print()\n \n upper_bound=[expect[i]+conf[i] for i in range(n_classes)]\n \n \n while step<test_cases:\n action=np.argmax(upper_bound)\n if truth[action][step]==1:\n expect[action]=(expect[action]*n_step[action]/(n_step[action]+1))+(1/(n_step[action]+1))\n conf[action]=math.sqrt(math.log(2*step)/n_step[action])\n upper_bound[action]=expect[action]+conf[action]\n rewards=rewards+1\n else:\n expect[action]=(expect[action]*n_step[action]/(n_step[action]+1))\n conf[action]=math.sqrt(math.log(2*step)/n_step[action])\n upper_bound[action]=expect[action]+conf[action]\n \n n_step[action]+=1\n step=step+1\n\n \n \n \n step=0\n ran_rewards=0\n while step<test_cases:\n action=np.random.choice([0,1,2,3,4])\n if truth[action][step]==1:\n expect[action]=(expect[action]*n_step[action]/(n_step[action]+1))+(1/(n_step[action]+1))\n ran_rewards=ran_rewards+1\n else:\n expect[action]=(expect[action]*n_step[action]/(n_step[action]+1))\n \n step=step+1\n \n \n \n print(n_step)\n print(expect)\n print(conf)\n print(upper_bound)\n print(rewards)\n print(ran_rewards)\n \n \n \n \n \n " }, { "alpha_fraction": 0.8873239159584045, "alphanum_fraction": 0.8873239159584045, "avg_line_length": 34.5, "blob_id": "e6b47830523339ae885f7eaac5bae59700ee6a87", "content_id": "f4fa558b00d57ace0857d3943b22a09d6a282cad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 71, "license_type": "no_license", "max_line_length": 46, "num_lines": 2, "path": "/README.md", "repo_name": "Adarsh-Shrivastava-001/ReinforcementLearning", "src_encoding": "UTF-8", "text": "# ReinforcementLearning\nReinforcement Learning Algorithms from scratch\n" } ]
2
AnnaGerber/healthy-habits-pet
https://github.com/AnnaGerber/healthy-habits-pet
3ed02e7cb6a7d00bc92021495e6cda244cd58294
15b7f44a3e6839ce8f1b9bacb0944b162d62bf4a
7d9d4ac0bc39407d9ea342d24f0b04563b14ed6a
refs/heads/master
2021-01-20T09:48:57.831124
2017-12-03T11:02:53
2017-12-03T11:02:53
101,607,225
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.5885634422302246, "alphanum_fraction": 0.6304044723510742, "avg_line_length": 19.428571701049805, "blob_id": "6b856424bdf30bb8328dfb9a63d7b9bbd028080d", "content_id": "4e17b85acf54a0e9669b7c06c20b18b34fec68bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 717, "license_type": "no_license", "max_line_length": 129, "num_lines": 35, "path": "/micropython/lights.py", "repo_name": "AnnaGerber/healthy-habits-pet", "src_encoding": "UTF-8", "text": "import time\nfrom machine import Pin\nfrom neopixel import NeoPixel\n\nimport uos\nimport math\np = Pin(2, Pin.OUT)\nnp = NeoPixel(p, 12)\n\ndef colorWipe(color, delay=0.01):\n global np\n numPixels = np.n\n for i in range(numPixels):\n np[i] = color\n time.sleep(delay)\n np.write()\n\ndef randomWipe(delay=0.01):\n global np\n numPixels = np.n\n for t in range(0, 8):\n color = (math.floor(ord(uos.urandom(1))/4 * 3), math.floor(ord(uos.urandom(1))/3 * 4), math.floor(ord(uos.urandom(1))/2 * 2))\n for i in range(numPixels):\n np[i] = color\n time.sleep(delay)\n np.write()\n\ndef clearWipe():\n colorWipe((0, 0, 0))\n\ndef demo():\n global np\n numPixels = np.n\n colorWipe((0,0,100)) \n time.sleep(0.2)\n " }, { "alpha_fraction": 0.7671641707420349, "alphanum_fraction": 0.8119403123855591, "avg_line_length": 40.75, "blob_id": "30c26e9d556195be29237fd629e6546f368c061b", "content_id": "acb3c54e269e28254267ba844ea7938ffd57b3ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 335, "license_type": "no_license", "max_line_length": 145, "num_lines": 8, "path": "/README.md", "repo_name": "AnnaGerber/healthy-habits-pet", "src_encoding": "UTF-8", "text": "# healthy-habits-pet\n\nThe micropython directory contains the MicroPython files for healthy habits pet project\n\nYou'll also need the [SSD1306 library](https://github.com/adafruit/micropython-adafruit-ssd1306/releases/download/1.0.1/ssd1306.mpy) for the OLED\n\n\nThe cloud-app directory contains the source code for the cloud application\n\n" }, { "alpha_fraction": 0.6652690172195435, "alphanum_fraction": 0.6652690172195435, "avg_line_length": 21.015384674072266, "blob_id": "f2fec975058b59ae124700b878ccde18044ef9bb", "content_id": "957e3b207dd2249a026fb2e9f91b4577847b3dc5", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1431, "license_type": "permissive", "max_line_length": 72, "num_lines": 65, "path": "/cloud-app/src/main/java/healthyhabit/sample/store/HabitStore.java", "repo_name": "AnnaGerber/healthy-habits-pet", "src_encoding": "UTF-8", "text": "package healthyhabit.sample.store;\n\nimport java.util.Collection;\n\nimport com.cloudant.client.api.Database;\n\nimport healthyhabit.sample.HabitNotification;\n\n/**\n * Defines the API for a Habit store.\n *\n */\npublic interface HabitStore {\n\n \t/**\n\t * Get the target db object.\n\t * \n\t * @return Database.\n \t * @throws Exception \n\t */\n public Database getDB();\n \n \t/**\n\t * Gets all Habits from the store.\n\t * \n\t * @return All Habits\n \t * @throws Exception \n\t */\n public Collection<HabitNotification> getAll();\n\n /**\n * Gets an individual Habit from the store.\n * @param id The ID of the Habit to get.\n * @return The Habit.\n */\n public HabitNotification get(String id);\n\n /**\n * Persists a Habit to the store.\n * @param habit The Habit to persist.\n * @return The persisted Habit. 
The Habit will not have a unique ID..\n */\n public HabitNotification persist(HabitNotification habit);\n\n /**\n * Updates a Habit in the store.\n * @param id The ID of the Habit to update.\n * @param habit The Habit with updated information.\n * @return The updated Habit\n */\n public HabitNotification update(String id, HabitNotification habit);\n\n /**\n * Deletes a Habit from the store.\n * @param id The ID of the Habit to delete.\n */\n public void delete(String id);\n \n /**\n * Counts the number of Habits\n * @return The total number of Habits\n * @throws Exception \n */\n public int count() throws Exception;\n}\n" }, { "alpha_fraction": 0.6377049088478088, "alphanum_fraction": 0.6393442749977112, "avg_line_length": 17.08148193359375, "blob_id": "f8d759e86e07ad0ecab3b6bd1a7a23af7e7d5dee", "content_id": "30a5e1ddf60d30e429f71f28bcd5beae5b682038", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2440, "license_type": "permissive", "max_line_length": 79, "num_lines": 135, "path": "/cloud-app/src/main/java/healthyhabit/sample/HabitNotification.java", "repo_name": "AnnaGerber/healthy-habits-pet", "src_encoding": "UTF-8", "text": "package healthyhabit.sample;\n\nimport java.util.Date;\n\n/**\n * Represents the HabitTracker device notification document stored in Cloudant.\n * Used to record details of a habit notification (e.g. exercise completed) \n */\n\npublic class HabitNotification {\n\tprivate String _id;\n\tprivate String _rev;\n\tprivate Date timestamp = null;\n\tprivate String deviceId = null;\n\tprivate String deviceType = null;\n\tprivate String eventType = null;\n\n\t\n\tpublic HabitNotification() {\n\t\tthis.deviceId = \"\";\n\t\tthis.deviceType = \"ESP8266\";\n\t\tthis.eventType = \"habit\";\n\t}\n\n\t/**\n\t * Gets the ID.\n\t * \n\t * @return The ID.\n\t */\n\tpublic String get_id() {\n\t\treturn _id;\n\t}\n\n\t/**\n\t * Sets the ID\n\t * \n\t * @param _id\n\t * The ID to set.\n\t */\n\tpublic void set_id(String _id) {\n\t\tthis._id = _id;\n\t}\n\n\t/**\n\t * Gets the revision of the document.\n\t * \n\t * @return The revision of the document.\n\t */\n\tpublic String get_rev() {\n\t\treturn _rev;\n\t}\n\n\t/**\n\t * Sets the revision.\n\t * \n\t * @param _rev\n\t * The revision to set.\n\t */\n\tpublic void set_rev(String _rev) {\n\t\tthis._rev = _rev;\n\t}\n\t\n\t/**\n\t * Gets the deviceId of the document.\n\t * \n\t * @return The deviceId of the document.\n\t */\n\tpublic String getDeviceId() {\n\t\treturn deviceId;\n\t}\n\n\t/**\n\t * Sets the name of the device through which the habit was notified\n\t * \n\t * @param name\n\t * The deviceName to set.\n\t */\n\tpublic void setDeviceId(String deviceId) {\n\t\tthis.deviceId = deviceId;\n\t}\n\t/**\n\t * Sets the type of the device through which the habit was notified\n\t * \n\t * @param name\n\t * The deviceName to set.\n\t */\n\tpublic void setDeviceType(String deviceType) {\n\t\tthis.deviceType = deviceType;\n\t}\n\t\n\t/** Gets the deviceType of the document.\n\t * \n\t * @return The deviceType of the document.\n\t */\n\tpublic String getDeviceType() {\n\t\treturn deviceType;\n\t}\n\t/**\n\t * Sets the event type through which the habit was notified\n\t * \n\t * @param name\n\t * The eventType to set.\n\t */\n\tpublic void setEventType(String eventType) {\n\t\tthis.eventType = eventType;\n\t}\n\t\n\t/** Gets the deviceType of the document.\n\t * \n\t * @return The deviceType of the document.\n\t */\n\tpublic String getEventType() {\n\t\treturn 
eventType;\n\t}\n\n\t/**\n\t * Gets the timestamp of the document.\n\t * \n\t * @return The timestamp of the document.\n\t */\n\tpublic Date getTimestamp() {\n\t\treturn timestamp;\n\t}\n\n\t/**\n\t * Sets the timestamp\n\t * \n\t * @param timestamp\n\t * The timestamp to set.\n\t */\n\tpublic void setTimestamp(Date ts) {\n\t\tthis.timestamp = ts;\n\t}\n\n}" }, { "alpha_fraction": 0.735897421836853, "alphanum_fraction": 0.735897421836853, "avg_line_length": 23.375, "blob_id": "32fc24a6d238b988776d5e21f2b4e9f935a52954", "content_id": "c6351be896d9ee9be4cc657e2d726c53c3801958", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 390, "license_type": "no_license", "max_line_length": 48, "num_lines": 16, "path": "/micropython/main.py", "repo_name": "AnnaGerber/healthy-habits-pet", "src_encoding": "UTF-8", "text": "import network\nimport mqttbluemix\n\n# disable access point mode\nap_if=network.WLAN(network.AP_IF)\nap_if.active(False)\n# configure connection to wireless network\nsta_if = network.WLAN(network.STA_IF)\nif not sta_if.isconnected():\n sta_if.active(True)\n sta_if.connect('<yourssid>', '<yourpassword>')\n while not sta_if.isconnected():\n pass\n\n# start the main program \nmqttbluemix.main()\n" }, { "alpha_fraction": 0.7152174115180969, "alphanum_fraction": 0.719565212726593, "avg_line_length": 26.073530197143555, "blob_id": "7bdf1463c44bd9afffc8e604eda828c7623bc11f", "content_id": "326103bb47f6a936eaa34883442a16526bdd08b2", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1840, "license_type": "permissive", "max_line_length": 78, "num_lines": 68, "path": "/cloud-app/src/main/java/healthyhabit/sample/rest/HabitAPI.java", "repo_name": "AnnaGerber/healthy-habits-pet", "src_encoding": "UTF-8", "text": "package healthyhabit.sample.rest;\n\nimport java.util.ArrayList;\nimport java.util.Collections;\nimport java.util.Date;\nimport java.util.List;\n\nimport javax.ws.rs.ApplicationPath;\nimport javax.ws.rs.Consumes;\nimport javax.ws.rs.GET;\nimport javax.ws.rs.POST;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.PathParam;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.Application;\n\nimport com.google.gson.Gson;\n\nimport healthyhabit.sample.HabitNotification;\nimport healthyhabit.sample.store.HabitStore;\nimport healthyhabit.sample.store.HabitStoreFactory;\nimport healthyhabit.sample.mqtt.ReminderPublisher;\nimport healthyhabit.sample.mqtt.ReminderPublisherFactory;\n\n@ApplicationPath(\"api\")\n@Path(\"/tracker\")\npublic class HabitAPI extends Application {\n\t\n\tHabitStore store = HabitStoreFactory.getInstance();\n\tReminderPublisher reminderPublisher = ReminderPublisherFactory.getInstance();\n\t\n /**\n * Gets a list of times when habit was tracked this month\n * GET http://localhost:9080/HealthyHabitsBackend/api/tracker\n * @return A collection of all the Habit Notification times\n */\n @GET\n @Path(\"/\")\n @Produces({\"application/json\"})\n public String getHabits() {\n\t\t\n\t\tif (store == null) {\n\t\t\treturn \"[]\";\n\t\t}\n\t\t\n\t\tList<Date> deviceDates = new ArrayList<Date>();\n\t\tfor (HabitNotification doc : store.getAll()) {\n\t\t\tString deviceId = doc.getDeviceId();\n\t\t\tDate timestamp = doc.getTimestamp();\n\t\t\tif (timestamp != null){\n\t\t\t\tdeviceDates.add(timestamp);\n\t\t\t}\n\t\t\tCollections.sort(deviceDates);\n\t\t}\n\t\treturn new Gson().toJson(deviceDates);\n }\n \n /**\n * Trigger a reminder\n * GET 
http://localhost:9080/HealthyHabitsBackend/api/tracker/remind\n */\n @GET\n @Path(\"/remind/{status}\")\n public String remind(@PathParam(\"status\") String status) {\n \t\treminderPublisher.remind(status);\n \t\treturn \"OK\";\n }\n}" }, { "alpha_fraction": 0.6612421274185181, "alphanum_fraction": 0.6705413460731506, "avg_line_length": 24.525423049926758, "blob_id": "ec0f648590b1dc454a89e836e9400678ea5ccd12", "content_id": "ab556fed75a10cc9a13e0f7979001d1f4d918170", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3011, "license_type": "no_license", "max_line_length": 103, "num_lines": 118, "path": "/micropython/mqttbluemix.py", "repo_name": "AnnaGerber/healthy-habits-pet", "src_encoding": "UTF-8", "text": "import machine\nimport time\nimport ubinascii\nimport webrepl\nimport oled\nimport soundfx\nimport lights\nimport ujson\nfrom umqtt.simple import MQTTClient\n\ncurrentStatus = 'good'\ndef remind(status='sleep'):\n global currentStatus\n currentStatus = status\n updateEyes(status)\n if status != 'sleep':\n # make a sound\n soundfx.question()\n # light up\n lights.randomWipe()\n\ndef updateEyes(status):\n # eyes indicate current status\n if status == 'sleep':\n beSleepy()\n elif status == 'fair':\n oled.pupils()\n elif status == 'good':\n oled.love()\n elif status == 'great':\n oled.bigLove()\n\ndef updatePet(topic, msg):\n # update the pet status based on message received from backend\n print(msg)\n status = ujson.load(msg).status\n remind(status)\n\n\ndef beHappy():\n global currentStatus\n # hearts in eyes, clear lights and make a happy sound\n lights.clearWipe()\n oled.heartBeat()\n soundfx.happy()\n time.sleep(0.5)\n updateEyes(currentStatus)\n \n\ndef beSleepy():\n # clear lights if on\n lights.clearWipe()\n # sleepy eyes\n oled.sleepy() \n\ndef main():\n button = machine.Pin(14, machine.Pin.IN, machine.Pin.PULL_UP)\n orgid = \"replace with your 6 character org id\"\n token = \"replace with your token\"\n user = \"use-token-auth\"\n # Make sure this matches up with the device type you configured through the IoT platform\n deviceType = \"ESP8266\"\n # Change to match your device Id\n deviceId = \"pet2\"\n\n server = '{}.messaging.internetofthings.ibmcloud.com'.format(orgid)\n clientId = 'd:{}:{}:{}'.format(orgid, deviceType, deviceId)\n try:\n client = MQTTClient(clientId, server, port = 8883, ssl = True, user=user, password=token)\n \n except:\n print('MQTT client setup error')\n\n try:\n client.set_callback(updatePet)\n \n except:\n print('MQTT callback error')\n \n\n pendingNotification = False\n reminded = False\n counter = 0\n while True:\n counter = counter + 1\n \n # every so many runs through the loop, connect to the MQTT broker to publish and check for messages\n # prevents repeated button press spam\n if counter >= 800:\n counter = 0\n if (reminded == False):\n remind('good')\n reminded = True\n client.connect()\n # non-blocking check for messages\n client.subscribe(b\"iot-2/cmd/update-tracker/fmt/json\")\n client.check_msg()\n client.disconnect()\n time.sleep(0.01)\n \n # send notification if button was pressed since last time\n if pendingNotification == True:\n print('connecting to MQTT broker...')\n client.connect()\n client.publish(b\"iot-2/evt/habit/fmt/json\", b\"{\\\"responded\\\":\\\"true\\\"}\")\n pendingNotification = False\n print('disconnecting from MQTT broker')\n client.disconnect()\n \n\n # detect button presses\n firstButtonReading = button.value()\n time.sleep(0.01)\n secondButtonReading = 
button.value()\n if firstButtonReading and not secondButtonReading:\n # notification will be sent\n pendingNotification = True\n beHappy()" }, { "alpha_fraction": 0.7129186391830444, "alphanum_fraction": 0.7129186391830444, "avg_line_length": 19.799999237060547, "blob_id": "047f53a0ba0544e2edb3b58a52bfd6ae903f7f11", "content_id": "05f5c32fd784d657e7dbeabd5852147af66f4070", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 418, "license_type": "permissive", "max_line_length": 48, "num_lines": 20, "path": "/cloud-app/src/main/java/healthyhabit/sample/mqtt/ReminderPublisherFactory.java", "repo_name": "AnnaGerber/healthy-habits-pet", "src_encoding": "UTF-8", "text": "package healthyhabit.sample.mqtt;\n\npublic class ReminderPublisherFactory {\n\tprivate static ReminderPublisher instance;\n\tstatic {\n\t\tReminderPublisher publisher;\n\t\ttry {\n\t\t\tpublisher = new ReminderPublisher();\n\t\t\tif (publisher.mqttAsyncClient != null) {\n\t\t\t\tinstance = publisher;\n\t\t\t}\n\t\t} catch (Exception e) {\n\t\t\te.printStackTrace();\n\t\t}\t\n\t}\n\t\n\tpublic static ReminderPublisher getInstance() {\n\t\treturn instance;\n\t}\n}\n\n\n" }, { "alpha_fraction": 0.6851851940155029, "alphanum_fraction": 0.6851851940155029, "avg_line_length": 8.800000190734863, "blob_id": "323b6d7fbc126ae538e626e0a4da47c4caf33012", "content_id": "932fb50d33b511f742139fe5f57d18c4ebf31541", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 54, "license_type": "permissive", "max_line_length": 30, "num_lines": 5, "path": "/cloud-app/src/test/java/test/TestApplication.java", "repo_name": "AnnaGerber/healthy-habits-pet", "src_encoding": "UTF-8", "text": "package test;\r\n\r\npublic class TestApplication {\r\n\r\n}\r\n" }, { "alpha_fraction": 0.7755101919174194, "alphanum_fraction": 0.7908163070678711, "avg_line_length": 31, "blob_id": "a3a5b6a8abd4260528ecb2b87da4577073c7200e", "content_id": "7e54a858a50f0200423006b7e751c16ec3f6d7a6", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 196, "license_type": "permissive", "max_line_length": 95, "num_lines": 6, "path": "/cloud-app/README.md", "repo_name": "AnnaGerber/healthy-habits-pet", "src_encoding": "UTF-8", "text": "\n# HealthyHabitsPetBackend\n\n\nThis is a sample cloud app for the Heathy Habits Tracker tutorial.\n\nSee https://www.ibm.com/developerworks/library/iot-lp201-build-skills-iot-health-app/index.html\n\n\n\n" }, { "alpha_fraction": 0.533730149269104, "alphanum_fraction": 0.658730149269104, "avg_line_length": 20.04166603088379, "blob_id": "a201317c071ede4c52d24a32d15f9b91b3b1b440", "content_id": "440a2e4db8ba53e6d4b9b7012d568712a360ed7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 504, "license_type": "no_license", "max_line_length": 90, "num_lines": 24, "path": "/micropython/soundfx.py", "repo_name": "AnnaGerber/healthy-habits-pet", "src_encoding": "UTF-8", "text": "from machine import Pin\nimport utime\nimport math\np = Pin(12, Pin.OUT)\n\ndef soundFX(amplitude=1000.0, period=1000.0, repeat=100):\n for i in range(1,repeat):\n uDelay = math.floor(1.5 + amplitude + amplitude * math.sin(utime.ticks_ms() / period))\n p.value(1)\n utime.sleep_us(uDelay)\n p.value(0)\n utime.sleep_us(uDelay)\n\ndef worried():\n soundFX(8000.0, 20.0, 100)\n\ndef happy():\n soundFX(90.0,60.0,400)\n\ndef question():\n 
soundFX(2200.0,100.0,200)\n\ndef sing():\n soundFX(90.0,600.0,4800)" } ]
11
Alexandre0911/hex-converter
https://github.com/Alexandre0911/hex-converter
a1a71ca6a0f66e5a6c82768295e906a486d53227
753e0188dcb20722321e2ff80ad761ca63e49983
13413a28d2f9d2f34a11ed85be1f08214036fd97
refs/heads/main
2023-08-05T01:31:40.079896
2021-09-17T20:10:18
2021-09-17T20:10:18
324,360,418
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5088360905647278, "alphanum_fraction": 0.5185862183570862, "avg_line_length": 24.934425354003906, "blob_id": "629decbbd729cdfe18766a2a4e7b849e3e99949a", "content_id": "f4800011dc82f5c052d94df8f46dc94e1c127f8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1727, "license_type": "no_license", "max_line_length": 78, "num_lines": 61, "path": "/main.py", "repo_name": "Alexandre0911/hex-converter", "src_encoding": "UTF-8", "text": "import pyconverter\r\n\r\n\r\n\r\ndef encoding():\r\n decoded_text = input('\\nPaste here the text: ')\r\n encoded_text = pyconverter.utf8tohex(decoded_text)\r\n print('\\nEncoded text: {}'.format(encoded_text))\r\n\r\ndef decoding():\r\n encoded_text = input('\\nPaste here the text: ')\r\n decoded_text = pyconverter.hextoutf8(encoded_text)\r\n print('\\nDecoded text: {}'.format(decoded_text))\r\n\r\n\r\n\r\ndef link():\r\n decoded_text = input('\\nPaste here the text: ')\r\n decoded_text_list = []\r\n for c in range(len(decoded_text)):\r\n if c % 2 == 0:\r\n decoded_text_list.append('%')\r\n decoded_text_list.append(decoded_text[c])\r\n elif c % 2 != 0:\r\n decoded_text_list.append(decoded_text[c])\r\n encoded_text = pyconverter.utf8tohex(decoded_text)\r\n print('\\nLink: \"http://google.com/search?btnI&q={}\"'.format(encoded_text))\r\n\r\n\r\n\r\nwhile True:\r\n print('''\r\n╔═════ Options ═════╗\r\n║ ║\r\n║ 0 - EXIT ║\r\n║ 1 - TEXT to HEX ║\r\n║ 2 - HEX to TEXT ║\r\n║ ║\r\n╚═══════════════════╝\r\n ''')\r\n try:\r\n opt = int(input('Select option: '))\r\n except ValueError:\r\n print('Invalid Choice. Try again.')\r\n continue\r\n if opt != 0 and opt != 1 and opt != 2:\r\n print('Invalid Choice. Try again.')\r\n elif opt == 0:\r\n break\r\n elif opt == 1:\r\n encoding()\r\n elif opt == 2:\r\n decoding()\r\n print('\\nTry Again (Y/N)?: ', end='')\r\n try:\r\n try_again = str(input('')).lower()\r\n except ValueError:\r\n print('Invalid Choice. Try again.')\r\n continue\r\n if try_again == 'n':\r\n break" }, { "alpha_fraction": 0.739130437374115, "alphanum_fraction": 0.7663043737411499, "avg_line_length": 22, "blob_id": "0ed4bc646cae9520030fe962db65ff9ad0f30f13", "content_id": "1b9e3c0d9327f37ba71d10250db3b891ac99afd3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 184, "license_type": "no_license", "max_line_length": 84, "num_lines": 8, "path": "/README.md", "repo_name": "Alexandre0911/hex-converter", "src_encoding": "UTF-8", "text": "# hex-converter\nHex Converter\n\n# REQUIREMENTS\n\n1- Open your cmd and type: pip install pyconverter\n\nIt should work right of the bat, if it doesn't, this is my discord: Nothingness#9656\n" } ]
2
SwapnaSama/calendarTestApp
https://github.com/SwapnaSama/calendarTestApp
1d89163950b5e0340ac765bc52197e47806d939e
a5910388650385b54c434beb371b278adc35c405
72d44d313f9567ddcd4e2f0c544b45bb13a2c589
refs/heads/main
2023-04-16T06:40:25.716157
2021-04-22T02:26:17
2021-04-22T02:26:17
357,731,277
0
0
null
2021-04-14T01:01:11
2021-04-14T01:01:14
2021-04-15T00:39:10
null
[ { "alpha_fraction": 0.7442622780799866, "alphanum_fraction": 0.7442622780799866, "avg_line_length": 28, "blob_id": "b625ddb7ba655123a2ce2fd6d70923bc6143edb7", "content_id": "0e73d05618aab897b8ec82f587a4933c7a204eea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 610, "license_type": "no_license", "max_line_length": 75, "num_lines": 21, "path": "/testCases/conftest.py", "repo_name": "SwapnaSama/calendarTestApp", "src_encoding": "UTF-8", "text": "import pytest\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.service import Service as ChromeService\n\ndriver_path = \".\\\\drivers\\\\chromedriver.exe\"\n\[email protected]()\ndef setup():\n chrome_service = ChromeService(driver_path)\n driver = webdriver.Chrome(service=chrome_service)\n print(\"Launching chrome browser...\")\n return driver\n\n\ndef pytest_addoption(parser): # This will get the value from CLI /hooks\n parser.addoption(\"--browser\")\n\n\[email protected]()\ndef browser(request): # This will return the Browser value to setup method\n return request.config.getoption(\"--browser\")\n\n" }, { "alpha_fraction": 0.6889039874076843, "alphanum_fraction": 0.6889039874076843, "avg_line_length": 39.80555725097656, "blob_id": "fb05edc74dd583b80292dbb5be414d8253c59661", "content_id": "53d107bd411a873d3dee97b8480afa0cae995746", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1469, "license_type": "no_license", "max_line_length": 91, "num_lines": 36, "path": "/pageObjects/RegisterPage.py", "repo_name": "SwapnaSama/calendarTestApp", "src_encoding": "UTF-8", "text": "from selenium.webdriver.common.by import By\n\n\nclass RegisterPage:\n textbox_email_name = \"email\"\n textbox_name_name = \"name\"\n textbox_username_name = \"username\"\n textbox_password_name = \"password\"\n textbox_avatar_name = \"image\"\n button_register_xpath = \"/html/body/div/form/button\"\n\n def __init__(self, driver):\n self.driver = driver\n\n def set_email(self, email):\n self.driver.find_element(By.NAME, self.textbox_email_name).clear()\n self.driver.find_element(By.NAME, self.textbox_email_name).send_keys(email)\n\n def set_name(self, name):\n self.driver.find_element(By.NAME, self.textbox_name_name).clear()\n self.driver.find_element(By.NAME, self.textbox_name_name).send_keys(name)\n\n def set_username(self, username):\n self.driver.find_element(By.NAME, self.textbox_username_name).clear()\n self.driver.find_element(By.NAME, self.textbox_username_name).send_keys(username)\n\n def set_password(self, password):\n self.driver.find_element(By.NAME, self.textbox_password_name).clear()\n self.driver.find_element(By.NAME, self.textbox_password_name).send_keys(password)\n\n def set_avatar_url(self, avatar):\n self.driver.find_element(By.NAME, self.textbox_avatar_name).clear()\n self.driver.find_element(By.NAME, self.textbox_avatar_name).send_keys(avatar)\n\n def click_register(self):\n self.driver.find_element(By.XPATH, self.button_register_xpath).click()\n" }, { "alpha_fraction": 0.5865433812141418, "alphanum_fraction": 0.5896250605583191, "avg_line_length": 35.05555725097656, "blob_id": "8454a136d8fc6642a3ccfec700b853b07614f5c7", "content_id": "cd1c288e6b9fe5fe774f472e8179485c2ad23f09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1947, "license_type": "no_license", "max_line_length": 82, "num_lines": 54, "path": "/testCases/test_login.py", "repo_name": 
"SwapnaSama/calendarTestApp", "src_encoding": "UTF-8", "text": "import pytest\nfrom pageObjects.LoginPage import LoginPage\nfrom utilities.readProperties import ReadConfig\nfrom utilities.customLogger import LogGen\nfrom selenium.webdriver.common.by import By\n\n\nclass Test001Login:\n baseURL = ReadConfig.get_application_url() + '/login'\n username = ReadConfig.get_username()\n password = ReadConfig.get_password()\n logger = LogGen.logger()\n\n @pytest.mark.regression\n def test_login_page_title(self, setup):\n self.logger.info(\"*************** Test_001_Login *****************\")\n self.logger.info(\"****Started Login page title test ****\")\n self.driver = setup\n self.logger.info(\"****Opening URL****\")\n self.driver.get(self.baseURL)\n title = self.driver.title\n print(title)\n if title == \"Calendar App\":\n self.logger.info(\"**** Login page title test passed ****\")\n self.driver.close()\n assert True\n else:\n self.logger.error(\"**** Login page title test failed****\")\n self.driver.save_screenshot(\".\\\\screenshots\\\\\" + \"loginPageTitle.png\")\n self.driver.close()\n assert False\n\n @pytest.mark.sanity\n @pytest.mark.regression\n def test_login(self, setup):\n\n self.logger.info(\"****Started Login Test****\")\n self.driver = setup\n self.driver.get(self.baseURL)\n self.lp = LoginPage(self.driver)\n self.lp.set_username(self.username)\n self.lp.set_password(self.password)\n self.lp.click_login()\n name = self.driver.find_element(By.TAG_NAME, 'span').text\n\n if name == \"SWAPNA SAMA\":\n self.logger.info(\"****Login test passed ****\")\n self.driver.close()\n assert True\n else:\n self.logger.error(\"****Login test failed ****\")\n self.driver.save_screenshot(\".\\\\screenshots\\\\\" + \"loginPage.png\")\n self.driver.close()\n assert False\n" }, { "alpha_fraction": 0.6161137223243713, "alphanum_fraction": 0.6161137223243713, "avg_line_length": 22.47222137451172, "blob_id": "787c27fca4548efdc330605c37f5d7e7c7bfb400", "content_id": "aa54f4cd9e65cd19a7f2ff0c524257f1d296029f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 844, "license_type": "no_license", "max_line_length": 60, "num_lines": 36, "path": "/utilities/readProperties.py", "repo_name": "SwapnaSama/calendarTestApp", "src_encoding": "UTF-8", "text": "import configparser\n\nconfig = configparser.RawConfigParser()\nconfig.read(\".\\\\configurations\\\\config.ini\")\n\n\nclass ReadConfig:\n @staticmethod\n def get_application_url():\n url = config.get('common info', 'baseURL')\n return url\n\n @staticmethod\n def get_username():\n username = config.get('common info', 'username')\n return username\n\n @staticmethod\n def get_password():\n password = config.get('common info', 'password')\n return password\n\n @staticmethod\n def get_email():\n email = config.get('common info', 'email')\n return email\n\n @staticmethod\n def get_name():\n name = config.get('common info', 'name')\n return name\n\n @staticmethod\n def get_avatar_url():\n avatar_url = config.get('common info', 'avatar_url')\n return avatar_url" }, { "alpha_fraction": 0.6681922078132629, "alphanum_fraction": 0.6704805493354797, "avg_line_length": 28.200000762939453, "blob_id": "10325a537a0d98e82a796f4b4b1996c76e61da6a", "content_id": "9802187adc3317c4bff90a1b6f12bf5a064c6e8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 437, "license_type": "no_license", "max_line_length": 76, "num_lines": 15, "path": "/pageObjects/LogOutPage.py", 
"repo_name": "SwapnaSama/calendarTestApp", "src_encoding": "UTF-8", "text": "from selenium.webdriver.common.by import By\n\n\nclass LogOutPage:\n link_logout_xpath = \"/html/body/section/section/div[1]/div/ul/li/a\"\n div_user_class = \"user\"\n\n def __init__(self, driver):\n self.driver = driver\n\n def click_logout(self):\n self.driver.find_element(By.XPATH, self.link_logout_xpath).click()\n\n def click_username(self):\n self.driver.find_element(By.CLASS_NAME, self.div_user_class).click()" }, { "alpha_fraction": 0.7788018584251404, "alphanum_fraction": 0.7788018584251404, "avg_line_length": 26.1875, "blob_id": "0e8265b3d8494299b2f3999e94f4259b28b1d73d", "content_id": "f5dc86e7f4b415a454f92a3408a30ab49f26d824", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 434, "license_type": "no_license", "max_line_length": 114, "num_lines": 16, "path": "/README.md", "repo_name": "SwapnaSama/calendarTestApp", "src_encoding": "UTF-8", "text": "# Calendar Web Application Test\n\nThis project is created to test the web application [Calendar APP](https://github.com/Noob-Coders/Calendar-WebApp)\n\nAbove web app project is hosted in heroku for ease of testing [Live APP](https://my-calendar-web.herokuapp.com)\n\n# Frameworks used\n\nPytest\nSelenium web driver\n\n# Install dependencies\n`python setup.py install`\n\n# Run the app locally using below commands\n`python -m pytest -v testCases/`" }, { "alpha_fraction": 0.6154838800430298, "alphanum_fraction": 0.6184946298599243, "avg_line_length": 38.40678024291992, "blob_id": "74a9ab7ef09b2ed83c37a6b947624b8b67ced9ea", "content_id": "be2da1e53c65b71b2dd13e4907836ace5dc60206", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2325, "license_type": "no_license", "max_line_length": 87, "num_lines": 59, "path": "/testCases/test_register.py", "repo_name": "SwapnaSama/calendarTestApp", "src_encoding": "UTF-8", "text": "import pytest\nfrom pageObjects.RegisterPage import RegisterPage\nfrom utilities.readProperties import ReadConfig\nfrom utilities.customLogger import LogGen\nfrom selenium.webdriver.common.by import By\n\n\nclass Test001Register:\n baseURL = ReadConfig.get_application_url() + '/register'\n email = ReadConfig.get_email()\n name = ReadConfig.get_name()\n username = ReadConfig.get_username()\n avatar_url = ReadConfig.get_avatar_url()\n password = ReadConfig.get_password()\n logger = LogGen.logger()\n\n @pytest.mark.regression\n def test_login_page_title(self, setup):\n self.logger.info(\"*************** Test_001_Register *****************\")\n self.logger.info(\"****Started Register page title test ****\")\n self.driver = setup\n self.logger.info(\"****Opening URL****\")\n self.driver.get(self.baseURL)\n title = self.driver.title\n print(title)\n if title == \"Calendar App\":\n self.logger.info(\"**** Register page title test passed ****\")\n self.driver.close()\n assert True\n else:\n self.logger.error(\"**** Register page title test failed****\")\n self.driver.save_screenshot(\".\\\\screenshots\\\\\" + \"registerPageTitle.png\")\n self.driver.close()\n assert False\n\n @pytest.mark.sanity\n @pytest.mark.regression\n def test_register(self, setup):\n self.logger.info(\"****Started Register Test****\")\n self.driver = setup\n self.driver.get(self.baseURL)\n self.registerPage = RegisterPage(self.driver)\n self.registerPage.set_email(self.email)\n self.registerPage.set_name(self.name)\n self.registerPage.set_username(self.username)\n 
self.registerPage.set_avatar_url(self.avatar_url)\n self.registerPage.set_password(self.password)\n self.registerPage.click_register()\n message = self.driver.find_element(By.XPATH, '/html/body/div[2]/form/p/a').text\n\n if message == \"Already registered\":\n self.logger.info(\"****Register test passed ****\")\n self.driver.close()\n assert True\n else:\n self.logger.error(\"****Register test failed ****\")\n self.driver.save_screenshot(\".\\\\screenshots\\\\\" + \"registerPage.png\")\n self.driver.close()\n assert False\n" }, { "alpha_fraction": 0.7278106212615967, "alphanum_fraction": 0.7278106212615967, "avg_line_length": 23.285715103149414, "blob_id": "36ac9f639ef1affbb018f6c2e7e412642124f198", "content_id": "e0c0c782ce40a7621f6e1f381362d379ea00006f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 169, "license_type": "no_license", "max_line_length": 47, "num_lines": 7, "path": "/configurations/config.ini", "repo_name": "SwapnaSama/calendarTestApp", "src_encoding": "UTF-8", "text": "[common info]\nbaseURL = https://my-calendar-web.herokuapp.com\nusername = swapna_sama\npassword = swapna123\nemail = [email protected]\nname = Swapna Sama\navatar_url = https://fakeurl.com" }, { "alpha_fraction": 0.6919127106666565, "alphanum_fraction": 0.6919127106666565, "avg_line_length": 36.095237731933594, "blob_id": "76d67752bb2cd2af626e65f775ad710884a6e04d", "content_id": "6cf6e2c795bb5838d4a16eef70b547172f5d7c00", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 779, "license_type": "no_license", "max_line_length": 91, "num_lines": 21, "path": "/pageObjects/LoginPage.py", "repo_name": "SwapnaSama/calendarTestApp", "src_encoding": "UTF-8", "text": "from selenium.webdriver.common.by import By\n\n\nclass LoginPage:\n textbox_username_name = \"username\"\n textbox_password_name = \"password\"\n button_login_xpath = \"/html/body/div/form/button\"\n\n def __init__(self, driver):\n self.driver = driver\n\n def set_username(self, username):\n self.driver.find_element(By.NAME, self.textbox_username_name).clear()\n self.driver.find_element(By.NAME, self.textbox_username_name).send_keys(username)\n\n def set_password(self, password):\n self.driver.find_element(By.NAME, self.textbox_password_name).clear()\n self.driver.find_element(By.NAME, self.textbox_password_name).send_keys(password)\n\n def click_login(self):\n self.driver.find_element(By.XPATH, self.button_login_xpath).click()\n" }, { "alpha_fraction": 0.6331621408462524, "alphanum_fraction": 0.6375641822814941, "avg_line_length": 33.9487190246582, "blob_id": "4ecab3fc5b226419785d908fd5c23efb61851f1c", "content_id": "bdf7944fe9179a96bf9a5ed5553ed41b8442711e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1363, "license_type": "no_license", "max_line_length": 89, "num_lines": 39, "path": "/testCases/test_logout.py", "repo_name": "SwapnaSama/calendarTestApp", "src_encoding": "UTF-8", "text": "import pytest\nfrom pageObjects.LogOutPage import LogOutPage\nfrom pageObjects.LoginPage import LoginPage\nfrom utilities.readProperties import ReadConfig\nfrom utilities.customLogger import LogGen\nfrom selenium.webdriver.common.by import By\n\n\nclass Test001Logout:\n baseURL = ReadConfig.get_application_url() + '/login'\n username = ReadConfig.get_username()\n password = ReadConfig.get_password()\n logger = LogGen.logger()\n\n @pytest.mark.sanity\n @pytest.mark.regression\n def 
test_logout(self, setup):\n\n self.logger.info(\"****Started Logout Test****\")\n self.driver = setup\n self.driver.get(self.baseURL)\n self.lp = LoginPage(self.driver)\n self.lp.set_username(self.username)\n self.lp.set_password(self.password)\n self.lp.click_login()\n self.log_out = LogOutPage(self.driver)\n self.log_out.click_username()\n self.log_out.click_logout()\n name = self.driver.find_element(By.XPATH, '/html/body/div/div[1]/p[2]/a[1]').text\n\n if name == \"Login\":\n self.logger.info(\"****Logout test passed ****\")\n self.driver.close()\n assert True\n else:\n self.logger.error(\"****Logout test failed ****\")\n self.driver.save_screenshot(\".\\\\screenshots\\\\\" + \"logoutPage.png\")\n self.driver.close()\n assert False\n" } ]
10
ssprowls/GeoCalc
https://github.com/ssprowls/GeoCalc
3af513d653307342b75dabc53a7c1c74ae073207
aee837b162deda1bc85c17bfb18a3cbd87d7c8f0
fc1587586ee9d5723f4d64b638c7e1ccbbe0a8cd
refs/heads/master
2021-07-11T06:13:50.708546
2020-12-04T23:50:25
2020-12-04T23:50:25
247,240,099
1
0
null
2020-03-14T08:32:46
2020-12-04T23:51:30
2021-06-02T01:12:12
Python
[ { "alpha_fraction": 0.569767415523529, "alphanum_fraction": 0.7558139562606812, "avg_line_length": 20.5, "blob_id": "1319da6863d9dbd258f8be2bdbb33251d19cff72", "content_id": "5f561e768b4a059469a696333a92420a95067e8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 86, "license_type": "no_license", "max_line_length": 33, "num_lines": 4, "path": "/requirements.txt", "repo_name": "ssprowls/GeoCalc", "src_encoding": "UTF-8", "text": "chromedriver-autoinstaller==0.2.2\nselenium==3.141.0\nurllib3==1.25.9\nXlsxWriter==1.2.9\n" }, { "alpha_fraction": 0.4752841889858246, "alphanum_fraction": 0.4862689971923828, "avg_line_length": 37.56650161743164, "blob_id": "af18b07a0b1dc7bc0208d5bd63b82e8a973bfef6", "content_id": "f1a4f876347c87e2f06a8535266bf0b6fd844d87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7829, "license_type": "no_license", "max_line_length": 166, "num_lines": 203, "path": "/write_pdf_to_sheets.py", "repo_name": "ssprowls/GeoCalc", "src_encoding": "UTF-8", "text": "import camelot\nimport io\nimport json\nimport pandas as pd\nimport pickle\nimport pprint\nimport os.path\nimport matplotlib\nimport time\nfrom googleapiclient.discovery import build\nfrom googleapiclient.http import MediaIoBaseDownload\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\n\nSPREADSHEET_ID = '1atmXdOFCyVKSFSuQSSdnV72FPfzGAJk_PJjxwAEVjM4'\nSCOPES = ['https://www.googleapis.com/auth/drive.readonly', 'https://www.googleapis.com/auth/spreadsheets']\n\n\ndef process():\n tables = camelot.read_pdf('20200917-155157-01261_09_16_2020_1503_Snell_Valley_Road_Reservior_7_Field_Daily_Report.pdf', pages='2', line_scale=40, shift_text=[''])\n print(len(tables))\n df = tables[0].df\n pd.set_option('display.width', 480)\n pd.set_option('display.max_columns', 14)\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop(df.index[0], inplace=True)\n print(df)\n keys = ['test_number', 'suffix', 'depth_and_elevation', 'location', 'wet_density',\n 'dry_density', 'max_dry_density', 'optimum_moisture_content', 'moisture_content',\n 'relative_compaction', 'corrected_dry_density', 'corrected_moisture',\n 'corrected_relative_compaction', 'pass_fail_retest']\n for row in df.itertuples():\n idx = 0\n for entry in row:\n # skip the first entry which is the index of the df\n if idx == 0:\n idx += 1\n continue\n # skip entry if no value found\n if entry == '':\n idx += 1\n continue\n entry = entry.replace('\\n', '')\n print(f\"{keys[idx-1]}: {entry}\")\n idx += 1\n\n\n\n\ndef main():\n\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n 'credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open('token.pickle', 'wb') as token:\n pickle.dump(creds, token)\n\n \"\"\"\n service = build('sheets', 'v4', credentials=creds)\n\n sheet = service.spreadsheets()\n result = sheet.values().get(spreadsheetId=SPREADSHEET_ID,\n 
range='A1').execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found.')\n else:\n print('Found data.')\n \"\"\"\n\n drive = build('drive', 'v3', credentials=creds)\n sheets = build('sheets', 'v4', credentials=creds).spreadsheets()\n project_id = '1mMsAhR_fwZu1Vf9b17eKqXzkyNftJemJ'\n\n # for the demo we are starting in a specific project directory\n page_token = None\n while True:\n query = f\"'{project_id}' in parents and mimeType='application/pdf'\"\n project_files = drive.files().list(q=query,\n fields='nextPageToken, files(id, name)',\n pageToken=page_token).execute()\n for file in project_files.get('files', []):\n print(f\"Looking at file '{file.get('name')}'\")\n\n if \"Field_Daily_Report\" in file.get('name'):\n\n print(\"Downloading ...\")\n # write to spreadsheet\n request = drive.files().get_media(fileId=file.get('id'))\n fh = io.FileIO(file.get('name'), 'wb')\n downloader = MediaIoBaseDownload(fh, request)\n done = False\n while done is False:\n status, done = downloader.next_chunk()\n\n print(\"Processing ...\")\n tables = camelot.read_pdf(file.get('name'), pages='2', line_scale=40, shift_text=[''])\n\n if len(tables) == 0:\n print(\"No table data found ...\\n\")\n continue\n\n else:\n print(f\"(found {len(tables)} table(s))\")\n df = tables[0].df\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop(df.index[0], inplace=True)\n pd.set_option('display.width', 480)\n pd.set_option('display.max_columns', 14)\n #print(df)\n\n keys = ['test_number', 'suffix', 'depth_and_elevation', 'location', 'wet_density',\n 'dry_density', 'max_dry_density', 'optimum_moisture_content', 'moisture_content',\n 'relative_compaction', 'corrected_dry_density', 'corrected_moisture',\n 'corrected_relative_compaction', 'pass_fail_retest']\n # create a dict for our values, default to None\n vals = {}\n for key in keys:\n vals[key] = None\n\n print(\"Values ...\")\n for row in df.itertuples():\n idx = 0\n for entry in row:\n # skip the first entry which is the index of the df\n if idx == 0:\n idx += 1\n continue\n # skip entry if no value found\n if entry == '':\n idx += 1\n continue\n entry = entry.replace('\\n', '')\n #print(f\"{keys[idx-1]}: {entry}\")\n vals[keys[idx-1]] = entry\n idx += 1\n\n print(f\"{vals}\")\n\n # we can write to the spreadsheet here\n # but we probably want to do a batch write at the end\n # which would mean we need to store all the dicts rather\n # than overwrite\n \"\"\"\n values = [list(vals.values())]\n body = {\n 'values': values\n }\n result = sheets.values().update(\n spreadsheetId='1atmXdOFCyVKSFSuQSSdnV72FPfzGAJk_PJjxwAEVjM4', range='A2:N2',\n valueInputOption='RAW', body=body).execute()\n \"\"\"\n\n rows = [list(vals.values())]\n sheets.values().append(\n #spreadsheetId='1atmXdOFCyVKSFSuQSSdnV72FPfzGAJk_PJjxwAEVjM4',\n # TODO: reformat order of values\n spreadsheetId='1Is64aJWC8VaIZgQjCIPNoO9alKiJqlNbIa7YLUZCPUw',\n range=\"Sheet1!A:N\",\n body={\n \"majorDimension\": \"ROWS\",\n \"values\": rows\n },\n valueInputOption=\"USER_ENTERED\"\n ).execute()\n\n # clear the vals dict for the next iteration\n for key in keys:\n vals[key] = None\n\n print()\n\n else:\n print(\"Skipping ...\\n\")\n\n page_token = project_files.get('nextPageToken', None)\n if page_token is None:\n break\n\n\ndef test():\n d = {'a': 1}\n print(list(d.values()))\n print(type(list(d.values())))\n\nif __name__ == \"__main__\":\n #test()\n main()\n # process()\n" }, { "alpha_fraction": 0.5732201337814331, "alphanum_fraction": 0.5894269943237305, "avg_line_length": 
33.32450485229492, "blob_id": "5850749bfd4bd75cf9ba201c032a862ae8547ca8", "content_id": "13ec8bcdf1b07b9f831973162cb8de636064c184", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5183, "license_type": "no_license", "max_line_length": 114, "num_lines": 151, "path": "/runner.py", "repo_name": "ssprowls/GeoCalc", "src_encoding": "UTF-8", "text": "from decimal import Decimal\n\nimport json\nimport sys\nimport time\nimport traceback\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nimport chromedriver_autoinstaller\n\nimport xlsxwriter\n\n##\n#\n# This is the runner to automatically calculate spectral periods for a given set of input data.\n#\n##\n\n#\n# https://realpython.com/installing-python/#step-1-download-the-python-3-installer\n#\n\nBASE_URL = 'https://earthquake.usgs.gov/designmaps/rtgm/'\n\nprint('\\nStarting...\\n')\n\n# init webdriver\nchromedriver_autoinstaller.install()\ndriver = webdriver.Chrome()\ndriver.get(BASE_URL)\ndriver.maximize_window()\n\n# open data file\nwith open('raw_data.json') as fp:\n data = json.load(fp)\n\n# set up excel workbook and worksheet\nworkbook = xlsxwriter.Workbook('temp_site_specific_seismic.xlsx')\nworksheet = workbook.add_worksheet()\nrow = 1\ncol = 'A'\n\nspectral_periods = [0.0, 0.1, 0.2, 0.3, 0.5, 0.75, 1.0, 2.0, 3.0, 4.0, 5.0]\n\nprint('Calculating values...\\n')\n\nfor x in range(11):\n\n try:\n\n header = 'SPECTRAL PERIOD {}'.format(spectral_periods[x])\n print(header)\n\n worksheet.write('{}{}'.format(col, row), header)\n row += 1\n\n x_vals = [str(i) for i in data['response'][x]['metadata']['xvalues']]\n y_vals = [str(i) for i in data['response'][x]['data'][0]['yvalues']]\n assert (len(x_vals) == len(y_vals))\n\n # Note: this is checking for continuous entries of 0.0, but does not account for multiple of other numbers\n flag = 0\n for i in reversed(range(len(y_vals))):\n # we can break early once the value is greater than zero\n if Decimal(y_vals[i]) > Decimal('0'):\n break\n # set the flag on the first occurence of zero\n if Decimal(y_vals[i]) == Decimal('0') and flag == 0:\n flag += 1\n continue\n # handle the case where there are multiple entries of zero\n if Decimal(y_vals[i]) == Decimal('0') and flag >= 1:\n flag += 1\n # strikethrough\n y_vals[i + 1] = ''.join([u'\\u0336{}'.format(c) for c in y_vals[i + 1]])\n x_vals[i + 1] = ''.join([u'\\u0336{}'.format(c) for c in x_vals[i + 1]])\n\n if flag > 1:\n print('X VALUES: {} (modified) \\nY VALUES: {} (modified)\\n'.format(x_vals, y_vals))\n else:\n print('X VALUES: {}\\nY VALUES: {}\\n'.format(x_vals, y_vals))\n\n # curve title\n rtgm_title_inputElement = driver.find_element_by_id('rtgm-input-view-0-title')\n rtgm_title_inputElement.clear()\n rtgm_title_inputElement.send_keys('Spectral Period: {}'.format(spectral_periods[x]))\n\n end = len(x_vals) - (flag - 1)\n\n # spectral response acceleration values (x vals)\n rtgm_sa_vals_inputElement = driver.find_element_by_id('rtgm-input-view-0-sa')\n rtgm_sa_vals_inputElement.clear()\n rtgm_sa_vals_inputElement.send_keys(', '.join(x_vals[:end]))\n\n # annual frequency of exceedence values (y vals)\n rtgm_afe_vals_inputElement = driver.find_element_by_id('rtgm-input-view-0-afe')\n rtgm_afe_vals_inputElement.clear()\n rtgm_afe_vals_inputElement.send_keys(', '.join(y_vals[:end]))\n\n # compute button\n rtgm_compute_button = 
driver.find_element_by_id('rtgm-input-view-0-compute')\n rtgm_compute_button.click()\n\n # Note: need to go x + 2 for the index since it's not zero-based and the page starts with\n # an existing entry\n vals = [\n WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, '/html/body/main/div/div/div[2]/ul/li[{}]/dl/dd[1]'.format(x + 2)))\n ).text,\n WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, '/html/body/main/div/div/div[2]/ul/li[{}]/dl/dd[2]'.format(x + 2)))\n ).text,\n WebDriverWait(driver, 10).until(EC.presence_of_element_located(\n (By.XPATH, '/html/body/main/div/div/div[2]/ul/li[{}]/dl/dd[3]'.format(x + 2)))\n ).text\n ]\n\n # TODO: is this necessary?\n time.sleep(.5)\n\n # write data to workbook\n worksheet.write('{}{}'.format(col, row), 'x_vals: {}'.format(x_vals[:end]))\n row += 1\n worksheet.write('{}{}'.format(col, row), 'y_vals: {}'.format(y_vals[:end]))\n row += 1\n worksheet.write('{}{}'.format(col, row), 'UHGM: {}'.format(vals[0]))\n row += 1\n worksheet.write('{}{}'.format(col, row), 'RTGM: {}'.format(vals[1]))\n row += 1\n worksheet.write('{}{}'.format(col, row), 'RC: {}'.format(vals[2]))\n row += 2\n\n except Exception as ex:\n\n # print('Error... {}'.format(ex))\n try:\n exc_info = sys.exc_info()\n finally:\n traceback.print_exception(*exc_info)\n del exc_info\n sys.exit(1)\n\n# not going to close the driver in case we want to keep the web page open to verify\n# driver.close()\nworkbook.close()\n\nprint('Success...')\n" }, { "alpha_fraction": 0.7054794430732727, "alphanum_fraction": 0.7351598143577576, "avg_line_length": 35.41666793823242, "blob_id": "2346400b6b0443705efd782e9e1e79900b49799f", "content_id": "afa81ddfa7946c575db46bf4da916bc6c66859a5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 438, "license_type": "no_license", "max_line_length": 127, "num_lines": 12, "path": "/setup.py", "repo_name": "ssprowls/GeoCalc", "src_encoding": "UTF-8", "text": "from setuptools import setup\n\n# https://stackoverflow.com/questions/14399534/reference-requirements-txt-for-the-install-requires-kwarg-in-setuptools-setup-py\n\nwith open('requirements.txt') as f:\n required = f.read().splitlines()\n\nsetup(name='GeoCalc',\n version='1.00.01',\n description='An automated runner to calculate Spectral Periods from a given data input.',\n author='yaboyspence',\n install_requires=required)\n\n" } ]
4
RaspVor/LiarLiar_Sarsa
https://github.com/RaspVor/LiarLiar_Sarsa
77299a66f0dd74e0f3ba98c721c65b3732bda14f
a9ef65e0f525156bafe17053e1e010509444a93e
3353403e8f8b7d0a974d06005062bd27034e9bdc
refs/heads/master
2022-11-19T19:40:27.135623
2020-07-26T15:08:42
2020-07-26T15:08:42
277,323,832
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.762566864490509, "alphanum_fraction": 0.7636363506317139, "avg_line_length": 57.4375, "blob_id": "1902794222b61a827905195a25ace0136c735c35", "content_id": "149c3e81ff2065e7f960cd9fe05ded2e24361c2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 935, "license_type": "no_license", "max_line_length": 278, "num_lines": 16, "path": "/README.md", "repo_name": "RaspVor/LiarLiar_Sarsa", "src_encoding": "UTF-8", "text": "# LiarLiar_MonteCarlo\n \nThe objectif of the project is to create the game LiarLiar. The goal of the game is to guess if your oponant is lying or not.\n\nThe rules are quite simple:\n\n- Each player receive the same number of cards.\n- For the first round a player is asked to choose a card. he can then tell the true name of the card or a wrong name. The other player has to guess if what is said is true or false. the player who lose take the card on put it on the table, front of him and visible for everyone.\n- The fist player who has 4 similar cards on the table or don't have any more cards in his hand lose.\n\nThe following code allow you to play again the computer which randomly tell the truth or a lie and randomly guess if your call.\n\nThe final objectif or this code is to train a reinforcement model in order to train an 'IA\" to play this game. \n\nLet's try the SARSA method to train an IA to play.\nThis project is in progress...\n" }, { "alpha_fraction": 0.4208256006240845, "alphanum_fraction": 0.4684394598007202, "avg_line_length": 42.60476303100586, "blob_id": "c40b88bffb60ba3e0dfc4d024537a78a2c5e02f9", "content_id": "46b2fa3769ee83a7654d7c380b6f32d9948674af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18317, "license_type": "no_license", "max_line_length": 238, "num_lines": 420, "path": "/launcher.py", "repo_name": "RaspVor/LiarLiar_Sarsa", "src_encoding": "UTF-8", "text": "import random\nimport numpy as np\nimport numpy.random\nimport pandas as pd\nfrom collections import defaultdict\nimport sys\n\ndef launcher_stochastic(Q, epsilon = 1.0/((80000/8000)+1)):\n \n \n # Class\n\n class nb_players:\n def __init__(self, nb=2, listing = np.array([])):\n self.nb=nb\n self.listing=listing\n \n class Player:\n \n def __init__(self, name = \"Anonymous\", player_number = int, cards = list(), cave = list()):\n self.name = name\n self.player_number = player_number\n self.cards = cards\n self.cave = cave\n \n class cards_game:\n def __init__(self, cards_list = np.array([\"Ifrit\",\"Ifrit\",\"Ifrit\",\"Ifrit\",\"Ifrit\",\"Ifrit\",\"Ifrit\",\"Ifrit\",\n \"Shiva\",\"Shiva\",\"Shiva\",\"Shiva\",\"Shiva\",\"Shiva\",\"Shiva\",\"Shiva\",\n \"Ondine\",\"Ondine\",\"Ondine\",\"Ondine\",\"Ondine\",\"Ondine\",\"Ondine\",\"Ondine\",\n \"Ahuri\",\"Ahuri\",\"Ahuri\",\"Ahuri\",\"Ahuri\",\"Ahuri\",\"Ahuri\",\"Ahuri\",\n \"Bahamut\",\"Bahamut\",\"Bahamut\",\"Bahamut\",\"Bahamut\",\"Bahamut\",\"Bahamut\",\"Bahamut\",\n \"Leviathan\",\"Leviathan\",\"Leviathan\",\"Leviathan\",\"Leviathan\",\"Leviathan\",\"Leviathan\",\"Leviathan\",\n \"Golgotha\",\"Golgotha\",\"Golgotha\",\"Golgotha\",\"Golgotha\",\"Golgotha\",\"Golgotha\",\"Golgotha\",\n \"Taurus\",\"Taurus\",\"Taurus\",\"Taurus\",\"Taurus\",\"Taurus\",\"Taurus\",\"Taurus\"]),\n last_cards=np.array([])\n ):\n self.cards_list = cards_list\n self.last_cards = last_cards\n \n class dice:\n def __init__(self, winner = 0, players_list = []):\n self.winner = winner\n self.players_list = players_list\n \n def 
random_choose(self):\n self.winner = random.randint(1,len(self.players_list))-1\n \n \n \n class round_nb:\n def __init__(self, round_num = 1, player_start = 0, player_next = 0): \n self.round_num = round_num\n self.player_start = player_start\n self.player_next = player_next\n \n \n #Mélanger le jeu de cartes\n def shuffle_deck(deck):\n deck_copy = deck\n np.random.shuffle(deck_copy)\n return(deck_copy)\n \n \n def one_turn_Bot_picker_Action(player):\n #exemple player = Players_list[1]\n \n Picker_options = np.minimum([1,1,1,1,1,1,1,1], player.cards[1])\n temp = len(Picker_options[Picker_options>0])\n \n proba=[1/temp,1/temp,1/temp,1/temp,1/temp,1/temp,1/temp,1/temp]\n Picker_options = Picker_options*proba\n \n \n IA_pick_choice = np.random.choice(8, size=1, p=Picker_options)[0]\n \n Action_pickerpick_list = np.array([[1,0,0,0,0,0,0,0],\n [0,1,0,0,0,0,0,0],\n [0,0,1,0,0,0,0,0],\n [0,0,0,1,0,0,0,0],\n [0,0,0,0,1,0,0,0],\n [0,0,0,0,0,1,0,0],\n [0,0,0,0,0,0,1,0],\n [0,0,0,0,0,0,0,1]]\n )\n \n IA_pick_call = np.random.choice(2, size=1, p=[1/6,5/6])[0] \n \n Action_pickercall_list = np.array([[1,0],[0,1]])\n \n return(Action_pickerpick_list[IA_pick_choice],\n Action_pickercall_list[IA_pick_call],\n np.append(Action_pickercall_list[IA_pick_call],Action_pickerpick_list[IA_pick_choice]))\n \n #xxx one_turn_Bot_picker_Action(Players_list[0].cards[1])\n #xxx> (array([0, 0, 1, 0, 0, 0, 0, 0]),\n #xxx> array([0, 1]),\n #xxx> array([0, 1, 0, 0, 1, 0, 0, 0, 0, 0]))\n \n \n def one_turn_Bot_caller_Action():\n \n IA_call_call = np.random.choice(2, size=1, p=[1/6,5/6])[0] \n \n Action_callercall_list = np.array([[1,0],[0,1]])\n \n return(Action_callercall_list[IA_call_call])\n \n #xxx one_turn_Bot_caller_Action()\n #xxx> array([1, 0])\n \n \n #Control des probas\n def get_probs_pick_actions(Q_s, epsilon, nS = 16, nA=18):\n \"\"\" obtains the action probabilities corresponding to epsilon-greedy policy \"\"\"\n policy_s = np.ones(nA) * epsilon / nS\n best_a = np.argmax(Q_s[:-2])\n policy_s[best_a] = 1 - epsilon + (epsilon / nS)\n \n selector = np.array([1,1,1,1,\n 1,1,1,1,\n 1,1,1,1,\n 1,1,1,1,\n 0,0])\n policy_s = np.where(selector==0, 0, policy_s)\n \n return policy_s\n\n\n def get_probs_call_actions(Q_s, epsilon, nS = 2, nA=18):\n \"\"\" obtains the action probabilities corresponding to epsilon-greedy policy \"\"\"\n policy_s = np.ones(nA) * epsilon / nS\n best_a = np.argmax(Q_s[-2:])+16\n policy_s[best_a] = 1 - epsilon + (epsilon / nS)\n \n selector = np.array([0,0,0,0,\n 0,0,0,0,\n 0,0,0,0,\n 0,0,0,0,\n 1,1])\n policy_s = np.where(selector==0, 0, policy_s)\n \n return policy_s\n \n \n \n # Initialisation du jeu\n\n ##Nombre de joueurs\n Nb_players = nb_players(nb=2, listing = np.arange(0,2))\n \n #xxx Nb_players.nb\n #xxx> 2\n \n ##Definition des joueurs\n \n Player1 = Player(name = \"Joueur_1\", player_number = 0)\n Player2 = Player(name = \"Joueur_2\", player_number = 1)\n Players_list = [Player1,Player2]\n \n #xxx Player.players_list[0].name\n #xxx> 'Joueur_1'\n \n ##Mélange des cartes (fonctionne si plus de 2 joueurs)\n Deck = cards_game()\n \n #xxx Deck.cards_list\n if Nb_players.nb == 2:\n Deck.cards_list = shuffle_deck(Deck.cards_list)[:-10] \n else:\n Deck.cards_list = shuffle_deck(Deck.cards_list)\n \n if len(Deck.cards_list) % Nb_players.nb != 0:\n Deck.last_cards = Deck.cards_list[-1]\n Deck.cards_list = Deck.cards_list[:-1]\n \n \n ##Distribution des cartes (fonctionne si plus de 2 joueurs)\n for i in Players_list:\n df1 = pd.DataFrame({'key': ['Ahuri', 'Bahamut', 'Golgotha', 'Ifrit', 
'Leviathan', 'Ondine','Shiva', 'Taurus']})\n \n if i.player_number < Nb_players.nb:\n tempx = np.unique(list(np.sort(Deck.cards_list[:len(Deck.cards_list)//Nb_players.nb*Nb_players.nb].reshape(len(Deck.cards_list)//Nb_players.nb,Nb_players.nb)[:,i.player_number])), return_counts=True)\n df2 = pd.DataFrame({'key': tempx[0],\n 'value': tempx[1]})\n df = df1.merge(df2, on='key', how='left').fillna(0)\n df['value'] = df['value'].apply(np.int64)\n \n i.cards = (np.array(df['key'], dtype='<U9'), np.array(df['value'], dtype='int64'))\n i.cave = (np.array(['Ahuri', 'Bahamut', 'Golgotha', 'Ifrit', 'Leviathan', 'Ondine', \n 'Shiva', 'Taurus'], dtype='<U9'), np.array([0, 0, 0, 0, 0, 0, 0, 0], dtype='int64'))\n else:\n tempx = np.unique(list(np.sort(np.append(Deck.cards_list[:len(Deck.cards_list)//Nb_players.nb*Nb_players.nb].reshape(len(Deck.cards_list)//Nb_players.nb,Nb_players.nb)[:,i.player_number],Deck.last_cards))), return_counts=True)\n \n df2 = pd.DataFrame({'key': tempx[0],\n 'value': tempx[1]})\n df = df1.merge(df2, on='key', how='left').fillna(0)\n df['value'] = df['value'].apply(np.int64)\n \n i.cards = (np.array(df['key'], dtype='<U9'), np.array(df['value'], dtype='int64'))\n i.cave = (np.array(['Ahuri', 'Bahamut', 'Golgotha', 'Ifrit', 'Leviathan', 'Ondine', \n 'Shiva', 'Taurus'], dtype='<U9'), np.array([0, 0, 0, 0, 0, 0, 0, 0], dtype='int64'))\n \n \n ## Qui commence\n ##On lance le dé\n \n de = dice(0,Players_list)\n de.random_choose()\n \n #xxx de.winner\n #xxx> 0 ou 1 si 2 joueurs\n \n #Initialize Counter\n start_ind = 1\n compteur_tour = round_nb(start_ind, Nb_players.listing[de.winner], (Nb_players.listing[de.winner]+1)%len(Nb_players.listing))\n \n #xxx compteur_tour.player_start\n #xxx> 1\n \n #Initialize Game status\n game_end = False\n \n #Initialize Reward\n Reward = 0\n \n #Initialize Actions\n RL_actions = np.array([[1,0,1,0,0,0,0,0,0,0],\n [1,0,0,1,0,0,0,0,0,0],\n [1,0,0,0,1,0,0,0,0,0],\n [1,0,0,0,0,1,0,0,0,0],\n [1,0,0,0,0,0,1,0,0,0],\n [1,0,0,0,0,0,0,1,0,0],\n [1,0,0,0,0,0,0,0,1,0],\n [1,0,0,0,0,0,0,0,0,1],\n [0,1,1,0,0,0,0,0,0,0],\n [0,1,0,1,0,0,0,0,0,0],\n [0,1,0,0,1,0,0,0,0,0],\n [0,1,0,0,0,1,0,0,0,0],\n [0,1,0,0,0,0,1,0,0,0],\n [0,1,0,0,0,0,0,1,0,0],\n [0,1,0,0,0,0,0,0,1,0],\n [0,1,0,0,0,0,0,0,0,1],\n [1,0,0,0,0,0,0,0,0,0],\n [0,1,0,0,0,0,0,0,0,0]\n ]) \n \n \n Zactions =\t{\n \"[1 0 1 0 0 0 0 0 0 0]\" : 0, \n \"[1 0 0 1 0 0 0 0 0 0]\" : 1, \n \"[1 0 0 0 1 0 0 0 0 0]\" : 2, \n \"[1 0 0 0 0 1 0 0 0 0]\" : 3, \n \"[1 0 0 0 0 0 1 0 0 0]\" : 4, \n \"[1 0 0 0 0 0 0 1 0 0]\" : 5, \n \"[1 0 0 0 0 0 0 0 1 0]\" : 6, \n \"[1 0 0 0 0 0 0 0 0 1]\" : 7, \n \"[0 1 1 0 0 0 0 0 0 0]\" : 8, \n \"[0 1 0 1 0 0 0 0 0 0]\" : 9, \n \"[0 1 0 0 1 0 0 0 0 0]\" : 10, \n \"[0 1 0 0 0 1 0 0 0 0]\" : 11, \n \"[0 1 0 0 0 0 1 0 0 0]\" : 12, \n \"[0 1 0 0 0 0 0 1 0 0]\" : 13, \n \"[0 1 0 0 0 0 0 0 1 0]\" : 14, \n \"[0 1 0 0 0 0 0 0 0 1]\" : 15, \n \"[1 0 0 0 0 0 0 0 0 0]\" : 16, \n \"[0 1 0 0 0 0 0 0 0 0]\" : 17\n }\n \n RL_pick_actions_proba = np.array([1/16,1/16,1/16,1/16,\n 1/16,1/16,1/16,1/16,\n 1/16,1/16,1/16,1/16,\n 1/16,1/16,1/16,1/16,\n 0,0])\n \n \n \n \n RL_call_actions_proba = np.array([0,0,0,0,\n 0,0,0,0,\n 0,0,0,0,\n 0,0,0,0,\n 1/2,1/2]) \n \n #Initialisation Action\n prob = np.zeros(18)\n \n action = np.zeros(18)\n \n #Initialise Episode \n episode = []\n \n \n # Deroulement du jeu\n \n while (game_end == False):\n \n #print('Reward :',Reward)\n #print('compteur_tour.round_num :',compteur_tour.round_num)\n #print('compteur_tour.player_start :',compteur_tour.player_start)\n 
#print('Players_list[0].cards[1] :',Players_list[0].cards[1])\n #print('Players_list[0].cave[1] :',Players_list[0].cave[1])\n #print('Players_list[1].cards[1] :',Players_list[1].cards[1])\n #print('Players_list[1].cave[1] :',Players_list[1].cave[1])\n \n State = (str(compteur_tour.player_start)+str(Players_list[0].cards[1])+str(Players_list[0].cave[1])+str(Players_list[1].cave[1]))\n \n \n if compteur_tour.player_start != 0:\n \n #Initialisation Action/prob\n prob = get_probs_call_actions(Q[State], epsilon) \\\n if (((State in Q) == True) and (np.all(Q[State][-2:] ==0) == False)) else RL_call_actions_proba\n \n #print(\"\\rMAX : {} // Q[State]: {}.\".format(np.argmax(Q[State][-2:])+16 , Q[State]), end=\"\") \n #sys.stdout.flush()\n \n #IA pick\n turn_played_IA = one_turn_Bot_picker_Action(Players_list[compteur_tour.player_start])\n \n #The card picked is removed from his deck\n temp = (np.array(['Ahuri', 'Bahamut', 'Golgotha', 'Ifrit', 'Leviathan', 'Ondine','Shiva', 'Taurus'], dtype='<U9'), \n Players_list[compteur_tour.player_start].cards[1] - turn_played_IA[0])\n Players_list[compteur_tour.player_start].cards = temp\n \n #RL make a call\n RL_call_choice = np.random.choice(18, size=1, p=prob)[0]\n RL_call_action = RL_actions[RL_call_choice][:2]\n Action = RL_actions[RL_call_choice] #save the action\n \n #We check who won\n if ((RL_call_action == turn_played_IA[1]).all() == True):\n temp = (np.array(['Ahuri', 'Bahamut', 'Golgotha', 'Ifrit', 'Leviathan', 'Ondine','Shiva', 'Taurus'], dtype='<U9'), \n Players_list[compteur_tour.player_start].cave[1] + turn_played_IA[0])\n Players_list[compteur_tour.player_start].cave = temp\n compteur_tour = round_nb(compteur_tour.round_num +1 , compteur_tour.player_start, (compteur_tour.player_start+1)%len(Nb_players.listing))\n else:\n temp = (np.array(['Ahuri', 'Bahamut', 'Golgotha', 'Ifrit', 'Leviathan', 'Ondine','Shiva', 'Taurus'], dtype='<U9'), \n Players_list[compteur_tour.player_next].cave[1] + turn_played_IA[0])\n Players_list[compteur_tour.player_next].cave = temp\n compteur_tour = round_nb(compteur_tour.round_num +1 , compteur_tour.player_next, (compteur_tour.player_next+1)%len(Nb_players.listing))\n \n \n #Reward\n if (len(Players_list[compteur_tour.player_start].cards[1][Players_list[compteur_tour.player_start].cards[1] < 0]) > 0):\n game_end = True\n Reward = 10000\n elif (len(Players_list[compteur_tour.player_start].cave[1][Players_list[compteur_tour.player_start].cave[1] > 3]) > 0):\n game_end = True\n Reward = 10000\n elif (len(Players_list[compteur_tour.player_next].cave[1][Players_list[compteur_tour.player_next].cave[1] > 3]) > 0):\n game_end = True\n Reward = -10000\n elif ((RL_call_action == turn_played_IA[1]).all() == True):\n Reward = 10\n else:\n Reward = -10\n \n episode.append((State, str(Action), Reward))\n \n else:\n \n #Initialisation Action/prob\n prob = get_probs_pick_actions(Q[State], epsilon) \\\n if(((State in Q) == True) and (np.all(Q[State][:-2] ==0.) 
== False)) else RL_pick_actions_proba\n \n #print(\"\\rMAX : {} // Q[State]: {}.\".format(np.argmax(Q[State][:-2]) , Q[State]), end=\"\") \n #sys.stdout.flush()\n \n #RL pick\n RL_pick_choice = np.random.choice(18, size=1, p=prob)[0]\n RL_pick_action = RL_actions[RL_pick_choice]\n \n #Card pick and call made\n RL_pickcard_action = RL_pick_action[2:10]\n RL_pickcall_action = RL_pick_action[:2]\n Action = RL_actions[RL_pick_choice] #save the action\n \n #The card picked is removed from his deck\n temp = (np.array(['Ahuri', 'Bahamut', 'Golgotha', 'Ifrit', 'Leviathan', 'Ondine','Shiva', 'Taurus'], dtype='<U9'), \n Players_list[compteur_tour.player_start].cards[1] - RL_pickcard_action)\n Players_list[compteur_tour.player_start].cards = temp\n \n #IA make a call\n IA_call_choice = one_turn_Bot_caller_Action()\n \n \n #We check who won\n if ((RL_pickcall_action == IA_call_choice).all() == True):\n temp = (np.array(['Ahuri', 'Bahamut', 'Golgotha', 'Ifrit', 'Leviathan', 'Ondine','Shiva', 'Taurus'], dtype='<U9'), \n Players_list[compteur_tour.player_start].cave[1] + RL_pickcard_action)\n Players_list[compteur_tour.player_start].cave = temp\n compteur_tour = round_nb(compteur_tour.round_num +1 , compteur_tour.player_start, (compteur_tour.player_start+1)%len(Nb_players.listing))\n else:\n temp = (np.array(['Ahuri', 'Bahamut', 'Golgotha', 'Ifrit', 'Leviathan', 'Ondine','Shiva', 'Taurus'], dtype='<U9'), \n Players_list[compteur_tour.player_next].cave[1] + RL_pickcard_action)\n Players_list[compteur_tour.player_next].cave = temp\n compteur_tour = round_nb(compteur_tour.round_num +1 , compteur_tour.player_next, (compteur_tour.player_next+1)%len(Nb_players.listing))\n \n #Reward\n if (len(Players_list[compteur_tour.player_start].cards[1][Players_list[compteur_tour.player_start].cards[1] < 0]) > 0):\n game_end = True\n Reward = -10000\n elif (len(Players_list[compteur_tour.player_start].cave[1][Players_list[compteur_tour.player_start].cave[1] > 3]) > 0):\n game_end = True\n Reward = -10000\n elif (len(Players_list[compteur_tour.player_next].cave[1][Players_list[compteur_tour.player_next].cave[1] > 3]) > 0):\n game_end = True\n Reward = 10000\n elif ((RL_pickcall_action == IA_call_choice).all() == True):\n Reward = -10\n else:\n Reward = 10\n \n episode.append((State, str(Action), Reward))\n \n \n return episode\n\nQ = defaultdict(lambda: np.zeros(18))\nlauncher_stochastic(Q)\n" }, { "alpha_fraction": 0.4757336974143982, "alphanum_fraction": 0.5347721576690674, "avg_line_length": 28.28093719482422, "blob_id": "17cf758831f3f1637f4c98c8cfa9f0adc645a831", "content_id": "c27a428e7b73af64146cc33e4dda467905feb407", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8761, "license_type": "no_license", "max_line_length": 123, "num_lines": 299, "path": "/tester.py", "repo_name": "RaspVor/LiarLiar_Sarsa", "src_encoding": "UTF-8", "text": "#Visualisation d'un episode:\nQ = defaultdict(lambda: np.zeros(18))\nepisodee = launcher_stochastic(Q)\n\n\nepsilon = 0.00 \ncounter = 0\nround_num = 1000\nfor i in range(round_num):\n counter += launcher_stochastic(Q, epsilon)[-1][2]\nprint(counter / round_num) \n\nfor i in range(3):\n print(episodee[i])\n \n\n#Action value prediction \nfrom collections import defaultdict\nimport numpy as np\nimport sys\n\n\n\n#Action Values\ndef mc_prediction_q(num_episodes, gamma=1.0, Q = defaultdict(lambda: np.zeros(18)), N = defaultdict(lambda: np.zeros(18))):\n # initialize empty dictionaries of arrays\n \n # loop over episodes\n for 
i_episode in range(1, num_episodes+1):\n # monitor progress\n if i_episode % 100 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n \n # set the value of epsilon\n epsilon = 1.0/((i_episode/8000)+1) \n \n # generate an episode\n episode = launcher_stochastic(Q, epsilon)\n # obtain the states, actions, and rewards\n states, actions, rewards = zip(*episode)\n # prepare for discounting\n discounts = np.array([gamma**i for i in range(len(rewards)+1)])\n # update the sum of the returns, number of visits, and action-value \n # function estimates for each state-action pair in the episode\n for i, state in enumerate(states):\n old_Q = Q[state][Zactions[actions[i]]] \n old_N = N[state][Zactions[actions[i]]]\n Q[state][Zactions[actions[i]]] = old_Q + (sum(rewards[i:]*discounts[:-(1+i)]) - old_Q)/(old_N+1)\n N[state][Zactions[actions[i]]] += 1\n \n \n return Q, N\n\n\nQ, N = mc_prediction_q(1000000, Q = defaultdict(lambda: np.zeros(18)), N = defaultdict(lambda: np.zeros(18)))\n Q, N = mc_prediction_q(10000, 1.0, Q, N)\nlen(Q)\n \nepsilon = 0.00 \ncounter = 0\nround_num = 1000\nfor i in range(round_num):\n counter += launcher_stochastic(Q, epsilon)[-1][2]\nprint(counter / round_num) \n\nlen(Q)\nsum(([0.47858339, 0.47858339]))\n\nif ((State in Q) and (np.all(Q[State] ==0) == False)) else RL_call_actions_proba\n\n\n\n\nif (((State in Q) == True) and (np.all(Q[State][-2:] ==0) == False)) else RL_call_actions_proba\n \n\n\n #Control des probas\n def get_probs_pick_actions(Q_s, epsilon, nS = 16, nA=18):\n \"\"\" obtains the action probabilities corresponding to epsilon-greedy policy \"\"\"\n policy_s = np.ones(nA) * epsilon / nS\n best_a = np.argmax(Q_s[:-2])\n policy_s[best_a] = 1 - epsilon + (epsilon / nS)\n \n selector = np.array([1,1,1,1,\n 1,1,1,1,\n 1,1,1,1,\n 1,1,1,1,\n 0,0])\n policy_s = np.where(selector==0, 0, policy_s)\n \n return policy_s\n\n\n def get_probs_call_actions(Q_s, epsilon, nS = 2, nA=18):\n \"\"\" obtains the action probabilities corresponding to epsilon-greedy policy \"\"\"\n policy_s = np.ones(nA) * epsilon / nS\n best_a = np.argmax(Q_s[-2:])+16\n policy_s[best_a] = 1 - epsilon + (epsilon / nS)\n \n selector = np.array([0,0,0,0,\n 0,0,0,0,\n 0,0,0,0,\n 0,0,0,0,\n 1,1])\n policy_s = np.where(selector==0, 0, policy_s)\n \n return policy_s\n\nsum(policy_s)\n\ntest = np.array([ 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 0., 0., 0., 0.,\n -10040., 0.])\n\ntest[:-2]\nnp.argmax(test[:-2])+15\nsum(get_probs_call_actions(test,epsilon))\n \n \n \nepsilon = 1.0/((100/8000)+1) \nsum(get_probs_call_actions(Q[\"1[3 5 1 4 5 2 4 2][0 0 0 0 0 0 0 0][0 0 1 0 0 0 0 0]\"],epsilon))\nsum(get_probs_pick_actions(Q[\"0[3 5 2 4 5 2 4 2][0 0 0 0 0 0 0 0][0 0 0 0 0 0 0 0]\"],epsilon))\n\n\npolicy_s[0] = 1 - epsilon + (epsilon / nS)\n\n\n\n\n\n\n\n\n\n\nnp.argmax(Q[\"1[1 0 3 0 0 2 4 2][0 2 2 3 3 3 1 0][2 2 2 2 3 2 1 1]\"])\n\n\nV_sorted = sorted(V.items(), key=lambda item: (item[1][0]), reverse=True)\nV_sorted\n\nN_sorted = sorted(N.items(), key=lambda item: (item[1][0]), reverse=True)\n\nlen(Q)\nnp.argmax(V[\"0[3 4 5 2 3 3 3 4][0 0 0 0 1 0 0 0][0 0 0 0 0 0 0 0]\"])\n\n\ndef get_probs_pick_actions(Q_s, epsilon, nS = 16, nA=18):\n \"\"\" obtains the action probabilities corresponding to epsilon-greedy policy \"\"\"\n policy_s = np.ones(nA) * epsilon / nS\n best_a = np.argmax(Q_s)\n policy_s[best_a] = 1 - epsilon + (epsilon / nS)\n \n selector = np.array([1,1,1,1,\n 1,1,1,1,\n 1,1,1,1,\n 1,1,1,1,\n 0,0])\n policy_s = np.where(selector==0, 0, policy_s)\n 
\n return policy_s\n\n\ndef get_probs_call_actions(Q_s, epsilon, nS = 2, nA=18):\n \"\"\" obtains the action probabilities corresponding to epsilon-greedy policy \"\"\"\n policy_s = np.ones(nA) * epsilon / nS\n best_a = np.argmax(Q_s)\n policy_s[best_a] = 1 - epsilon + (epsilon / nS)\n \n selector = np.array([0,0,0,0,\n 0,0,0,0,\n 0,0,0,0,\n 0,0,0,0,\n 1,1])\n policy_s = np.where(selector==0, 0, policy_s)\n \n return policy_s\n\nget_probs_call_actions(Q[\"1[1 0 3 0 0 2 4 2][0 2 2 3 3 3 1 0][2 2 2 2 3 2 1 1]\"],epsilon)\n\nstate = \"1[1 0 3 0 0 2 4 2][0 2 2 3 0 0 1 0][2 2 2 2 3 2 1 1]\"\nprobabilite = get_probs_call_actions(Q[state], epsilon, nA) \\\n if state in Q else RL_call_actions_proba\n\nnS=18\nnA=16\n\ni_episode = 1\nepsilon = 1.0/((80000/8000)+1)\nepsilon\npolicy_s = np.ones(nA) * epsilon / nS\npolicy_s\n1 - epsilon + (epsilon / nS)\n\n\ntest = get_probs(Q[\"1[1 0 3 0 0 2 4 2][0 2 2 3 3 3 1 0][2 2 2 2 3 2 1 1]\"],epsilon, nA)\ntest\nRL_pick_actions_proba = np.array([1/16,1/16,1/16,1/16,\n 1/16,1/16,1/16,1/16,\n 1/16,1/16,1/16,1/16,\n 1/16,1/16,1/16,1/16,\n 0,0])\n \non prend les éléments à 0 et on les répartit sur les autres \n \nx=np.random.randint(100, size=(1,18))[0]\nnp.where(RL_pick_actions_proba==0, 0, x)\n\naction = np.random.choice(np.arange(nA), p=get_probs(Q[state], epsilon, nA)) \\\n if state in Q else env.action_space.sample()\n\n\n\n\n\nreturns_sum = defaultdict(lambda: np.zeros(18))\nN = defaultdict(lambda: np.zeros(18))\nQ = defaultdict(lambda: np.zeros(18))\n# loop over episodes\n\n gamma = 1 \n # generate an episode\n episode = launcher_stochastic()\n # obtain the states, actions, and rewards\n states, actions, rewards = zip(*episode)\n # prepare for discounting\n discounts = np.array([gamma**i for i in range(len(rewards)+1)])\n # update the sum of the returns, number of visits, and action-value \n # function estimates for each state-action pair in the episode\n for i, state in enumerate(states):\n i=0\n returns_sum[states[i]][Zactions[actions[i]]] += sum(rewards[i:]*discounts[:-(1+i)])\n print(returns_sum[state][0])\n \n #N[state][actions[i]] += 1.0\n #Q[state][actions[i]] = returns_sum[state][actions[i]] / N[state][actions[i]]\n \n\n\ns = [('red', 1), ('blue', 2), ('red', 3), ('blue', 4), ('red', 1), ('blue', 4)]\nd = defaultdict(set)\nfor k, v in s:\n d[k].add(v)\n\nd['red']\n\nd.items()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef mc_prediction_v(num_episodes, gamma=1.0):\n # initialize empty dictionary of lists\n returns = defaultdict(list)\n # loop over episodes\n for i_episode in range(1, num_episodes+1):\n # monitor progress\n if i_episode % 100 == 0:\n print(\"\\rEpisode {}/{}.\".format(i_episode, num_episodes), end=\"\")\n sys.stdout.flush()\n \n ## TODO: complete the function\n # generate an episode\n episode = launcher()\n # obtain the states, actions, and rewards\n states, actions, rewards = zip(*episode)\n # prepare for discounting\n discounts = np.array([gamma**i for i in range(len(rewards)+1)])\n # calculate and store the return for each visit in the episode\n for i, state in enumerate(states):\n returns[state].append(sum(rewards[i:]*discounts[:-(1+i)]))\n \n # calculate the state-value function estimate\n V = {k: np.mean(v) for k, v in returns.items()}\n \n return V\n\nV = mc_prediction_v(60000)\n\nV_sorted = sorted(V.items(), key=lambda item: (item[1]))\n#V_sorted\n\nlen(V_sorted)\n\n\n#Action value prediction \n\n" } ]
3
justboh/python_penning
https://github.com/justboh/python_penning
2e26b0d49c178867a29c39369ae75f216ee4f1bd
a8e86b90271eb9f3284e5d534eaaf0117df154a1
950b5c463992fb5ffde9a5dad37a2860b9676579
refs/heads/master
2015-08-22T04:42:58.295851
2015-06-11T15:30:29
2015-06-11T15:30:29
32,936,939
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5913059711456299, "alphanum_fraction": 0.6045937538146973, "avg_line_length": 30.734939575195312, "blob_id": "48363078e0d1d5b039dbbdffd995bb950c6c60c4", "content_id": "1df4458337db40748acbe82057771c0940cf05cf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10536, "license_type": "no_license", "max_line_length": 91, "num_lines": 332, "path": "/Analysis/analysis.py", "repo_name": "justboh/python_penning", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\nimport numpy as np\nimport scipy.optimize as op\n\n#collection of classes and functions useful in analyzing data from hfgui\n\n####################################################\n#### least squares fit cover function ##############\n####################################################\n\n#a fit parameter for cFit cover function\nclass cP: \n\tdef __init__(self, name=None, default=None, constant=None, \\\n\t\t\tboundUpper=None, boundLower=None):\n\t\tself.value = default\n\t\tself.name = name\n\t\tself.default = default\n\t\tself.boundUpper = boundUpper\n\t\tself.boundLower = boundLower\n\t\tself.constant = constant\n\n\tdef set(self, value):\n\t\tself.value = value\n\tdef __call__(self):\n\t\treturn self.value\n\n\tdef __str__(self):\n\t\tdef lstr(val): \n\t\t\tif val == None: \n\t\t\t\treturn 'None' \n\t\t\telse: return '%.2f' % val\n\t\ts = '%s = %s [%s0=%s; bounds %s to %s]' % \\\n\t\t\t(self.name,lstr(self.value),self.name, \\\n\t\t\tlstr(self.default),lstr(self.boundLower),lstr(self.boundUpper))\n\t\tif self.constant != None:\n\t\t\ts = s+ ' constant = %.2f\\n' % float(self.constant)\n\t\treturn s\n\n\n#fit cover function\ndef cFit(function, parameters, x, y):\n\tdef f(params):\n\t\ti = 0\n\t\tfor p in parameters:\n\t\t\tp.set(params[i])\n\t\t\ti += 1\n\t\treturn y - function(x)\n\n\t#if x is None: x = arange(y.shape[0])\n\tp = [param() for param in parameters]\n\t#print 'f = ', f(p)\n\t#print 'p = ', p\n\top.leastsq(f, p)\n\t\n####################################\n### cfit2() \n####################################\n\t\n#fit cover function for fmin_slsqp\n#includes constraints on parameters\n\ndef cFit2(testfunc, theParamList, x, y, funcToFit_derivs=None, debug=0):\n\t#funcToFit is a function built out of cP's\n\t#theParamList is a list of N parameters (instances of cP)\n\t#x is a set of independent data points\n\t#y is a set of dependent data points\n\t\n\t#if x is None: x = np.arange(y.shape[0])\n\t\n\t#calculate residual -- called by fmin_slsqp at each step of optimization\n\t#note that fmin_slsqp wants a scalar return value... 
do sum of squares manually\n\tdef calcResidualf(candidateParamList):\n\t\tfor p,cp in zip(theParamList,candidateParamList):\n\t\t\tp.set(cp)\n\t\ter = (y - testfunc(x))**2\n\t\treturn np.sqrt(er.sum()) #should equal zero if optimized \n\n\t#generate an inequality constraint \n\tdef fineq(z):\n\t\tret = []\n\t\tfor zi, z_val in enumerate(z):\n\t\t\tif z_val > theParamList[zi].boundUpper: \n\t\t\t\tret.append( theParamList[zi].boundUpper - z_val ) #is > 0 if optimized\n\t\t\telse :\n\t\t\t\tret.append( z_val - theParamList[zi].boundLower ) #is > 0 if optimized\n\t\t#print 'fineq(z) -> ', ['%.4f'%q for q in ret]\n\t\treturn np.array(ret)\n\n\t#generate an equality constraint \n\tdef feq(z):\n\t\tret = []\n\t\tfor zi, z_val in enumerate(z):\n\t\t\tif theParamList[zi].constant != None:\n\t\t\t\tret.append( np.abs(theParamList[zi].constant - z_val) ) \n\t\t\telse:\n\t\t\t\tret.append(0)\n\t\t#print 'feq(z) -> ', ['%.2f'%q for q in ret]\n\t\treturn np.array(ret)\n\t\n\t#generate a list of initial conditions (defaults)\n\tp_default = [z.default for z in theParamList]\n\t#print 'p_default = ', p_default\n\t\n\t#generate a bound constraint \n\tnobound = 1e6\n\tboundList = []\n\tfor pel in theParamList:\n\t\tif pel.boundUpper != None:\n\t\t\tupper = pel.boundUpper\n\t\telse:\n\t\t\tupper = nobound\n\t\tif pel.boundLower != None:\n\t\t\tlower = pel.boundLower\n\t\telse:\n\t\t\tlower = -nobound\n\t\tboundList.append((lower,upper))\n\t#print 'boundList -> ', boundList\n\n\tret = op.fmin_slsqp(calcResidualf, p_default,\\\n\t\tiprint=debug, full_output=True, f_ieqcons=fineq) \n\t\t#f_eqcons=feq,\n\treturn ret\n\n#class cFitToSin1:\n#\t\"\"\"\n#\tClass to do fit of a single iteration of an experiment.\n#\te.g. fit to phase shift of velocimetry signal\n#\t\"\"\"\n#\tdef __init__(self,x,y,f0_guess,phi0=0,debug=False):\n#\t\tfName = '__init__'\n#\t\tif self.debug: pLog('DEBUG:'+self.cName+':'+fName+'()')\n#\t\tself.cName = 'cFitToSin1'\n#\t\tself.x = np.array(x) #data to fit\n#\t\tself.y = np.array(y) #data to fit\n#\t\tself.f0_guess #initial estimate of f0\n#\t\tself.phi0 = phi0 #phase offset\n#\t\tself.debug = debug #if plotting using pylab\n#\t\tself.fitParamList = self.initFitParams()\n#\t\t#self.fp_A #set by self.initFitParams()\n#\t\t#self.fp_C #set by self.initFitParams()\n#\t\t#self.fp_f #set by self.initFitParams()\n#\t\t#self.fp_phi #set by self.initFitParams()\n#\t\t#self.fp_tau #set by self.initFitParams()\n#\t\tself.fitInfo = None #info returned by latest fit\n\t\t\n#\tdef fitfunc(self,zz) : \n#\t\tfName = 'fitfunc'\n#\t\tif self.debug: pLog('DEBUG:'+self.cName+':'+fName+'()')\n#\t\treturn self.fp_A()*np.sin( 2*np.pi*self.fp_f()*x+self.fp_phi()+\\\n#\t\t\tself.phi0)*np.exp(-1.0*zz*self.fp_tau()) + self.fp_C()\n\t\t\n#\tdef initFitParams(self):\n#\t\tfName = 'initFitParams'\n#\t\tif self.debug: pLog('DEBUG:'+self.cName+':'+fName+'()')\n\t\t\n#\t\t#fit parameters\n#\t\tbsc_C=0.3\n#\t\tbsc_f=0.01\n\n#\t\tself.fp_A = pena.cP(default=self.y.max()-self.y.min(),name='A',\\\n#\t\t\t\t\t\tboundUpper=self.y.max()-self.y.min(),boundLower=10)\n#\t\tself.fp_C = pena.cP(default=self.y.mean(),name='C',\\\n#\t\t\t\t\t\tboundUpper=self.y.mean()*(1+bsc_C),boundLower=self.y.mean()*(1-bsc_C))\n#\t\tself.fp_f = pena.cP(default=self.f0_guess,name='f',\\\n#\t\t\t\t\t\tboundLower=self.f0_guess*(1-bsc_f),\\\n#\t\t\t\t\t\tboundUpper=self.f0_guess*(1+bsc_f))\n#\t\tself.fp_phi = pena.cP(default=0,name='phi',\\\n#\t\t\t\t\t\tboundLower=0,boundUpper=2*np.pi)\n#\t\tself.fp_tau = 
pena.cP(default=0.01,name='tau',\\\n#\t\t\t\t\t\tboundUpper=0.1,boundLower=0.001)\n#\t\treturn [fp_A, fp_C, fp_f, fp_phi, fp_tau]\n\t\n#\tdef showDebugPlot(self):\n#\t\tfName = 'showDebugPlot'\n#\t\tpLog('DEBUG:'+self.cName+':'+fName+'()')\n\t\t\n#\t\tfpts = self.getFitSamplePoints()\n#\t\tfitx = fpts['x']\n#\t\tfity = fpts['y']\n\t\t\n#\t\tpylab.clf()\n#\t\tpylab.plot(self.x,self.y,'-')\n#\t\tpylab.plot(fitx,fity,'-')\n#\t\tpylab.ylim([self.y.min(),self.y.max()])\n#\t\tpylab.draw()\n\t\n#\tdef getFitSamplePoints(self,n_samples=200):\t\t\n#\t\tfName = 'getFitSamplePoints'\n#\t\tif self.debug: pLog('DEBUG:'+self.cName+':'+fName+'()')\n#\t\txfit = np.arange(self.x.min(),self.x.max(),\n#\t\t\t(self.x.max()-self.x.min())/n_samples)\t\n#\t\treturn {'x':xfit, 'y':self.fitfunc(xfit)}\n\t\t\t\n#\tdef doFit(self):\n#\t\tfName = 'doFit'\n#\t\tif self.debug: pLog('DEBUG:'+self.cName+':'+fName+'()')\n\t\t\n#\t\t#do the fit -- take best of several initial phases\n#\t\tfit_ok = False; ret = []; vphi_range = 2.0*np.pi\n#\t\tvphi_steps = 4; besti = 0; bestscore = 1e12\n#\t\tfor i, vphi in enumerate(np.arange(0,vphi_range,vphi_range/vphi_steps)):\n#\t\t\tfp_phi.default=vphi\n#\t\t\tself.fitInfo = pena.cFit2(self.fitfunc, self.fitParamList, self.x, self.y, debug=0\n#\t\t\tret.append(self.fitInfo) \n#\t\t\t#of the various starting phases which is best?\n#\t\t\tscore = ret[i][1]\n#\t\t\tif (score < bestscore) & (ret[i][3] == 0): \n#\t\t\t\tbestscore = score; besti = i\n#\t\t\tif debug: \n#\t\t\t\ts = 'DEBUG:hfpyExp:fitToSin1 %d: vphi = %f, score = %f' % (i, vphi,score)\n#\t\t\t\tpLog(s)\n#\t\t\t\tpLog('%d: vphi = %f, score = %f' % (i, vphi,score) )\n\t\t\t\t\n#\t\t\t\t#raw_input('hit RETURN')\n\n#\t\t#which fit was best??\n#\t\tif ret[besti][3] == 0 : #fit is successful if equal to 0\n#\t\t\tfor i,fp in enumerate(fps): \n#\t\t\t\tfp.set(ret[besti][0][i]) \n#\t\t\tif debug:\n#\t\t\t\tpLog('FIT OK! (using %d with score %f)' % (besti,bestscore)\t)\n#\t\t\t\tfor p in fps:\n#\t\t\t\t\ts = 'DEBUG:hfpyExp:fitToSin1 %s' % (p)\n#\t\t\t\t\tpLog(s)\n#\t\t\t\tpylab.plot(xfit,self.fitfunc(xfit),'-',linewidth=3)\n#\t\t\t\tpylab.ylim([self.y.min(),self.y.max()])\n#\t\t\t\tpylab.draw()\n#\t\t\t\tpylab.savefig(gPlotFileName)\n#\t\t\tfit_ok = True \n#\t\telse :\n#\t\t\tif debug:\n#\t\t\t\tpLog('DEBUG:hfpyExp:fitToSin1 FIT FAILED! 
(smode=%d)' % ret[besti][3])\n#\t\t\tpass\n\t\t\n#\t\tyfit = np.array(self.fitfunc(xfit))\n#\t\tret = {\"fit\":fps,\"sample_x\":xfit,\"sample_y\":yfit}\n#\t\treturn ret\n\t\t\n\t\n#example application of these functions\n\n##do fit\n#fp=0\n#x = d[fp:,0]\n#y = d[fp:,1]\n##fit parameters\n#A = cP(100)\n#f = cP(1.2)\n#phi = cP(0)\n#tau = cP(0.5)\n#c = cP(600)\n##fit function\n#def fitfunc(x) : return A() * np.sin( 2*np.pi*f()*x + phi() ) * np.exp(x*tau()) + c()\n#fitfuncs = 'A() * np.sin( 2*np.pi*f()*x + phi() ) * np.exp(x*tau()) + c()'\n##do the fit\n#cFit(fitfunc, [A,f,phi,tau,c], x, y)\n\n\n##plot results\n\n##x points for evaluating the fit function\n#xfit_points = 200 \n#xfit = np.arange(x.min(),x.max(),(x.max()-x.min())/xfit_points)\n\n#plt.clf()\n#plt.plot(xfit,fitfunc(xfit))\n\n#################################################\n##### HFGUI directory name functions ############\n#################################################\n\ndef hfgui3_dataDirName_dataFileName( dirname ):\n    \"\"\"\n    hfGui3 uses inconsistent naming of data directories and data files.\n    This function translates the data directory name used by hfGui3 into\n    the filename used inside the data directories.\n    \"\"\"\n    s_date = dirname.split('--')[0].split('-')\n    s_time = dirname.split('--')[1].split('.')\n    s_fn = 'histData.'+s_date[0]+s_date[1]+s_date[2]+'_'+\\\n        s_time[0]+s_time[1]+s_time[2]+'.'+s_time[3]+'.csv'\n    return s_fn\n\ndef hfgui3_parseDataFile( fpath, maxcolumn ):\n    \"\"\"\n    hfGui3 records data as a table. The first row is column headers and\n    can vary depending on, for example, the fvars in a given experiment.\n    This function parses this file and returns a data dictionary with\n    entries whose elements correspond to the column headers.\n\n    fpath is an absolute path to the file\n    maxcolumn is the maximum column number to include\n    \"\"\"\n    #read data\n    print(fpath)\n    fh = open(fpath,'r')\n    #the first line in the file is the column labels\n    labels = fh.readline().split(',')[0:maxcolumn]\n    #create a dictionary entry for each column; the data dictionary\n    dd = dict.fromkeys(labels,[])\n    lines = fh.readlines()\n    fh.close()\n    for line in lines:\n        rs = line.strip('\\n').split(',')[0:maxcolumn]\n        for (rsi,rsel) in enumerate(rs):\n            if rsi == 0: #first column is time\n                dd[labels[rsi]] = dd[labels[rsi]] + [rsel]\n            else: #all other columns are floats\n                dd[labels[rsi]] = dd[labels[rsi]] + [float(rsel)]\n    return dd\n\n#example application\n\n##get list of file names\n#rootdir = '/home/britton/pinky/current/penningTrap/correlationExperiments/20100722/'\n#reldatadir = '20315818'\n#dirnames = os.listdir(os.path.join(rootdir,reldatadir))\n#fpaths = []\n#for dn in dirnames:\n#    thepath = os.path.join(rootdir,reldatadir,dn,hfgui3_dataDirName_dataFileName( dn ))\n#    fpaths.append( thepath )\n\n##parse and plot the data\n#for fn in fpaths:\n#    d = hfgui3_parseDataFile(fn,maxcolumn = 20)\n#    print d['x'], d['ave-2']\n#    pyp.plot(d['x'], d['ave-2'])\n#    pyp.title('xxx')\n#    pyp.xlabel('xxx')\n#    pyp.ylabel('counts')\n#    pyp.legend(loc='lower right')\n#    pyp.show()\n" }, { "alpha_fraction": 0.5380300283432007, "alphanum_fraction": 0.5579937100410461, "avg_line_length": 28.402912139892578, "blob_id": "061d54934e5bfa31bdb8324b70ca26068203b7a8", "content_id": "ab12cf54f969b5bf0506de51d664069d74ece479", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6061, "license_type": "no_license", "max_line_length": 84, "num_lines": 206, "path": "/Analysis/plot_tools_jgb.py", "repo_name": 
"justboh/python_penning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 02 10:19:20 2015\n\n@author: jgb\n\"\"\"\n\nimport numpy as np\nfrom numpy import pi, sin, cos\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport scipy.optimize as opt\n\n# define some plot colors\nred = '#A60628'\nblue = '#348ABD'\npurple = '#7A68A6'\ngreen = '#467821'\norange = '#D55E00'\npink = '#CC79A7'\ncyan = '#56B4E9'\naqua = '#009E73'\nyellow = '#F0E442'\nnavy = '#002b36'\n\ndef set_plot_mode(ax, mode='clean'):\n    if mode == 'clean':\n        plt.grid(False)\n        ax.xaxis.set_ticks_position('bottom')\n        ax.yaxis.set_ticks_position('left')\n        ax.xaxis.set_tick_params(which='major', size=3, width=1)\n        ax.yaxis.set_tick_params(which='major', size=3, width=1)\n    if mode == 'quant':\n        plt.grid(True)\n\ndef set_global_plot_mode(mode='quant'):\n    \"\"\"Function to change the default parameters for matplotlib plot\"\"\"\n    if mode == 'quant':\n        mpl.rcParams['axes.grid'] = True\n        mpl.rcParams['xtick.major.size'] = 5\n        mpl.rcParams['ytick.major.size'] = 5\n        mpl.rcParams['savefig.dpi'] = 80\n        mpl.rcParams['lines.antialiased'] = True\n    if mode == 'clean':\n        mpl.rcParams['axes.grid'] = False\n        mpl.rcParams['xtick.major.size'] = 2\n        mpl.rcParams['ytick.major.size'] = 2\n        mpl.rcParams['savefig.dpi'] = 80\n        mpl.rcParams['lines.antialiased'] = True\n\ndef auto_extent(x,y):\n    \"\"\"Get the limits for axis displaying data x, y, +- 10% of full scale\n    Parameters\n    ----------\n    x : array\n        x data for plot\n    y : array\n        y data for plot\n\n    Returns\n    -------\n    axis : list\n        the list of xmin, xmax, ymin, ymax for a plot axis\n\n    \"\"\"\n    xm = np.min(x)\n    ym = np.min(y)\n    xx = np.max(x)\n    yx = np.max(y)\n    full_x = abs(xx - xm)\n    full_y = abs(yx - ym)\n    axis = [xm-0.05*full_x, xx + 0.1*full_x, ym-0.1*full_y, yx+0.1*full_y]\n    return axis\n\ndef plot_fit(x,y,fitfunc,fitguess,\n             hold=[],\n             labels=['X','Y','default'],\n             axis='auto',\n             save=False,\n             show=True):\n    \"\"\"Plot data and the fit to a given model, with ability to fix parameters.\n\n    Parameters\n    ----------\n    x : array\n        x data for plot\n    y : array\n        y data for plot\n    fitfunc : function\n        function of the form f(x,*args)\n    fitguess : array\n        must supply values for all the parameters in the model given by fitfunc\n\n    Keyword arguments\n    -----------------\n    hold : list\n        list of booleans, what model parameters to hold fixed (default none)\n    labels : list\n        list of strings, xlabel ylabel, plot title\n    axis : list\n        list of axis extent, [xmin, xmax, ymin, ymax] (default 0, xmax, 0, ymax)\n    save: boolean\n        boolean to choose if figure is saved as png file (default no save)\n    show: boolean\n        boolean to choose if the data and fit are shown on a plot\n    Returns\n    -------\n    popt : array\n        the fitted values only (fitguess if no free parameters)\n    perr : array\n        sqrt of the diagonals of the covariance matrix (1 sigma confidence interval)\n\n    \"\"\"\n\n\n#process hold parameter to make a fit model with specified number of free params\n    if np.size(hold) == 0:\n        #default, fit it all\n        hold = np.zeros(np.size(fitguess), dtype=bool)\n        varin = fitguess\n    else:\n        #create the list of free parameter guesses\n        #note ~ performs the NOT function on the boolean array\n        varin = fitguess[~hold]\n\n    if hold.all():\n        #No free params, just plot the model, don't perform fit\n        print(\"No free parameters, plotting model\")\n        x_curve = np.linspace(np.min(x),np.max(x), num=200)\n        curve_fit = fitfunc(x_curve, *fitguess)\n        popt = fitguess\n        perr = np.zeros(np.size(popt))\n    else:\n        
#dynamically define the fit function with subset of fixed arguments\n        def func(x,*var):\n            args = np.array([])\n            var_count = 0\n            #build array of parameters, based on the hold parameter\n            for i in range(np.size(fitguess)):\n                if hold[i]:\n                    #get from fitguess, parameter not varied\n                    args = np.append(args,fitguess[i])\n                else:\n                    #get from input to func, will actually be varied\n                    args = np.append(args,var[var_count])\n                    var_count+=1\n            return fitfunc(x,*args)\n        #actually do the fit, with the subset of params, varin\n        popt,pcov = opt.curve_fit(func, x, y, p0=varin)\n        try:\n            perr = np.sqrt(np.diag(pcov))\n        except ValueError:\n            print(\"Ill-defined fit\")\n            perr = np.zeros(np.size(fitguess))\n        x_curve = np.linspace(np.min(x),np.max(x), num=200)\n        curve_fit = func(x_curve, *popt)\n\n#fit message\n    poptf = ('%.3g, ' * len(popt))[:-1] % tuple(popt)\n    perrf = ('%.3g, ' * len(perr))[:-1] % tuple(perr)\n\n    fit_message = 'Curve fit results: ' + poptf\n    fit_mess2 = '\\n uncertainties: ' + perrf\n\n    fit_message = fit_message + fit_mess2\n\n    if show:\n        #build figure\n        plt.close()\n\n        if axis == 'default':\n            axis = [0.0, 1.1*np.max(x), 0.0, 1.1*np.max(y)]\n        elif axis == 'auto':\n            axis = auto_extent(x,y)\n        plt.axis(axis)\n\n        plt.plot(x,y,'o')\n        plt.plot(x_curve,curve_fit,'-')\n\n        #labels\n        plt.xlabel(labels[0])\n        plt.ylabel(labels[1])\n        ym = axis[-2]\n        xm = axis[0]\n        y_pos_msg = ym-(0.3*np.abs(axis[3]-ym))\n\n        if xm == 0.0: x_pos_msg = 0.0\n        else: x_pos_msg = xm\n\n        plt.text(x_pos_msg, y_pos_msg, fit_message, fontsize=10)\n\n        name = fitfunc.__name__\n        if save:\n            name_out = name+'_fit.png'\n            plt.savefig(name_out, format='png', bbox_inches='tight')\n\n        if labels[2]=='default':\n            plt.title(name)\n        else:\n            plt.title(labels[2])\n\n        plt.show()\n    else:\n        print(fit_message)\n\n    return popt, perr\n\n\n\n\n" }, { "alpha_fraction": 0.6660929322242737, "alphanum_fraction": 0.6695352792739868, "avg_line_length": 35.34375, "blob_id": "b536c0ecd7626d6e8c6ee6983e6a65ede9aa9af6", "content_id": "c550691ccb6a6052bf4f90e9fb307584112f9b92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1162, "license_type": "no_license", "max_line_length": 110, "num_lines": 32, "path": "/Calculations/plots.py", "repo_name": "justboh/python_penning", "src_encoding": "UTF-8", "text": "import numpy as np\nimport scipy as sp\nfrom matplotlib.pyplot import *\n\ndef plot_polar_contour(values, Phi, R, color='seismic',clabel='Colormap'):\n    \"\"\"Plot a polar contour plot, with 0 degrees at the North.\n    \n    Arguments:\n    \n    * `values` -- A list (or other iterable, e.g. a NumPy array) of the values to plot on the\n    contour plot (the `z` values)\n    * `Phi` -- A list of angles (in radians)\n    * `R` -- A list of radii \n    \n    The shapes of these lists are important, and are designed for a particular\n    use case (but should be more generally useful). 
The values list should be `len(Phi) * len(R)`\n    long, with data for the first angle at all radii, then the second angle at all radii, etc.\n    \n    This is designed to work nicely with data that is produced by looping over angles and radii.\n    \n    \"\"\"\n    #R, Phi = np.meshgrid(r, phi)\n    fig, ax = subplots(subplot_kw=dict(projection='polar'))\n    ax.set_theta_zero_location(\"E\")\n    ax.set_theta_direction(1)\n    set_cmap(color)\n    cax = ax.contourf(Phi,R, values, 20)\n    cb = fig.colorbar(cax)\n    cb.set_label(clabel)\n    \n    return fig, ax, cax" }, { "alpha_fraction": 0.540012776851654, "alphanum_fraction": 0.5598591566085815, "avg_line_length": 24.61475372314453, "blob_id": "27e9e31628ed5c294243d91f4430422a10f56321", "content_id": "c883bd70acc72718a0519321ba9d69ef802158ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3124, "license_type": "no_license", "max_line_length": 76, "num_lines": 122, "path": "/Analysis/XY_plot_and_fit_example.py", "repo_name": "justboh/python_penning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 13 21:02:26 2015\n\n@author: justinbohnet\n\"\"\"\n\nimport os\nimport numpy as np\nimport scipy.optimize as opt\n\nimport matplotlib.pyplot as plt\n\n#define fit functions\ndef exp_decay(time, Gamma):\n    return np.exp(-(Gamma*time)**1)\n    \ndef gaussian_decay(time, Gamma):\n    return np.exp(-(Gamma*time)**2)\n\n\ndef get_immediate_subdirectories(a_dir):\n    return [name for name in os.listdir(a_dir)\n            if os.path.isdir(os.path.join(a_dir, name))]\n    \nfolders = get_immediate_subdirectories(os.getcwd())\n\n#Do the analysis on all the folders\n#define initial parameters\ndcounts = 13\nGamma_guess = 180e-6\n\n#bins for data\n\ndata_set = np.zeros(0)\nGamma = np.zeros(0)\nGamma_err = np.zeros(0)\n\nfor folder in folders:\n    os.chdir(folder)\n    \n    res, res_err, file_name = analysis()\n\n    Gamma = np.append(Gamma, res)\n    Gamma_err = np.append(Gamma_err, res_err)\n    data_set = np.append(data_set,folder)\n    \n    os.chdir('..')\n\nout = np.array([data_set, Gamma, Gamma_err]).transpose()\nnp.savetxt('results.csv',out,delimiter=',',fmt=\"%s\")\n\n#%%\n#define analysis functions\n\ndef analysis():\n    for file in os.listdir(os.getcwd()):\n        if file.startswith(\"phaseFlop\") and file.endswith(\".csv\"):\n            file_name = file\n    \n    data = np.genfromtxt(file_name,delimiter =\",\",\n                         names=True,\n                         dtype=None,\n                         skip_header=1)\n    \n    arm_t_us = data['fp_t_us'][0:-1]\n    \n    if data['fit_B'][0] > data['fit_A'][0]:\n        max_count = (data['fit_B'][0:-1] - dcounts)\n        min_count = (data['fit_A'][0:-1] - dcounts)\n    else:\n        max_count= (data['fit_A'][0:-1] - dcounts)\n        min_count = (data['fit_B'][0:-1] - dcounts)\n    \n    time = 2*arm_t_us\n    C = (max_count - min_count)/(max_count+min_count)\n    \n    popt, perr, filename_out = plot2D_fit(time, C, exp_decay, [Gamma_guess],\n                   labels=['Total scattering time [us]','Contrast'])\n    \n    print(file_name)    \n    \n    return popt, perr, file_name\n\ndef plot2D_fit(x,y,fitfunc, fitguess,\n               labels=['X','Y'], axis='default', save=True):\n    \n    popt,pcov = opt.curve_fit(fitfunc, x, y,\n                              p0=[fitguess])\n    perr = np.sqrt(np.diag(pcov))\n    curve_fit = fitfunc(x, *popt)    \n    \n    poptf = ('%.3g, ' * len(popt))[:-1] % tuple(popt)\n    perrf = ('%.3g, ' * len(perr))[:-1] % tuple(perr)\n    \n    fit_message = 'Curve fit results: ' + poptf    \n    fit_mess2 = '\\n uncertainties: ' + perrf    \n    \n    fit_message = fit_message + fit_mess2    \n    \n    if axis == 'default':\n        axis = [0.0, np.max(x), 0, np.max(y)]\n    \n    
plt.close()\n plt.axis(axis)\n plt.plot(x,y,'o')\n plt.plot(x,curve_fit,'-')\n \n #labels\n plt.xlabel(labels[0])\n plt.ylabel(labels[1])\n plt.text(0, -0.3*axis[-1], fit_message, fontsize=10) \n \n filename_out = file_name[0:-4] + '_fig.png'\n plt.title(filename_out, fontsize=10)\n \n if save:\n plt.savefig(filename_out, format='png', bbox='tight')\n \n plt.show()\n \n return popt, perr, filename_out" }, { "alpha_fraction": 0.6812267899513245, "alphanum_fraction": 0.7304832935333252, "avg_line_length": 29.77142906188965, "blob_id": "dedbde4bda0b38216f3cdc4a4ebecf0b36c506df", "content_id": "f4fd5e7218a8e874e90ca33472082618d1cf51e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1076, "license_type": "no_license", "max_line_length": 82, "num_lines": 35, "path": "/Calculations/scicons.py", "repo_name": "justboh/python_penning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\nimport scipy.constants\n\npi = scipy.constants.pi\n\nc = scipy.constants.c\nh = scipy.constants.h\nhbar = scipy.constants.hbar\nmu_0 = scipy.constants.mu_0\nepsilon0 = scipy.constants.epsilon_0\nk_e = 1/(4*pi*epsilon0) #Coulombs constant\nq = scipy.constants.elementary_charge\nm_e = scipy.constants.m_e\nm_p = scipy.constants.m_p\nm_n = scipy.constants.m_n\nk_b = scipy.constants.k\nalpha = scipy.constants.alpha\na0 = scipy.constants.physical_constants['atomic unit of length'][0]\nmu_b = scipy.constants.physical_constants['Bohr magneton'][0]\nmu_n = scipy.constants.physical_constants['nuclear magneton'][0]\nE_h = scipy.constants.physical_constants['atomic unit of energy'][0]\nge = scipy.constants.physical_constants['electron g factor'][0] #electron g-factor\n\namu = 1.66057e-27\nm_Be = m_p * 8.9465\n\ngIp = -0.784955 #Be nuclear g-factor\nAhfS = -625.008837e6 * 2*pi #Hyperfine coefficient \nAhfP = -118.6e6 * 2*pi #Hyperfine coefficient for P state\nmI = 3./2. 
#nuclear spin projection of Be+ used for experiments" }, { "alpha_fraction": 0.5317160487174988, "alphanum_fraction": 0.568157970905304, "avg_line_length": 34.01102828979492, "blob_id": "5d519d5a5f259ebf9b477b45cd6360cae37bba81", "content_id": "3b22038dc2fba9fc1a7030027ba4eb8c4f2c7409", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9522, "license_type": "no_license", "max_line_length": 105, "num_lines": 272, "path": "/squeeze_func.py", "repo_name": "justboh/python_penning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 27 10:40:23 2015\n\n@author: justinbohnet\n\"\"\"\n\nimport numpy as np\nfrom numpy import sin, cos, pi, sqrt\nimport matplotlib.pyplot as plt\n\nimport hfGUIdata\nimport model_fit as mf\n\ndef sq_analysis(max_c, min_c, N, sigA, k0, Jbar_1kHz):\n\n # Get data\n file_name, scandata, m, pmterr, trials, data = hfGUIdata.get_raw_counts()\n\n #data = np.genfromtxt(file_name,delimiter=\",\",names=True,dtype=None)\n arm_time = np.mean(data['arm_t'])\n pi_time = np.mean(data['middle_t'])\n det_n = np.mean(data['det_n'])\n\n # Calculate derived quantities\n detune = det_n*1e3/(arm_time) # kHz\n phi = (scandata/pi*180.0) #degrees\n J = 2*Jbar_1kHz/detune/N # interaction strength, scaled for detuning\n\n pmterr_uncert = pmterr/sqrt(2*trials)\n m_avg = np.mean(m)\n K = max_c - min_c\n\n\n def noise_model(phi, Ntot, sigA, k0):\n phi = phi*pi/180\n varPN = K**2 * (sin(phi)**2)/4.0/Ntot # extra factor of K is for conversion to photons\n varAN = (sigA * sin(phi))**2 # sigA is the std dev of rotation of theta in radians\n varSN = K*(sin(phi/2.)**2) + min_c\n varTN = k0**2 * (K*(sin(phi/2.)**2) + min_c)**2\n return sqrt(varPN + varAN + varSN + varTN)\n\n\n\n # to predict the SQL, don't include the added noise in the anti-squeezed quad\n SQL_stdev = noise_model(90., N, 0.0, k0)\n\n title_name = \"data set: \"+file_name[13:-8]\n title = \"%s, $t_{a}$:%d us, $\\delta$:%.3g kHz\"%(title_name, arm_time, detune)\n\n label = ['Final rotation [degrees]', 'Std. Dev. 
PMT counts', title]\n\n plt.close()\n plt.errorbar(phi, pmterr, yerr=pmterr_uncert, fmt='o')\n plt.plot(phi,np.ones(np.size(phi))*SQL_stdev,'-')\n plt.xlabel(label[0])\n plt.ylabel(label[1])\n plt.title(title)\n plt.axis([0,np.max(phi),0,1.5*np.max(pmterr)])\n\n # try to calculated the squeezed state from OAT\n psi = np.linspace(0,720,num=400)*pi/180.\n t = 2*arm_time*1e-6\n\n Acoef = 1-cos(2*J*t)**(N-2)\n Bcoef = 4*sin(J*t)*cos(J*t)**(N-2)\n delt = 0.5*np.arctan2(Bcoef,Acoef)\n varJz = N/4.0*(1+(N/4.0-0.25)*(Acoef - sqrt(Acoef**2+Bcoef**2)*cos(2*(psi+delt))))\n\n sig_squeeze = sqrt(varJz/N**2 * K**2)\n sig_squeeze = sqrt(sig_squeeze**2 + 0.0**2 + np.mean(m) + (k0*np.mean(m))**2)\n plt.plot(180/pi*(psi),sig_squeeze,'-')\n #plt.axis([-5,370,0,80])\n plt.show()\n\n # Fitting to estimate of anti-squeeze, angles\n plt.close()\n def varfit(psi, maxf, minf, angle):\n psi = pi*psi/180.0\n return (maxf - minf)*cos(psi + angle)**2 + minf\n res, res_err = mf.plot_fit(phi, pmterr**2, varfit, np.array([4.0e4, 2.5e3, 0.0]),\n hold=np.array([False,False,False]), axis='auto')\n\n # check that a cosine is a valid model for extracting parameters from data\n# plt.plot(180/pi*(psi),sig_squeeze**2,'-')\n# plt.plot(phi, pmterr**2,'o')\n# plt.axis([0,370,0,60e3])\n\n alpha_deg = res[2]*180.0/pi\n alpha_deg_err = res_err[2]*180.0/pi\n\n if res[0] > res[1]:\n var_max_est = res[0]\n var_max_err = res_err[0]\n\n var_min_est = res[1]\n var_min_err = res_err[1]\n else:\n var_max_est = res[1]\n var_max_err = res_err[1]\n\n var_min_est = res[0]\n var_min_err = res_err[0]\n\n sdev_min = np.min(pmterr)\n m_min = m[pmterr == sdev_min][0]\n\n #R = (sdev_min**2 - ((sigA)**2 + m_min + (k0*m_min)**2))/(K**2/4.0/N)\n R = (sdev_min**2 - m_min)/(K**2/4.0/N) # conservative inferred squeezing\n RO = (sdev_min**2)/(K**2/4.0/N)\n\n print(\"=========== Numbers from the data ==============\")\n print(title_name)\n print('Average PMT counts: {:.4g}, SN: {:.4g}'.format(m_avg,sqrt(m_avg)))\n print('N: {}, PN: {:.4g} counts'.format(N, K/2./sqrt(N)))\n print('Number of photons for N ions, K: {:.4g}'.format(K))\n print('Predicted SQL (PN+SN+TN): {:.4g}'.format(SQL_stdev))\n print('predicted chi from ACSS: {}'.format(J))\n #print('Error bars are statistical: 1/sqrt(2*trials)')\n print('---------------------------------------------------')\n print(\"Lowest Std Dev. {:.4g} counts\".format(sdev_min))\n print(\"Observed spin noise reduction: {:.4g}, {:.4g}\".format(RO, 10*np.log10(RO)))\n print(\"Inferred spin noise reduction (subtract SN only): {:.4g}, {:.4g}\".format(R, 10*np.log10(R)))\n print('Angle alpha [deg]: {0:.4g}'.format(alpha_deg) + '+-' + '{0:.4g}'.format(alpha_deg_err))\n print('Anti-squeezed std dev. {0:.4g} +- {1:.4g} counts'.format(sqrt(var_max_est),sqrt(var_max_err)))\n\n '''\n axial = data['raman_df'] - detune\n plt.close()\n plt.figure(1)\n plt.subplot(211)\n plt.plot(phi, axial)\n plt.xlabel(label[0])\n plt.ylabel('Axial freq. 
[kHz]')\n    #plt.axis([0,360,1560,1565])\n\n    plt.subplot(212)\n    plt.plot(phi, m,'o')\n    plt.xlabel(label[0])\n    plt.ylabel('Avg PMT')\n    plt.tight_layout()\n    '''\n\n    return 2*arm_time, detune, np.mean(m), np.min(pmterr), phi, pmterr, psi, sig_squeeze, RO\n\ndef cal_analysis(mask_range, hold=False, Nguess=100):\n\n    # Get data\n    file_name, scandata, m, pmterr, trials, data = hfGUIdata.get_raw_counts()\n\n    #data = np.genfromtxt(file_name,delimiter=\",\",names=True,dtype=None)\n\n    pi_time = hfGUIdata.get_ionProp_value('sf%fitParams%sf_fitParam_tpi')\n    det_t = hfGUIdata.get_ionProp_value('detection%det_t')\n\n    # Calculate derived quantities\n    phi = ((scandata/pi) * 180.0)  # degrees\n\n    # Make plot for calibrating Bloch Vector angle\n    title_name = \"data set: \" + file_name[13:-8]\n    title = \"%s\"%(title_name)\n    label = ['Polar angle $\\phi$ [degrees]', 'Avg PMT counts', title]\n\n    def constrast_fit(phi, max_counts, min_counts):\n        phi = phi*pi/180\n        return (max_counts - min_counts)*sin(phi/2.)**2 + min_counts\n    res, res_err = mf.plot_fit(phi, m, constrast_fit, np.array([1000.0, 150.0]),\n                               labels=label, axis='default')\n    # Store results for PMT counts calibration\n    max_c_fit = res[0]\n    min_c_fit = res[1]\n    K = max_c_fit - min_c_fit  # photons per N ions\n    N = Nguess\n\n    #option to make a mask for noise calibration\n    if mask_range == 0:\n        phi_m = phi\n        pmterr_m = pmterr\n    else:\n        mask = np.ones(np.size(phi)).astype(bool)\n        mask[mask_range[0]:mask_range[1]] = False\n        phi_m = phi[mask]\n        pmterr_m = pmterr[mask]\n\n    label = ['Polar angle $\\phi$ [degrees]', 'Std. Dev. PMT counts', title]\n    fitguess = np.array([N, 0.01, 0.01])\n\n    def noise_model(phi, Ntot, sigA, k0):\n        phi = phi*pi/180\n        varPN = K**2 * (sin(phi)**2)/4.0/Ntot  # extra factor of K is for conversion to photons\n        varAN = (sigA * sin(phi))**2  # sigA is the std dev of rotation of theta in radians\n        varSN = K*(sin(phi/2.)**2) + min_c_fit\n        varTN = k0**2 * (K*(sin(phi/2.)**2) + min_c_fit)**2\n        return sqrt(varPN + varAN + varSN + varTN)\n\n\n    if hold is False:\n        res, res_err = mf.plot_fit(phi_m, pmterr_m, noise_model, fitguess,\n                                   labels=label, axis='default')\n    else:\n        res, res_err = mf.plot_fit(phi_m, pmterr_m, noise_model, fitguess,\n                                   labels=label, hold=hold, axis='default')\n    if np.size(res) == 3:\n        Nfit = res[0]\n        sigA = res[1]\n        k0 = res[2]\n    if np.size(res) == 2:\n        Nfit = N\n        sigA = res[0]\n        k0 = res[1]\n\n    # Show the fitted noise on the plot\n    phi_show = np.linspace(0,360,num=200)\n    phi_rad = phi_show*pi/180.0\n    varPN = K**2 * (sin(phi_rad)**2)/4.0/Nfit\n    varAN = (sigA * sin(phi_rad))**2\n    varSN = K*(sin(phi_rad/2.)**2) + min_c_fit\n    varTN = k0**2 * (K*(sin(phi_rad/2.)**2) + min_c_fit)**2\n    plt.plot(phi_m, pmterr_m**2,'o')\n    plt.plot(phi_show, noise_model(phi_show, Nfit, sigA, k0)**2)\n    plt.plot(phi_show, (varPN), label='PN')\n    plt.plot(phi_show, (varAN), label='AN')\n    plt.plot(phi_show, (varSN), label='SN')\n    plt.plot(phi_show, (varTN), label='TN')\n    plt.xlabel('Polar angle $\\phi$ [degrees]')\n    plt.ylabel('Var PMT counts')\n    plt.title(title)\n    plt.legend()\n    plt.show()\n\n    return max_c_fit, min_c_fit, Nfit, sigA, k0\n    \ndef con_analysis(max_full=100.0, min_full=0.0):\n    \n    # Get data\n    file_name, scandata, m, pmterr, trials, data = hfGUIdata.get_raw_counts()\n\n    #data = np.genfromtxt(file_name,delimiter=\",\",names=True,dtype=None)\n\n    pi_time = hfGUIdata.get_ionProp_value('sf%fitParams%sf_fitParam_tpi')\n    det_t = hfGUIdata.get_ionProp_value('detection%det_t')\n\n    # Calculate derived quantities\n    phi = ((scandata/pi) * 180.0)  # degrees\n\n    # 
Make plot for calibrating Bloch Vector angle\n title_name = \"Contrast: \" + file_name[13:-8]\n title = \"%s\"%(title_name)\n label = ['Polar angle $\\phi$ [degrees]', 'Avg PMT counts', title]\n def constrast_fit(phi, max_counts, min_counts):\n phi = phi*pi/180\n return (max_counts - min_counts)*sin(phi/2.)**2 + min_counts\n res, res_err = mf.plot_fit(phi, m, constrast_fit, np.array([1000.0, 150.0]),\n labels=label, axis='default')\n \n if res[0] > res[1]:\n max_r = res[0]\n mar_r_err = res_err[0]\n\n min_r = res[1]\n min_r_err = res_err[1]\n else:\n max_r = res[1]\n mar_r_err = res_err[1]\n\n min_r = res[0]\n min_r_err = res_err[0]\n \n contrast = (max_r-min_r)/(max_full-min_full)\n contrast_err = sqrt(res_err[0]**2 + res_err[1]**2) / (max_full-min_full)\n \n return contrast, contrast_err" }, { "alpha_fraction": 0.5357068777084351, "alphanum_fraction": 0.5590616464614868, "avg_line_length": 36.193050384521484, "blob_id": "6f4cc7125b3587b09bf5e1ae92f33c33fb0f7e73", "content_id": "4064ce6f773181c15fc5a5faced584c2c011b219", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9634, "license_type": "no_license", "max_line_length": 90, "num_lines": 259, "path": "/Analysis/Untitled.py", "repo_name": "justboh/python_penning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 24 11:09:48 2015\n\n@author: jgb, jwb\n\"\"\"\nfrom __future__ import division\nfrom scipy.constants import pi\nimport numpy as np\nimport scipy.ndimage as ndi\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport os\nimport skimage\nimport skimage.exposure\nimport skimage.feature\n\n# JWB 5/27/2015 ... changes\n# added relative_dir_data optional argument for quantar_image.__init__()\n# symmetric axes for quantar_image.make_lab_image()\n# providing default values is a form of self documentation\n# add im_range to make_rot_image()\n# add crop_image() subroutine\n# add fix_contrast() subroutine\n# add plot_img_and_hist() subroutine\n\nclass quantar_image:\n \"\"\"A class for images from quantar .dat files\n **parameters**, **types**, **return** and **return types**::\n\n :param x0:\n :param y0:\n :param num_to_read:\n :param file_time: time of each file\n :param fwall: rotating wall freq [kHz]\n :param first_file: string of first file to read\n :param relative_dir_data: relative path to data folder (e.g. 
'my_data')\n :return: return description\n :rtype: the return type description\n\n \"\"\"\n bins = 250\n\n #the constructor stores the data from the .dat files in the class\n def __init__(self, x0=0, y0=0, num_to_read=1, file_time=1.0,\n fwall=100.0, first_file='00000001.dat',\n relative_dir_data=''):\n #data for creating images\n self.x0 = x0\n self.y0 = y0\n self.fw = fwall\n\n self.rot_image = None\n self.extent = [0,0,0,0]\n\n #data for getting back to the raw data\n self.file_time = file_time\n self.first_file = first_file #the number of the first .dat file for data\n self.num_to_read = num_to_read #the number of files used to create raw data\n\n #initialize background histogram\n self.bckgnd = np.zeros((self.bins,self.bins))\n\n found_flag = 0\n print_flag = 0\n\n self.x = np.empty(0)\n self.y = np.empty(0)\n self.t = np.empty(0)\n\n path_to_data_dir = os.getcwd() + '\\\\' + relative_dir_data\n file_list = os.listdir(path_to_data_dir)\n num_read = 0\n\n for name in file_list:\n if name == self.first_file:\n found_flag = 1\n if found_flag == 1 and num_read < self.num_to_read:\n fpath = path_to_data_dir + '\\\\' + name\n with open(fpath, 'rb') as f:\n norf = f.read(16)\n if print_flag: print(norf)\n\n N_reps_per_scan, = np.fromfile(f, dtype=np.uint32, count=1)\n if print_flag: print(N_reps_per_scan)\n\n rep_boundary_message = f.read(14)\n if print_flag: print(rep_boundary_message)\n\n N_this_rep, = np.fromfile(f, dtype=np.uint32, count=1)\n if print_flag: print(N_this_rep)\n\n if N_this_rep > 0:\n x = np.fromfile(f, dtype=np.float64, count=N_this_rep, sep=\"\")\n y = np.fromfile(f, dtype=np.float64, count=N_this_rep, sep=\"\")\n t = np.fromfile(f, dtype=np.float64, count=N_this_rep, sep=\"\")\n\n self.x = np.append(self.x,x + x0)\n self.y = np.append(self.y,y + y0)\n self.t = np.append(self.t,t + self.file_time * num_read)\n\n num_read += 1\n '''attempt to scale the data to correct xy distances\n conversion from quantar data to um is (53mm/60)*(97um/38.5mm)\n based on the size of the cloud on 032420125 and the known size of\n the qtxyt3 centering cicle\n '''\n conversion = (53/60.0)*(97.0/38.5)\n self.x = self.x*conversion\n self.y = self.y*conversion\n\n def crop_image(self, image, c):\n lx, ly = image.shape\n return image[lx/2*(1-c) : lx/2*(1+c), ly/2*(1-c): ly/2*(1+c)]\n\n def fix_contrast(self, img, debug=False):\n # normalize\n img = img / np.max(img)\n\n # Contrast stretching\n p2, p98 = np.percentile(img, (2, 98))\n img_rescale = skimage.exposure.rescale_intensity(img, in_range=(p2, p98))\n\n if debug:\n # Display results\n fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 5))\n ax_img, ax_hist, ax_cdf = self.plot_img_and_hist(img, axes[:, 0])\n ax_img.set_title('Low contrast image')\n y_min, y_max = ax_hist.get_ylim()\n ax_hist.set_ylabel('Number of pixels')\n ax_hist.set_yticks(np.linspace(0, y_max, 5))\n\n ax_img, ax_hist, ax_cdf = self.plot_img_and_hist(img_rescale, axes[:, 1])\n ax_img.set_title('Contrast stretching')\n # prevent overlap of y-axis labels\n fig.subplots_adjust(wspace=0.4)\n plt.show()\n\n return img_rescale\n\n def plot_img_and_hist(self, img, axes, bins=256):\n \"\"\"Plot an image along with its histogram and cumulative histogram.\n\n \"\"\"\n img = skimage.img_as_float(img)\n ax_img, ax_hist = axes\n ax_cdf = ax_hist.twinx()\n\n # Display image\n ax_img.imshow(img, cmap=plt.cm.gray)\n ax_img.set_axis_off()\n\n # Display histogram\n ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')\n ax_hist.ticklabel_format(axis='y', 
style='scientific', scilimits=(0, 0))\n ax_hist.set_xlabel('Pixel intensity')\n ax_hist.set_xlim(0, 1)\n ax_hist.set_yticks([])\n\n # Display cumulative distribution\n img_cdf, bins = skimage.exposure.cumulative_distribution(img, bins)\n ax_cdf.plot(bins, img_cdf, 'r')\n ax_cdf.set_yticks([])\n\n return ax_img, ax_hist, ax_cdf\n\n def set_background_hist(self, image):\n \"\"\"takes an instance of the class, stores it as a background\"\"\"\n xLab = image.x\n yLab = image.y\n phaseOfWall = 2*pi*image.fw* image.t\n\n xRot = xLab * np.cos(phaseOfWall) + yLab * np.sin(phaseOfWall)\n yRot = yLab * np.cos(phaseOfWall) - xLab * np.sin(phaseOfWall)\n\n #Make Rotating Frame Image\n counts_background, xedges, yedges, RotFrame = plt.hist2d(xRot,yRot,bins=self.bins,\n cmap = mpl.cm.Blues,normed=False)\n self.bckgnd = counts_background*self.num_to_read/image.num_to_read\n\n def make_lab_image(self, im_range=[-256,256,-256,256], gfilter=0.0):\n \"\"\"plot lab frame image\n\n :param im_range: [-256,256,-256,256] is full range for Quantar\n :gfilter: ndi.gaussian_filter() argument\n :return: return description\n \"\"\"\n xLab = self.x\n yLab = self.y\n\n plt.subplot(111, aspect='equal')\n ax = plt.gca()\n ax.grid(True)\n counts, xedges, yedges, LabFrame = plt.hist2d(xLab, yLab,\n bins=self.bins, cmap = mpl.cm.Blues,\n normed=False)\n extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]\n counts_filter = ndi.gaussian_filter(counts-self.bckgnd, gfilter)\n #LabFrame = plt.imshow(counts_filter-self.bckgnd, cmap = mpl.cm.Blues,\n # vmin = 0, vmax = np.max(counts_filter))\n plt.axis(im_range)\n plt.xlabel(\"x [$\\mu$m]\")\n plt.ylabel(\"y [$\\mu$m]\")\n plt.show(LabFrame)\n return counts_filter,extent\n\n def make_rot_image(self, im_range=[-256,256,-256,256], gfilter=0.0):\n \"\"\"plot rotating frame image\n\n :param im_range: [-256,256,-256,256] is full range for Quantar\n :gfilter: ndi.gaussian_filter() argument\n :return: return description\n \"\"\"\n xLab = self.x\n yLab = self.y\n phaseOfWall = 2*pi*self.fw* self.t\n\n xRot = xLab * np.cos(phaseOfWall) + yLab * np.sin(phaseOfWall)\n yRot = yLab * np.cos(phaseOfWall) - xLab * np.sin(phaseOfWall)\n\n #Make Rotating Frame Image\n plt.subplot(111, aspect='equal')\n ax = plt.gca()\n ax.grid(True)\n counts, xedges, yedges, RotFrame = plt.hist2d(xRot, yRot,\n bins=self.bins,\n cmap = mpl.cm.Blues, normed=False)\n extent = [yedges[0], yedges[-1], xedges[0], xedges[-1]]\n counts_filter = ndi.gaussian_filter(counts-self.bckgnd,gfilter)\n RotFrame = plt.imshow(counts_filter,extent=extent, cmap = mpl.cm.Blues,\n vmin = 0, vmax = np.max(counts_filter))\n plt.axis(im_range)\n plt.xlabel(\"x [$\\mu$m]\")\n plt.ylabel(\"y [$\\mu$m]\")\n plt.show(RotFrame)\n self.rot_image = counts_filter\n self.extent = extent\n\n def get_ion_positions(self):\n self.coordinates = skimage.feature.peak_local_max(self.rot_image,\n min_distance=3.0,threshold_rel=0.4)\n\n def show_rot_image(self, im_range, low_threshold= 0):\n if np.size(self.rot_image) == 0:\n print(\"No rotating frame image, must make_rot_image() first\")\n else:\n image = np.copy(self.rot_image)\n image[image < low_threshold] = 0\n RotFrame = plt.imshow(image,extent=self.extent, cmap = mpl.cm.Blues,\n vmin = 0, vmax = np.max(image))\n if im_range!=None: plt.axis(im_range)\n plt.xlabel(\"x [$\\mu$m]\")\n plt.ylabel(\"y [$\\mu$m]\")\n plt.show(RotFrame)\n\n\n######### useful functions but shouldn't be stored in the class ########\ndef im_extent(mag):\n return np.array([-mag,mag,-mag,mag])\n\n" }, { "alpha_fraction": 
0.532667875289917, "alphanum_fraction": 0.5470356941223145, "avg_line_length": 41.11040496826172, "blob_id": "7a07a12745a848318217532ce54699a28c159e0b", "content_id": "c693188bd33e6b80597c5036c0a6d9d97e9ba617", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19836, "license_type": "no_license", "max_line_length": 111, "num_lines": 471, "path": "/Analysis/histData_plot_analyze.py", "repo_name": "justboh/python_penning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nA series of functions for quickly visualizing and analyzing different histData\nfiles generated by HF GUI. If you import this script into ipython while \nin a folder with the histData.csv, you can visualize the data with minimal \ncommands.\n\nImportant functions:\n\n- get_histData(max_count=100, min_count=0)\n    - return file_name, scandata, b_prob, pmterr, histdata, histextent, xname\n    - assumes only 1 histData.csv in the current folder\n    - rescales the avg pmt counts to fraction population bright, can pass\n      max_count and min_count\n    \n- plot_histData(max_count=100, min_count=0, aspect_ratio='auto', ptype='hist', x_unit='$\\mu$s'):\n    - visualizes data in the histData.csv of a folder.\n    - if you like the default values, don't need to pass anything\n    - ptype changes how the plot looks; options are 'points', 'errorbar' or the default 'hist'\n    - simple function, just uses get_histData and make_plot\n    \n- make_plot()\n    - this function is really for other functions to use, as you have to pass\n      in all the data parameters, but it allows the code for the plots \n      to not be duplicated\n\n-------- Then there are a variety of analysis functions for specific kinds of experiments -- \n    They generate fitted curves for the models defined in the functions\n    You must supply fit_guess, other inputs are optional\n    You can see a fit of your guess by passing plot_guess = True\n    \n- analyze_histData_flopping_t(fit_guess), 2 param\n- analyze_histData_mean_field(fit_guess), 1 param\n    \n\n\"\"\"\n\nimport numpy as np\nimport scipy.optimize as optimize\nimport matplotlib.pyplot as plt\nimport os\n\n#np.genfromtxt('histData.20150317_170505.321.csv', unpack=True, skip_header=1, dtype=None, usecols=(1,2,13))\n\ndef plot_histData(max_count=100, min_count=0, aspect_ratio='auto', \n                  ptype='hist', x_unit='$\\mu$s'):\n    #Get the data file name\n    file_name, scandata, b_prob, pmterr, histdata, histextent, xname = get_histData(max_count, min_count)\n    \n    make_plot(file_name, scandata, b_prob, pmterr, histdata, \n              histextent=histextent, xname=xname, \n              aspect_ratio=aspect_ratio, ptype=ptype, x_unit=x_unit) \n    \ndef analyze_histData_flopping_t(fit_guess, plot_guess=False, max_count=100, \n                                min_count=0.0, aspect_ratio='auto', \n                                ptype='hist', x_unit='$\\mu$s'):\n#Get the data file name\n    file_name, scandata, b_prob, pmterr, histdata, histextent, xname = get_histData(max_count, min_count)\n    #data = np.genfromtxt(file_name,delimiter=\",\",names=True,dtype=None)\n    #freq = np.mean(data['scan_f'])\n    \n#define fit model\n    #calculate full contrast\n    \n    def flopping_t(t, t_pi, Gamma):\n        return 0.5*(1 + np.exp(-Gamma*t)*(np.cos((np.pi/t_pi)*t)))\n    \n    popt,pcov = optimize.curve_fit(flopping_t, scandata, b_prob,\n                                   p0=fit_guess)\n    perr = np.sqrt(np.diag(pcov))\n    curve_fit = flopping_t(scandata, *popt) \n    fit_message = \"Params: %s\\n Std Dev: %s\"%(str(popt),str(perr)) \n    print(fit_message)\n    sG0 = '{0:.1f}'.format(popt[0])\n    sG1 = '{0:.0f}'.format(1000*popt[1])\n    title = \"flopping_t, t_pi: {} $\\mu$s, $\\Gamma$: 
{} ks^-1\".format(sG0,sG1)\n \n#no fit\n if plot_guess:\n curve_fit = flopping_t(scandata, *fit_guess)\n sG0 = '{0:.1f}'.format(fit_guess[0])\n sG1 = '{0:.0f}'.format(1000*fit_guess[1])\n title = \"flopping_t, t_pi: {} $\\mu$s, $\\Gamma$: {} ks^-1\".format(sG0,sG1)\n \n \n#create data plot\n make_plot(file_name, scandata, b_prob, pmterr, histdata, \n histextent=histextent, xname=xname, curve_fit=curve_fit, \n aspect_ratio=aspect_ratio, ptype=ptype, x_unit=x_unit, \n ptitle = title)\n \n \ndef analyze_histData_mean_field(fit_guess, plot_guess=False, max_count=100, \n min_count=0.0, \n aspect_ratio='auto', \n ptype='points', \n x_unit='$\\mu$s'):\n#Get the data file name\n file_name, scandata, b_prob, pmterr, histdata, histextent, xname = get_histData(max_count, min_count)\n \n#define fit model\n data = np.genfromtxt(file_name,delimiter=\",\",names=True,dtype=None)\n arm_time = np.mean(data['arm_t'])\n pi_time = np.mean(data['middle_t']) \n \n def meanfieldfit(rot_time, J, Gamma):\n theta = rot_time/pi_time*np.pi #radians\n return 0.5*(1 - np.exp(-Gamma*2*arm_time)*np.sin(theta)*np.sin(2*J*np.cos(theta)*2*arm_time))\n \n popt,pcov = optimize.curve_fit(meanfieldfit, scandata, b_prob,\n p0=fit_guess)\n perr = np.sqrt(np.diag(pcov))\n curve_fit = meanfieldfit(scandata, *popt) \n fit_message = \"Params: %s\\n Std Dev: %s\"%(str(popt),str(perr))\n print(fit_message)\n title = \"MF shift, $t_{arm}$:%d us, $t_{\\pi}$:%d us, J %.3g 1/s\"%(arm_time,pi_time,popt[0])\n \n #no fit\n if plot_guess:\n curve_fit = meanfieldfit(scandata, *fit_guess)\n title = \"$t_{a}$:%d us, $\\delta$:%.3g kHz, J %.3g 1/s\"%(arm_time,1e3/(arm_time),fit_guess[0]*1e6)\n \n#create data plot\n make_plot(file_name, scandata, b_prob, pmterr, histdata, \n histextent=histextent, \n xname=xname, curve_fit=curve_fit, aspect_ratio=aspect_ratio, \n ptype=ptype, x_unit=x_unit, ptitle = title)\n \n return popt, perr, file_name\n\ndef analyze_histData_mean_field_full(fit_guess, plot_guess=False, max_count=100, \n min_count=0.0, \n aspect_ratio='auto', \n ptype='points', \n x_unit='$\\mu$s'):\n#Get the data file name\n file_name, scandata, b_prob, pmterr, histdata, histextent, xname = get_histData(max_count, min_count)\n \n#define fit model\n data = np.genfromtxt(file_name,delimiter=\",\",names=True,dtype=None)\n arm_time = np.mean(data['arm_t'])\n pi_time = np.mean(data['middle_t'])\n \n def meanfieldfit(rot_time, J, Gamma):\n theta = rot_time/pi_time*np.pi #radians\n return 0.5*(1 - np.exp(-Gamma*2*arm_time)*np.sin(theta)*np.sin(2*J*np.cos(theta)*2*arm_time))\n \n popt,pcov = optimize.curve_fit(meanfieldfit, scandata, b_prob,\n p0=fit_guess)\n perr = np.sqrt(np.diag(pcov))\n curve_fit = meanfieldfit(scandata, *popt) \n fit_message = \"Params: %s\\n Std Dev: %s\"%(str(popt),str(perr))\n print(fit_message)\n title = \"MF shift, $t_{arm}$:%d us, $t_{\\pi}$:%d us, J %.3g \"%(arm_time,pi_time,popt[0])\n \n #no fit\n if plot_guess:\n curve_fit = meanfieldfit(scandata, *fit_guess)\n title = \"MF shift, $t_{arm}$:%d us, $t_{\\pi}$:%d us, J %.3g \"%(arm_time,pi_time,fit_guess[0]*1e6)\n \n#create data plot\n make_plot(file_name, scandata, b_prob, pmterr, histdata, \n histextent=histextent, \n xname=xname, curve_fit=curve_fit, aspect_ratio=aspect_ratio, \n ptype=ptype, x_unit=x_unit, ptitle = title)\n return popt, perr, file_name\n\ndef analyze_histData_COM(fit_guess, w_a, Gamma, \n plot_guess=False, \n max_count=100, \n min_count=0.0, \n aspect_ratio='auto', \n ptype='points', \n x_unit='$\\mu$s'):\n#Get the data file name\n file_name, scandata, 
b_prob, pmterr, histdata, histextent, xname = get_histData(max_count, min_count)\n \n#define fit model\n data = np.genfromtxt(file_name,delimiter=\",\",names=True,dtype=None)\n arm_time = np.mean(data['arm_t'])\n pi_time = np.mean(data['middle_t'])\n \n mu_R = 2*np.pi*scandata\n tau = arm_time\n \n def COMfit(mu_R, nm):\n delta = mu_R - w_a\n phi = (tau + pi_time)*delta\n \n alpha = w_a*(1-np.cos(phi)) + 1j*mu_R*np.sin(phi) - \\\n (np.exp(1j*w_a*tau) * \n (w_a*(np.cos(mu_R*tau)-np.cos(mu_R*tau+phi)) -\n 1j*(np.sin(mu_R*tau)-np.sin(mu_R*tau+phi))))\n \n alpha_cc = w_a*(1-np.cos(phi)) - 1j*mu_R*np.sin(phi) - \\\n (np.exp(-1j*w_a*tau) * \n (w_a*(np.cos(mu_R*tau)-np.cos(mu_R*tau+phi)) +\n 1j*(np.sin(mu_R*tau)-np.sin(mu_R*tau+phi))))\n \n asq = 2*np.sin(phi/2.)**2 * ((mu_R**2 - w_a**2)*(np.cos(phi)+np.cos(mu_R*tau+phi)) - \n 2*(-mu_R**2 -w_a**2 +\n np.cos(mu_R*tau)*np.cos(w_a*tau)*(mu_R**2*(1+np.cos(phi))+w_a**2*(1-np.cos(phi))) +\n (w_a**2 - mu_R**2)*np.cos(w_a*tau)*np.sin(mu_R*tau)*np.sin(phi) +\n 2*mu_R*w_a*np.sin(mu_R*tau)*np.sin(w_a*tau)))\n \n return 0.5*(1 - np.exp(-Gamma*2*tau) * np.exp(-2*asq*(2*nm+1)))\n \n popt,pcov = optimize.curve_fit(COMfit, mu_R, b_prob,\n p0=fit_guess)\n \n \n perr = np.sqrt(np.diag(pcov))\n curve_fit = COMfit(mu_R, *popt) \n fit_message = \"Params: %s\\n Std Dev: %s\"%(str(popt),str(perr))\n print(fit_message)\n message_var = popt[0] \n \n #no fit\n if plot_guess:\n curve_fit = COMfit(mu_R, *fit_guess)\n message_var = fit_guess[0]\n\n title = \"COM, $t_{arm}$:%d us, $t_{\\pi}$:%d us, n:$.2g \"%(arm_time,pi_time,message_var) \n \n#create data plot\n make_plot(file_name, scandata, b_prob, pmterr, histdata, \n histextent=histextent, \n xname=xname, curve_fit=curve_fit, aspect_ratio=aspect_ratio, \n ptype=ptype, x_unit=x_unit, ptitle = title)\n \n return popt, perr, file_name\n\n\ndef make_plot(file_name, scandata, b_prob, pmterr, histdata, \n histextent=[0.0,2.0], xname='', curve_fit=None,\n aspect_ratio='auto', ptype='hist', ptitle = '', x_unit='$\\mu$s'):\n \n #create data plot\n\n #check for fit data\n if curve_fit == None:\n fit_flag = False\n else:\n fit_flag = True \n \n#histogram overlay\n if ptype == 'hist':\n plt.imshow(np.transpose(histdata), vmin=0, vmax=np.max(histdata), \n origin='lower', aspect = aspect_ratio,\n extent=(np.min(scandata),np.max(scandata), histextent[0], histextent[1]))#,aspect =0.2\n plt.axis([np.min(scandata),np.max(scandata), -0.0, 1.2])\n plt.plot(scandata,b_prob, linestyle='solid',marker='.')\n if fit_flag:\n plt.plot(scandata, curve_fit, linestyle='solid')\n \n#error bar type overlay\n elif ptype == 'errorbar': \n plt.axis([0.0, np.max(scandata), 0, 1])\n plt.errorbar(scandata,b_prob, fmt='.', yerr=pmterr, elinewidth=1.0)\n if fit_flag:\n plt.plot(scandata, curve_fit, linestyle='solid')\n plt.axes().set_aspect(aspect_ratio)\n \n#just points \n elif ptype == 'points': \n plt.axis([0.0, np.max(scandata), 0, 1])\n plt.plot(scandata,b_prob,'o')\n if fit_flag:\n plt.plot(scandata, curve_fit, linestyle='solid')\n plt.axes().set_aspect(aspect_ratio)\n \n elif ptype == 'points_df': \n plt.axis([np.min(scandata), np.max(scandata), 0, 0.6])\n plt.plot(scandata,b_prob,'o')\n if fit_flag:\n plt.plot(scandata, curve_fit, linestyle='solid')\n plt.axes().set_aspect(aspect_ratio)\n\n else:\n print('ptype {} not found'.format(ptype))\n\n#labels \n plt.ylabel('Population in spin up')\n plt.xlabel(xname + ' [{}]'.format(x_unit))\n plt.title(ptitle, fontsize=14)\n\n#save\n filename_out = file_name[0:-4] + '_fig.png'\n plt.savefig(filename_out, 
format='png')\n    \n    \ndef get_histData(max_count=100, min_count=0):\n    #Get the data file name\n    file_name = False\n    for file in os.listdir(os.getcwd()):\n        if file.startswith(\"histData.\") and file.endswith(\".csv\"):\n            file_name = file\n    if file_name == False: \n        print(\"Did not find file\")\n        return 0\n    else:\n        #Get data from the file\n\n        data = np.genfromtxt(file_name,delimiter=\",\",names=True,dtype=None)\n        columns = data.dtype.names\n        non_hist_cols = columns.index('hist00') #defined by HFGUI expt type\n        avg_pmt_col = columns.index('ave0')\n        x,x_val,avg_pmt_counts = np.genfromtxt(file_name, \n                                               unpack=True, \n                                               skip_header=1, \n                                               dtype=None, \n                                               usecols=(1,2,avg_pmt_col), \n                                               delimiter=',')\n        xname = data.dtype.names[2]\n        scandata = x_val\n\n        num_scans = len(scandata[scandata == scandata[0]])\n        points_in_scan = np.size(scandata)/num_scans\n        \n        if num_scans > 1:\n            #have to average the data together\n            scandata = scandata.reshape((num_scans,points_in_scan))\n            scandata = np.mean(scandata, axis=0)\n            avg_pmt_counts = avg_pmt_counts.reshape((num_scans,points_in_scan))\n            avg_pmt_counts = np.mean(avg_pmt_counts, axis = 0)\n        \n        #get just the histogram data\n        num_cols_total = len(data[0])\n        num_hist_cols = num_cols_total - non_hist_cols-1\n        hist_data_cols = np.arange(non_hist_cols+1, non_hist_cols+num_hist_cols+1)\n        histdata = np.genfromtxt(file_name,delimiter=\",\",names=None,\n                                 skip_header = 1,dtype=None,usecols=hist_data_cols)\n        if num_scans > 1: \n            #accumulate in bins from all data sets\n            accu_hist_data = np.array([histdata[i:i+points_in_scan] for i in range(num_scans)])\n            histdata = np.sum(accu_hist_data,axis=0)\n        \n        #calc error bars from histogram data \n        counts_m = np.arange(num_hist_cols)\n        counts = np.arange(1, num_hist_cols + 1)\n        mom1 = (counts+counts_m)/2.0\n        mom2 = (counts**2+counts_m*counts+counts_m**2)/3.0\n        trials = np.sum(histdata[0], dtype=float)\n        prob = histdata/trials\n        \n        hist_mean = np.array([np.dot(p,(mom1)) for p in prob])\n        hist_2mean = np.array([np.dot(p,mom2) for p in prob])\n        \n        var = hist_2mean - hist_mean**2\n        \n        pmterr = np.sqrt(var)#/np.sqrt(trials)\n        \n        #scale avg_pmt_counts to make a bright state probability\n        b_prob = (avg_pmt_counts - min_count)/(float(max_count - min_count))\n        histextent = [-min_count/float(max_count-min_count),\n                      (num_hist_cols-min_count)/float(max_count-min_count)]\n        pmterr = (pmterr)/(float(max_count - min_count))\n        return file_name, scandata, b_prob, pmterr, histdata, histextent, xname 
\n\n\n\n\n################## This is old code that I'm working off of now but shouldn't be included #########\n\n\n\n\ndef histData_histplot(max_count=100, aspect_ratio=0.35, \n                      ptype='hist', x_unit='$\\mu$s'):\n    #Get the data file name\n    file_name = False\n    for file in os.listdir(os.getcwd()):\n        if file.startswith(\"histData.\") and file.endswith(\".csv\"):\n            file_name = file\n    if file_name == False: print(\"Did not find file\")\n    else:\n        print(file_name)\n        #Get data from the file\n\n        data = np.genfromtxt(file_name,delimiter=\",\",names=True,dtype=None)\n        columns = data.dtype.names\n        non_hist_cols = columns.index('hist00') #defined by HFGUI expt type\n        avg_pmt_col = columns.index('ave0')\n        x,x_val,avg_pmt_counts = np.genfromtxt(file_name, \n                                               unpack=True, \n                                               skip_header=1, \n                                               dtype=None, \n                                               usecols=(1,2,avg_pmt_col), \n                                               delimiter=',')\n        data = np.genfromtxt(file_name,delimiter=\",\",names=True,dtype=None)\n        xname = data.dtype.names[2]\n        scandata = x_val\n\n        num_scans = len(scandata[scandata == scandata[0]])\n        points_in_scan = np.size(scandata)/num_scans\n        
print(\"Detect \" + str(num_scans) + \" scans in file\")\n print(\"and \" + str(points_in_scan) + \" points in a scan\")\n \n if num_scans > 1:\n #have to average the data together\n scandata = scandata.reshape((num_scans,points_in_scan))\n scandata = np.mean(scandata, axis=0)\n avg_pmt_counts = avg_pmt_counts.reshape((num_scans,points_in_scan))\n avg_pmt_counts = np.mean(avg_pmt_counts, axis = 0)\n \n #get just the histogram data\n num_cols_total = len(data[0])\n num_hist_cols = num_cols_total - non_hist_cols-1\n hist_data_cols = np.arange(non_hist_cols+1, non_hist_cols+num_hist_cols+1)\n histdata = np.genfromtxt(file_name,delimiter=\",\",names=None,\n skip_header = 1,dtype=None,usecols=hist_data_cols)\n if num_scans > 1:\n #accumulate in bins from all data sets\n accu_hist_data = np.array([histdata[i:i+points_in_scan] for i in range(num_scans)])\n histdata = np.sum(accu_hist_data,axis=0)\n \n #calc error bars from histogram data \n bins = np.arange(num_hist_cols)+0.5\n hist_mean = np.array([np.dot(i,bins)/np.sum(i) for i in histdata])\n hist_2mean = np.array([np.dot(i,bins**2)/np.sum(i) for i in histdata])\n pmterr = np.sqrt(hist_2mean - hist_mean**2)\n \n #fitting\n fit_flag = 0\n# pi_time = 78 #useconds\n# def meanfieldfit(rot_time, J):\n# theta = rot_time/pi_time*np.pi #radians\n# return 50*(1 + np.sin(theta)*np.sin(2*J*np.cos(theta))*2*arm_time)\n# \n# popt,pcov = optimize.curve_fit(meanfieldfit, scandata, avg_pmt_counts,\n# p0=[0.0001])\n# perr = np.sqrt(np.diag(pcov))\n# curve_fit = meanfieldfit(scandata, *popt) \n# fit_message = \"Params: %s\\n Std Dev: %s\"%(str(popt),str(perr))\n \n #create data plot\n ylabel = \"PMT counts\"\n #title = \"MF shift, $t_{arm}$:%d us, $t_{\\pi}$:%d us\"%(arm_time,pi_time)\n title = \"HF GUI scan, type: \" + ptype\n #histogram overlay\n if ptype == 'hist': \n plt.axis([np.min(scandata),np.max(scandata),0,max_count])\n plt.imshow(np.transpose(histdata), vmin=0, vmax=np.max(histdata), \n cmap=cm.Blues, origin='lower', aspect = aspect_ratio,\n extent=(np.min(scandata),np.max(scandata),0,num_hist_cols))#,aspect =0.2\n plt.plot(scandata,avg_pmt_counts, linestyle='solid',marker='.')\n \n plt.ylabel(ylabel)\n plt.xlabel(xname + ' [{}]'.format(x_unit))\n plt.title(title)\n \n if fit_flag:\n plt.plot(scandata, curve_fit, linestyle='solid')\n print(fit_message)\n \n filename_out = file_name[0:-4] + '_fig.png'\n plt.savefig(filename_out, format='png')\n \n #error bar type overlay\n elif ptype == 'errorbar': \n plt.axis([np.min(scandata),np.max(scandata),0,max_count])\n plt.errorbar(scandata,avg_pmt_counts, fmt='.', yerr=pmterr, elinewidth=1.0)\n plt.ylabel(ylabel)\n plt.xlabel(xname + ' [{}]'.format(x_unit))\n plt.title(title)\n if fit_flag:\n plt.plot(scandata, curve_fit, linestyle='solid')\n print(fit_message)\n \n filename_out = file_name[0:-4] + '_fig.png'\n plt.savefig(filename_out, format='png')\n\n\n" }, { "alpha_fraction": 0.6340270638465881, "alphanum_fraction": 0.6528615951538086, "avg_line_length": 32.141380310058594, "blob_id": "b55df5c51413c4066df6622595aa2e326065bece", "content_id": "176e2ebbe70398af92ea76929c95715bd07e423e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9610, "license_type": "no_license", "max_line_length": 83, "num_lines": 290, "path": "/Analysis/hfpy_readonly.py", "repo_name": "justboh/python_penning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 13 15:39:18 2015\n\n@author: jgb\n\"\"\"\n\nimport numpy as np\nfrom numpy import 
pi, sin, cos\nimport matplotlib.pyplot as plt\n\nimport pickle, shutil, win32clipboard, gc, copy\n\ndef pLog(msg):\n\t\"\"\"\n\tpLog(msg)\n\t\tmsg is a string\n\n\tKeep an error log for calls to this library. Write to disk.\n\t\"\"\"\n\tfh = open('c:\\\\tmp\\\\hfpy.log','a')\n\ts = ''\n\t#s = s + time.ctime() + ' - '\n\ts = s + msg + '\\n'\n\tfh.write(s)\n\tfh.close()\n\t#put text in clipboard\n\twin32clipboard.OpenClipboard()\n\twin32clipboard.EmptyClipboard()\n\twin32clipboard.SetClipboardText(s)\n\twin32clipboard.CloseClipboard()\n\nclass hfpyExp:\n\t#\"\"\"\n\t#Experiment class holding pmt counts and parameters for a particular\n\t#iteration of an experiment. This experiment class holds several iterations\n\t#or repetitions \"reps\" of a particular experiment.\n\t#run_r = structure returned by hfgui's run('path%expName')\n\t#dopPhtoa = True if photon time of arrival data should be parsed\n\t#phtoaPMT1isSync = True if pmt(1) is used to synchronize each experiment\n\t#\trepetition with an external clock\n\t#phtoaCalcHist = True if a histogram should be calculated for each pmt ch\n\t#\tspanning all the experiment reps\n\t#var = whatever experiment parameter is being varied\n\t#fit = True if the hfgui fit was successful\n\t#\"\"\"\n\n\tdef __init__(self,run_r=None, doPhtoa=False, phtoaPMT1isSync=False,\\\n\t\tphtoaCalcHist=False, phtoaCalcHistNbins=20, \\\n\t\tphtoaRange=(0,3000),var=0, debug = 0, fit=0,dataDirName=''):\n\t\tif debug : pLog('DEBUG:hfpyExp:__init__')\n\t\tself.debug = debug\n\t\tself.doPhtoa = doPhtoa\n\t\tself.phtoaPMT1isSync = phtoaPMT1isSync\n\t\tself.phtoaCalcHist = phtoaCalcHist\n\t\tself.phtoaHistByPmtCh = [] #filled out if previous is true\n\t\tself.phtoaCalcHistNbins = phtoaCalcHistNbins\n\t\t#one time step for photon time of arrival is 16 ns; phtoaRange is in us\n\t\tself.phtoaCalcHistRange = np.array(phtoaRange)*1000.0/16.0\n\t\tself.phtoaKeepRaw = False #delete raw data to save memory\n\t\tself.dataDirName = dataDirName\n\t\t\n\t\tif(run_r!=None): #if not defined probably debugging things so skip constructor\n\t\t\t#run_r result from running an experiment\n\t\t\tself.raw = run_r\n\t\t\tif run_r.has_key('RawDetection'):\n\t\t\t\tself.rawdet = run_r['RawDetection']\n\t\t\t\t#self.counts... 
each exper is list of 16 counts, one per dGate\n\t\t\t\tself.counts = self.parseRawCounts(self.rawdet)\n\n\t\t\t#each rep will be a row in phtoa; columns are list of toa for each\n\t\t\t#pmt channel (16 total number); stuffed by self.parseraw()\n\t\t\tif self.doPhtoa & run_r.has_key('PhotonTimeOfArrival'):\t\n\t\t\t\tself.rawphtoa = run_r['PhotonTimeOfArrival']\n\t\t\t\tself.phtoa = self.parseRawPhtoa(self.rawphtoa)\n\t\t\t\tif self.phtoaKeepRaw == False:\n\t\t\t\t\tself.rawphtoa = ''\n\t\t\t\t\tgc.collect()\n\t\t\telse:\n\t\t\t\tself.rawphtoa = ''\n\t\t\t\tself.phtoa = None\n\t\t\t#v is the value of whatever is being swept for this exp\n\t\t\tself.var = var\n\t\t\tself.fit = copy.deepcopy(fit)\n\n\n\tdef __str__(self):\n\t\ts = ''\n\t\ts = s + 'var=' + str(self.var) + ','\n\t\ts = s + 'fit='+str(self.fit) + ','\n\t\ts = s + 'phtoa_raw=['+str(self.rawphtoa) + '],'\n\t\ts = s + 'phtoa='+str(self.phtoa) + ','\n\t\ts = s + 'counts_raw=['+str(self.rawdet) + '],'\n\t\ts = s + 'counts='+str(self.counts)\n\t\treturn s\n\n\tdef parseRawCounts(self, r):\n\t\tif self.debug == True : pLog('DEBUG:hfpyExp:parseRawCounts')\n\t\t#pLog('parseRawCounts '+str(r))\n\t\tcounts = []\n\t\tfor cs in r.rsplit(','):\n\t\t\t#pLog(' cs = ' + str(cs) )\n\t\t\tc = int(cs)\n\t\t\tif (c >> 17) == 0: #new experiment; successive detection bit no high\n\t\t\t\tcounts.append([0]*16)\n\t\t\telse : \n\t\t\t\t#pLog(' successive det bit' )\n\t\t\t\tpass\n\t\t\t#counts[last one][det bit] = count\n\t\t\tcounts[-1][ ((c & (0xf<<13))>>13)-1 ] = c & 0x1fff\n\t\treturn counts\n\n\tdef parseRawPhtoa(self,r):\n\t\t\"\"\"\n\t\tThis is not documented in CL's thesis. There is either a bug\n\t\tin the FPGA or in JWB's code that reads data from the FPGA\n\t\tthat causes truncation of the most significant bit of the\n\t\tsuccessive detection bit. The code below disentangles things.\n\t\thttps://847wiki2.bw.nist.gov/tiki-index.php?page=QC_DAQ_phtoa\n\n\t\tFor the programmer's convenience this code requires that the\n\t\tfirst detection event of each repetition of an experiment\n\t\tbe pmt(1).\n\n\t\tr is a string containing the raw data from the fpga\n\n\t\tmarker separating pmt channels is\n\t\t 2**24-16 + pmtchannelno = 16777200 + pmtchannelno\n\n\t\t\"\"\"\n\t\tif self.debug : pLog('DEBUG:hfpyExp:parseRawPhtoa')\n\n\t\tphtoa = []\n\t\tfirstpoint = True\n\t\tfor cs in r.rsplit(','):\n\t\t\tc = int(cs)\n\t\t\tif firstpoint:\n\t\t\t\tif c != 8388593 :\n\t\t\t\t\tpLog('DEBUG:ERROR: c != 8388593 ')\n\t\t\t\t\t1/0 #if first point is not 8388593 then first detection\n\t\t\t\t\t#in .dc file isn't pmt(1) and the code that follows will break\n\t\t\t\t\t#in insideous ways. 
instead just halt.\n\t\t\t\telse: #first point is as expected, proceed\n\t\t\t\t\tfirstpoint = False\n\t\t\t#first two markers in any experiment repetition have their\n\t\t\t#msb truncated so rather than testing for the 23rd bit being high\n\t\t\t#look for the 22nd bit as an indicator of a marker\n\t\t\tif c==8388593: #marks beginning of new experiment rep\n\t\t\t\tphtoa.append([[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]) \n\t\t\tif (1<<22&c)>>22: #found detection gate marker\n\t\t\t\tc = c | 1<<23 #this fixes possibly missing msb\n\t\t\t\tch = c&15 #mask off all but last 4 bits\n\t\t\telse: #must be photon toa number\n\t\t\t\tphtoa[-1][ch].append(c)\n\n\t\t#pmt(1) can be used to synchronize with an external clock\n\t\t#define t=0 to be the arrival time of the first pulse in pmt(1)\n\t\tif self.phtoaPMT1isSync:\n\t\t\tif self.debug : pLog('DEBUG:hfpyExp:parseRawPhtoa:if_phtoaPMT1isSync')\n\t\t\tfor rep in phtoa:\n\t\t\t\t#there is a different offset for each rep of the experiment\n\t\t\t\t#pLog( 'rep = %s \\n' % str(rep))\n\t\t\t\t#pLog( 'rep[8] = %s \\n' % str(rep[8]))\n\n\t\t\t\tif(len(rep[8])==0):\n\t\t\t\t\toffset = 0\n\t\t\t\t\tpLog( 'DEBUG:hfpyExp:parseRawPhtoa:if_phtoaPMT1isSync: sync signal missing!' )\n\t\t\t\telse:\n\t\t\t\t\toffset = rep[8][0] #first photon count for pmt20\n\t\t\t\t#pLog( 'DEBUG:hfpyExp:parseRawPhtoa:if_phtoaPMT1isSync:offset='+ str(offset) )\n\t\t\t\t#pLog( 'rep before = %s \\n' % str(rep))\n\t\t\t\tpmtchi = 2 #only care about time of arrival correction for pmt ch #2\n\t\t\t\tfor toai in range(len(rep[pmtchi])):\n\t\t\t\t\tif (rep[pmtchi][toai])> 0:\n\t\t\t\t\t\tif rep[pmtchi][toai] - offset < 0:\n\t\t\t\t\t\t\t#pLog( 'ERROR:hfpyExp:parseRawPhtoa:if_phtoaPMT1isSync: offset too big!' )\n\t\t\t\t\t\t\t#pLog(' pmt %d, photon no %d, toa %d, offset %d ' %\\\n\t\t\t\t\t\t\t#\t(pmtchi, toai, rep[pmtchi][toai], offset) )\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trep[pmtchi][toai] -= offset\n\t\t\t\t#pLog( 'rep after = %s \\n' % str(rep))\n\n\t\t#calculate and store histograms for this experiment spanning\n\t\t#all repetitions, one for each pmtch\n\t\tif self.phtoaCalcHist:\n\t\t\tif self.debug : pLog('DEBUG:hfpyExp:parseRawPhtoa:if_phtoaCalcHist')\n\t\t\t#first, consolidate all experiments by pmtch\n\t\t\ts = 'phtoa = %s\\n' % str(phtoa)\n\t\t\t#pLog(s)\n\t\t\tphtoaByPmtCh = [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]\n\t\t\tfor rep in phtoa: #phtoa is a list summarizing several experiments\n\t\t\t\ts = ' rep = %s\\n' % str(rep)\n\t\t\t\t#pLog(s)\n\t\t\t\tfor pmtno, pmtphtoa in enumerate(rep):\n\t\t\t\t\tphtoaByPmtCh[pmtno].extend(pmtphtoa)\n\n\t\t\ts = 'phtoaByPmtCh = %s \\n' % str(phtoaByPmtCh)\n\t\t\t#pLog(s)\n\n\t\t\ts = 'maxphtoa by channel: '\n\t\t\tfor p in phtoaByPmtCh:\n\t\t\t\tpn = np.array(p)\n\t\t\t\tif pn != []:\n\t\t\t\t\ts = s + str(pn.max()) + ' '\n\t\t\t\telse:\n\t\t\t\t\ts = s + '0' + ' '\n\t\t\ts = s + '\\n'\n\t\t\tpLog('DEBUG: ' + s)\n\n\t\t\ts = 'phtoaByPmtCh = %s\\n' % str(phtoaByPmtCh)\n\t\t\t#pLog(s)\n\n\t\t\t#now calculate a histogram for each pmtch\n\t\t\tfor phchdat in phtoaByPmtCh: #loop 16 times\n\t\t\t\t(dbinned,edges) = \\\n\t\t\t\t\tnp.histogram(\n\t\t\t\t\t\tphchdat,\\\n\t\t\t\t\t\tbins=self.phtoaCalcHistNbins,\\\n\t\t\t\t\t\trange=self.phtoaCalcHistRange,\\\n\t\t\t\t\t\tnormed=False\\\n\t\t\t\t\t)\n\t\t\t\tself.phtoaHistByPmtCh.append([edges,dbinned])\n\t\t\t\ts = 'phchdat = %s \\n' % str(phchdat)\n\t\t\t\ts = s + ' dbinned: %s \\n edges: %s' % (dbinned,edges) + '\\n'\n\t\t\t\t#pLog(s)\n\t\t#end of if 
self.phtoaCalcHist:\n\n\t\t#print the parsed data\n\t\tif False:\n\t\t\trepn = 0\n\t\t\tpmtchn = 0\n\t\t\tfor rep in phtoa:\n\t\t\t\t#pLog('REP #'+ str(repn) )\n\t\t\t\tfor pmtch in rep:\n\t\t\t\t\t#pLog('PMTCH # ' + str(pmtchn) + ' ' + str(pmtch) )\n\t\t\t\t\tpmtchn+=1\n\t\t\t\trepn+=1\n\t\t\t\tpmtchn = 0\n\t\treturn phtoa\n\n\tdef catcounts(self,dGate=[0]):\n\t\t#concatonate raw counts from one or more detection gates\n\t\tif self.debug : pLog('DEBUG:hfpyExp:catcounts')\n\t\t#dGate is list of dGates\n\t\tc = [] #will be list of counts\n\t\tfor exper in self.counts: #each exper is list of 16 counts, one per dGate\n\t\t\tdgatesum = 0\n\t\t\tfor dg in dGate: #sum over photons collected in the dGates\n\t\t\t\tdgatesum += exper[dg]\n\t\t\tc.append(dgatesum)\n\t\treturn c\n\n\tdef mean(self,dGate=[0]):\n\t\tif self.debug : pLog('DEBUG:hfpyExp:mean')\n\t\t#calculate the histogram mean from counts spanning multiple dGates\n\t\t#dGate is list of dGates\n\t\tc = [] #will be list of counts\n\t\tfor exper in self.counts: #each exper is list of 16 counts, one per dGate\n\t\t\tsubtot = 0\n\t\t\tfor dg in dGate:\n\t\t\t\tsubtot += exper[dg]\n\t\t\tc.append(subtot)\n\t\tnc = np.array(c)\n\t\treturn nc.mean()\n\t\n\tdef stdv(self,dGate=[0]):\n\t\tif self.debug : pLog('DEBUG:hfpyExp:stdv')\n\t\t#calculate the histogram stdv from counts spanning multiple dGates\n\t\t#dGate is list of dGates\n\t\tc = [] #will be list of counts\n\t\tfor exper in self.counts: #each exper is list of 16 counts, one per dGate\n\t\t\tsubtot = 0\n\t\t\tfor dg in dGate:\n\t\t\t\tsubtot += exper[dg]\n\t\t\tc.append(subtot)\n\t\tnc = np.array(c)\n\t\treturn nc.std()\n\n\tdef halfMax(self,dGate):\n\t\tif self.debug : pLog('DEBUG:hfpyExp:halfMax')\n\t\t#calculate the sweep index for detect dGate (eg pmt0) which\n\t\t#corresponds to the 50% point of the maximum pmt count\n\t\tnc = np.array(self.counts)\n\t\tncp = nc[:,dGate] #raw data for specified dGate (eg pmt0)\n\t\tmax = ncp.max()\n\t\tfor di, d in enumerate(ncp):\n\t\t\tif d == max :\n\t\t\t\treturn di\n\t\treturn -1000" }, { "alpha_fraction": 0.5872802138328552, "alphanum_fraction": 0.6407433152198792, "avg_line_length": 32.125389099121094, "blob_id": "67dec7c462d6221ce88a875c5294d6c0595646c2", "content_id": "e0e02efe1d5029f0c7feb3ed4e16bbd12ccf26c3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 31966, "license_type": "no_license", "max_line_length": 902, "num_lines": 965, "path": "/Analysis/hfpy.py", "repo_name": "justboh/python_penning", "src_encoding": "UTF-8", "text": "import Gnuplot,copy, time, scipy, time, gc\n#import matplotlib.pyplot as pyplot\n#import pylab\nimport pickle, shutil, win32clipboard\nimport analysis as pena\nreload(pena)\nimport numpy as np\nimport numpy as numpy\n\nghfpypath = \"c:\\\\qc\\\\QC_DAQ\\\\src\\\\Drivers\\\\hfpy\"\n\n#from mpl_toolkits.axes_grid1 import make_axes_locatable\n#this class is for interaction between hfGUI3 and python\n\ngPlotFileName = 'c:\\\\tmp\\\\hfpy_out.png'\n\ndef testtest():\n\tgprint('this is a test')\n\ndef fullUniqueName(s):\n \t#returns a string like 'Demos%pysimple\\\\2011-03-09--18.49.23.9's\n\treturn s[s.rfind('\\\\',0,s.rfind('\\\\'))+1:-1]\n\ndef uniqueName(datadir):\n\t#returns a string like '2011-03-09--18.49.23.9'\n\treturn datadir[datadir.rfind('\\\\')+1:]\n\ndef pLog(msg):\n\t\"\"\"\n\tpLog(msg)\n\t\tmsg is a string\n\n\tKeep an error log for calls to this library. 
Write to disk.\n\t\"\"\"\n\tfh = open('c:\\\\tmp\\\\hfpy.log','a')\n\ts = ''\n\t#s = s + time.ctime() + ' - '\n\ts = s + msg + '\\n'\n\tfh.write(s)\n\tfh.close()\n\t#put text in clipboard\n\twin32clipboard.OpenClipboard()\n\twin32clipboard.EmptyClipboard()\n\twin32clipboard.SetClipboardText(s)\n\twin32clipboard.CloseClipboard()\n\nclass hfpyExp:\n\t#\"\"\"\n\t#Experiment class holding pmt counts and parameters for a particular\n\t#iteration of an experiment. This experiment class holds several iterations\n\t#or repetitions \"reps\" of a particular experiment.\n\t#run_r = structure returned by hfgui's run('path%expName')\n\t#dopPhtoa = True if photon time of arrival data should be parsed\n\t#phtoaPMT1isSync = True if pmt(1) is used to synchronize each experiment\n\t#\trepetition with an external clock\n\t#phtoaCalcHist = True if a histogram should be calculated for each pmt ch\n\t#\tspanning all the experiment reps\n\t#var = whatever experiment parameter is being varied\n\t#fit = True if the hfgui fit was successful\n\t#\"\"\"\n\n\tdef __init__(self,run_r=None, doPhtoa=False, phtoaPMT1isSync=False,\\\n\t\tphtoaCalcHist=False, phtoaCalcHistNbins=20, \\\n\t\tphtoaRange=(0,3000),var=0, debug = 0, fit=0,dataDirName=''):\n\t\tif debug : pLog('DEBUG:hfpyExp:__init__')\n\t\tself.debug = debug\n\t\tself.doPhtoa = doPhtoa\n\t\tself.phtoaPMT1isSync = phtoaPMT1isSync\n\t\tself.phtoaCalcHist = phtoaCalcHist\n\t\tself.phtoaHistByPmtCh = [] #filled out if previous is true\n\t\tself.phtoaCalcHistNbins = phtoaCalcHistNbins\n\t\t#one time step for photon time of arrival is 16 ns; phtoaRange is in us\n\t\tself.phtoaCalcHistRange = numpy.array(phtoaRange)*1000.0/16.0\n\t\tself.phtoaKeepRaw = False #delete raw data to save memory\n\t\tself.dataDirName = dataDirName\n\t\t\n\t\tif(run_r!=None): #if not defined probably debugging things so skip constructor\n\t\t\t#run_r result from running an experiment\n\t\t\tself.raw = run_r\n\t\t\tif run_r.has_key('RawDetection'):\n\t\t\t\tself.rawdet = run_r['RawDetection']\n\t\t\t\t#self.counts... 
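\n\t\t#NOTE: minimal sketch (not part of this module) of the us -> FPGA-tick conversion used\n\t\t#for phtoaCalcHistRange above, assuming only the 16 ns time step quoted in the comment;\n\t\t#'us_to_ticks' is a hypothetical helper name.\n\t\t#def us_to_ticks(t_us):\n\t\t#\treturn t_us*1000.0/16.0 #1 us = 1000 ns, i.e. 62.5 ticks of 16 ns\n\t\t#e.g. us_to_ticks(3000) -> 187500.0, so phtoaRange=(0,3000) maps to (0.0, 187500.0) ticks\n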
each exper is list of 16 counts, one per dGate\n\t\t\t\tself.counts = self.parseRawCounts(self.rawdet)\n\n\t\t\t#each rep will be a row in phtoa; columns are list of toa for each\n\t\t\t#pmt channel (16 total number); stuffed by self.parseraw()\n\t\t\tif self.doPhtoa & run_r.has_key('PhotonTimeOfArrival'):\t\n\t\t\t\tself.rawphtoa = run_r['PhotonTimeOfArrival']\n\t\t\t\tself.phtoa = self.parseRawPhtoa(self.rawphtoa)\n\t\t\t\tif self.phtoaKeepRaw == False:\n\t\t\t\t\tself.rawphtoa = ''\n\t\t\t\t\tgc.collect()\n\t\t\telse:\n\t\t\t\tself.rawphtoa = ''\n\t\t\t\tself.phtoa = None\n\t\t\t#v is the value of whatever is being swept for this exp\n\t\t\tself.var = var\n\t\t\tself.fit = copy.deepcopy(fit)\n\n\n\tdef __str__(self):\n\t\ts = ''\n\t\ts = s + 'var=' + str(self.var) + ','\n\t\ts = s + 'fit='+str(self.fit) + ','\n\t\ts = s + 'phtoa_raw=['+str(self.rawphtoa) + '],'\n\t\ts = s + 'phtoa='+str(self.phtoa) + ','\n\t\ts = s + 'counts_raw=['+str(self.rawdet) + '],'\n\t\ts = s + 'counts='+str(self.counts)\n\t\treturn s\n\n\tdef parseRawCounts(self, r):\n\t\tif self.debug == True : pLog('DEBUG:hfpyExp:parseRawCounts')\n\t\t#pLog('parseRawCounts '+str(r))\n\t\tcounts = []\n\t\tfor cs in r.rsplit(','):\n\t\t\t#pLog(' cs = ' + str(cs) )\n\t\t\tc = int(cs)\n\t\t\tif (c >> 17) == 0: #new experiment; successive detection bit no high\n\t\t\t\tcounts.append([0]*16)\n\t\t\telse : \n\t\t\t\t#pLog(' successive det bit' )\n\t\t\t\tpass\n\t\t\t#counts[last one][det bit] = count\n\t\t\tcounts[-1][ ((c & (0xf<<13))>>13)-1 ] = c & 0x1fff\n\t\treturn counts\n\n\tdef parseRawPhtoa(self,r):\n\t\t\"\"\"\n\t\tThis is not documented in CL's thesis. There is either a bug\n\t\tin the FPGA or in JWB's code that reads data from the FPGA\n\t\tthat causes truncation of the most significant bit of the\n\t\tsuccessive detection bit. The code below disentangles things.\n\t\thttps://847wiki2.bw.nist.gov/tiki-index.php?page=QC_DAQ_phtoa\n\n\t\tFor the programmer's convenience this code requires that the\n\t\tfirst detection event of each repetition of an experiment\n\t\tbe pmt(1).\n\n\t\tr is a string containing the raw data from the fpga\n\n\t\tmarker separating pmt channels is\n\t\t 2**24-16 + pmtchannelno = 16777200 + pmtchannelno\n\n\t\t\"\"\"\n\t\tif self.debug : pLog('DEBUG:hfpyExp:parseRawPhtoa')\n\n\t\tphtoa = []\n\t\tfirstpoint = True\n\t\tfor cs in r.rsplit(','):\n\t\t\tc = int(cs)\n\t\t\tif firstpoint:\n\t\t\t\tif c != 8388593 :\n\t\t\t\t\tpLog('DEBUG:ERROR: c != 8388593 ')\n\t\t\t\t\t1/0 #if first point is not 8388593 then first detection\n\t\t\t\t\t#in .dc file isn't pmt(1) and the code that follows will break\n\t\t\t\t\t#in insideous ways. 
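\n\t\t#NOTE: worked example, assuming only the bit layout parseRawCounts already relies on\n\t\t#(bit 17 = successive-detection flag, bits 13-16 = 1-based detection gate, bits 0-12 = count):\n\t\t#c = 147458 #a word from rawDetectionSampleDataSetA below\n\t\t#(c >> 17) == 1 #successive detection, so no new experiment row is appended\n\t\t#((c & (0xf<<13))>>13)-1 == 1 #detection gate index 1\n\t\t#c & 0x1fff == 2 #2 photon counts\n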
instead just halt.\n\t\t\t\telse: #first point is as expected, proceed\n\t\t\t\t\tfirstpoint = False\n\t\t\t#first two markers in any experiment repetition have their\n\t\t\t#msb truncated so rather than testing for the 23rd bit being high\n\t\t\t#look for the 22nd bit as an indicator of a marker\n\t\t\tif c==8388593: #marks beginning of new experiment rep\n\t\t\t\tphtoa.append([[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]) \n\t\t\tif (1<<22&c)>>22: #found detection gate marker\n\t\t\t\tc = c | 1<<23 #this fixes possibly missing msb\n\t\t\t\tch = c&15 #mask off all but last 4 bits\n\t\t\telse: #must be photon toa number\n\t\t\t\tphtoa[-1][ch].append(c)\n\n\t\t#pmt(1) can be used to synchronize with an external clock\n\t\t#define t=0 to be the arrival time of the first pulse in pmt(1)\n\t\tif self.phtoaPMT1isSync:\n\t\t\tif self.debug : pLog('DEBUG:hfpyExp:parseRawPhtoa:if_phtoaPMT1isSync')\n\t\t\tfor rep in phtoa:\n\t\t\t\t#there is a different offset for each rep of the experiment\n\t\t\t\t#pLog( 'rep = %s \\n' % str(rep))\n\t\t\t\t#pLog( 'rep[8] = %s \\n' % str(rep[8]))\n\n\t\t\t\tif(len(rep[8])==0):\n\t\t\t\t\toffset = 0\n\t\t\t\t\tpLog( 'DEBUG:hfpyExp:parseRawPhtoa:if_phtoaPMT1isSync: sync signal missing!' )\n\t\t\t\telse:\n\t\t\t\t\toffset = rep[8][0] #first photon count for pmt20\n\t\t\t\t#pLog( 'DEBUG:hfpyExp:parseRawPhtoa:if_phtoaPMT1isSync:offset='+ str(offset) )\n\t\t\t\t#pLog( 'rep before = %s \\n' % str(rep))\n\t\t\t\tpmtchi = 2 #only care about time of arrival correction for pmt ch #2\n\t\t\t\tfor toai in range(len(rep[pmtchi])):\n\t\t\t\t\tif (rep[pmtchi][toai])> 0:\n\t\t\t\t\t\tif rep[pmtchi][toai] - offset < 0:\n\t\t\t\t\t\t\t#pLog( 'ERROR:hfpyExp:parseRawPhtoa:if_phtoaPMT1isSync: offset too big!' )\n\t\t\t\t\t\t\t#pLog(' pmt %d, photon no %d, toa %d, offset %d ' %\\\n\t\t\t\t\t\t\t#\t(pmtchi, toai, rep[pmtchi][toai], offset) )\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\trep[pmtchi][toai] -= offset\n\t\t\t\t#pLog( 'rep after = %s \\n' % str(rep))\n\n\t\t#calculate and store histograms for this experiment spanning\n\t\t#all repetitions, one for each pmtch\n\t\tif self.phtoaCalcHist:\n\t\t\tif self.debug : pLog('DEBUG:hfpyExp:parseRawPhtoa:if_phtoaCalcHist')\n\t\t\t#first, consolidate all experiments by pmtch\n\t\t\ts = 'phtoa = %s\\n' % str(phtoa)\n\t\t\t#pLog(s)\n\t\t\tphtoaByPmtCh = [[],[],[],[],[],[],[],[],[],[],[],[],[],[],[],[]]\n\t\t\tfor rep in phtoa: #phtoa is a list summarizing several experiments\n\t\t\t\ts = ' rep = %s\\n' % str(rep)\n\t\t\t\t#pLog(s)\n\t\t\t\tfor pmtno, pmtphtoa in enumerate(rep):\n\t\t\t\t\tphtoaByPmtCh[pmtno].extend(pmtphtoa)\n\n\t\t\ts = 'phtoaByPmtCh = %s \\n' % str(phtoaByPmtCh)\n\t\t\t#pLog(s)\n\n\t\t\ts = 'maxphtoa by channel: '\n\t\t\tfor p in phtoaByPmtCh:\n\t\t\t\tpn = numpy.array(p)\n\t\t\t\tif pn != []:\n\t\t\t\t\ts = s + str(pn.max()) + ' '\n\t\t\t\telse:\n\t\t\t\t\ts = s + '0' + ' '\n\t\t\ts = s + '\\n'\n\t\t\tpLog('DEBUG: ' + s)\n\n\t\t\ts = 'phtoaByPmtCh = %s\\n' % str(phtoaByPmtCh)\n\t\t\t#pLog(s)\n\n\t\t\t#now calculate a histogram for each pmtch\n\t\t\tfor phchdat in phtoaByPmtCh: #loop 16 times\n\t\t\t\t(dbinned,edges) = \\\n\t\t\t\t\tnumpy.histogram(\n\t\t\t\t\t\tphchdat,\\\n\t\t\t\t\t\tbins=self.phtoaCalcHistNbins,\\\n\t\t\t\t\t\trange=self.phtoaCalcHistRange,\\\n\t\t\t\t\t\tnormed=False\\\n\t\t\t\t\t)\n\t\t\t\tself.phtoaHistByPmtCh.append([edges,dbinned])\n\t\t\t\ts = 'phchdat = %s \\n' % str(phchdat)\n\t\t\t\ts = s + ' dbinned: %s \\n edges: %s' % (dbinned,edges) + '\\n'\n\t\t\t\t#pLog(s)\n\t\t#end of if 
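\n\t\t#NOTE: worked example of the marker arithmetic above, assuming only the encoding stated\n\t\t#in the docstring (marker = 2**24-16+pmtchannelno = 16777200+pmtchannelno):\n\t\t#2**24-16+1 == 16777201 #full pmt(1) marker\n\t\t#16777201 - (1<<23) == 8388593 #same marker with its msb truncated -> the start-of-rep value tested above\n\t\t#(8388593 | 1<<23) & 15 == 1 #restore the msb, mask the low 4 bits -> pmt channel 1\n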
self.phtoaCalcHist:\n\n\t\t#print the parsed data\n\t\tif False:\n\t\t\trepn = 0\n\t\t\tpmtchn = 0\n\t\t\tfor rep in phtoa:\n\t\t\t\t#pLog('REP #'+ str(repn) )\n\t\t\t\tfor pmtch in rep:\n\t\t\t\t\t#pLog('PMTCH # ' + str(pmtchn) + ' ' + str(pmtch) )\n\t\t\t\t\tpmtchn+=1\n\t\t\t\trepn+=1\n\t\t\t\tpmtchn = 0\n\t\treturn phtoa\n\n\tdef catcounts(self,dGate=[0]):\n\t\t#concatonate raw counts from one or more detection gates\n\t\tif self.debug : pLog('DEBUG:hfpyExp:catcounts')\n\t\t#dGate is list of dGates\n\t\tc = [] #will be list of counts\n\t\tfor exper in self.counts: #each exper is list of 16 counts, one per dGate\n\t\t\tdgatesum = 0\n\t\t\tfor dg in dGate: #sum over photons collected in the dGates\n\t\t\t\tdgatesum += exper[dg]\n\t\t\tc.append(dgatesum)\n\t\treturn c\n\n\tdef mean(self,dGate=[0]):\n\t\tif self.debug : pLog('DEBUG:hfpyExp:mean')\n\t\t#calculate the histogram mean from counts spanning multiple dGates\n\t\t#dGate is list of dGates\n\t\tc = [] #will be list of counts\n\t\tfor exper in self.counts: #each exper is list of 16 counts, one per dGate\n\t\t\tsubtot = 0\n\t\t\tfor dg in dGate:\n\t\t\t\tsubtot += exper[dg]\n\t\t\tc.append(subtot)\n\t\tnc = numpy.array(c)\n\t\treturn nc.mean()\n\t\n\tdef stdv(self,dGate=[0]):\n\t\tif self.debug : pLog('DEBUG:hfpyExp:stdv')\n\t\t#calculate the histogram stdv from counts spanning multiple dGates\n\t\t#dGate is list of dGates\n\t\tc = [] #will be list of counts\n\t\tfor exper in self.counts: #each exper is list of 16 counts, one per dGate\n\t\t\tsubtot = 0\n\t\t\tfor dg in dGate:\n\t\t\t\tsubtot += exper[dg]\n\t\t\tc.append(subtot)\n\t\tnc = numpy.array(c)\n\t\treturn nc.std()\n\n\tdef halfMax(self,dGate):\n\t\tif self.debug : pLog('DEBUG:hfpyExp:halfMax')\n\t\t#calculate the sweep index for detect dGate (eg pmt0) which\n\t\t#corresponds to the 50% point of the maximum pmt count\n\t\tnc = numpy.array(self.counts)\n\t\tncp = nc[:,dGate] #raw data for specified dGate (eg pmt0)\n\t\tmax = ncp.max()\n\t\tfor di, d in enumerate(ncp):\n\t\t\tif d == max :\n\t\t\t\treturn di\n\t\treturn -1000\n\ndef fitToSin1(x,y,f_com = 1.0,phi0=0,debug=False):\n\tpLog('DEBUG:hfpyExp:fitToSin1')\n\t#fit to phase shift of velocimetry signal\n\t#phi0 is phase offset corresponding to the center of the resonance\n\tx = np.array(x)\n\ty = np.array(y)\n\t\n\tif debug: #do plotting only if in debug mode\n\t#plot data\n\t\tpylab.clf()\n\t\tpylab.plot(x,y,'-')\n\t\tpylab.ylim([y.min(),y.max()])\n\t\n\t#fit parameters\n\tbsc_C=0.3\n\tbsc_f=0.01\n\n\tfp_A = pena.cP(default=y.max()-y.min(),name='A',\\\n\t\t\t\t\tboundUpper=y.max()-y.min(),boundLower=10)\n\tfp_C = pena.cP(default=y.mean(),name='C',\\\n\t\t\t\t\tboundUpper=y.mean()*(1+bsc_C),boundLower=y.mean()*(1-bsc_C))\n\tfp_f = pena.cP(default=f_com,name='f',\\\n\t\t\t\t\tboundLower=f_com*(1-bsc_f),boundUpper=f_com*(1+bsc_f))\n\tfp_phi = pena.cP(default=0,name='phi',\\\n\t\t\t\t\tboundLower=0,boundUpper=2*np.pi)\n\tfp_tau = pena.cP(default=0.01,name='tau',\\\n\t\t\t\t\tboundUpper=0.1,boundLower=0.001)\n\tfps = [fp_A, fp_C, fp_f, fp_phi, fp_tau]\n\t\n\t#fit function\n\tdef fitfunc(x) : \n\t\t\treturn fp_A()*np.sin( 2*np.pi*fp_f()*x+fp_phi()+phi0)*np.exp(-1.0*x*fp_tau()) + fp_C()\n\txfit_points = 200 \n\txfit = np.arange(x.min(),x.max(),(x.max()-x.min())/xfit_points)\t\n\t\n\t#do the fit -- take best of several initial phases\n\tfit_ok = False; ret = []; vphi_range = 2.0*np.pi\n\tvphi_steps = 4; besti = 0; bestscore = 1e12\n\tfor i, vphi in 
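\n\t#NOTE: the model being fitted is y(t) = A*sin(2*pi*f*t + phi + phi0)*exp(-t*tau) + C,\n\t#where 'tau' acts as a decay rate despite its name. The loop below restarts the bounded\n\t#fit from vphi_steps=4 initial phases (0, pi/2, pi, 3*pi/2) and keeps the lowest-score\n\t#result whose status flag is 0, to avoid local minima in phi. A call mirroring the one\n\t#used later in plotPhtoa (argument values are illustrative only):\n\t#rets = fitToSin1(t_us, counts, f_com=0.540, phi0=0, debug=False)\n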
enumerate(np.arange(0,vphi_range,vphi_range/vphi_steps)):\n\t\tfp_phi.default=vphi\n\t\tret.append( pena.cFit2(fitfunc, fps, x, y,debug=0) )\n\t\t#of the various starting phases which is best?\n\t\tscore = ret[i][1]\n\t\tif (score < bestscore) & (ret[i][3] == 0): \n\t\t\tbestscore = score; besti = i\n\t\tif debug: \n\t\t\ts = 'DEBUG:hfpyExp:fitToSin1 %d: vphi = %f, score = %f' % (i, vphi,score)\n\t\t\tpLog(s)\n\t\t\tpLog('%d: vphi = %f, score = %f' % (i, vphi,score) )\n\t\t\tpylab.plot(xfit,fitfunc(xfit),'--')\n\t\t\tpylab.ylim([y.min(),y.max()])\n\t\t\tpylab.draw()\n\t\t\t#raw_input('hit RETURN')\n\n\t#which fit was best??\n\tif ret[besti][3] == 0 : #fit is successful if equal to 0\n\t\tfor i,fp in enumerate(fps): \n\t\t\tfp.set(ret[besti][0][i]) \n\t\tif debug:\n\t\t\tpLog('FIT OK! (using %d with score %f)' % (besti,bestscore)\t)\n\t\t\tfor p in fps:\n\t\t\t\ts = 'DEBUG:hfpyExp:fitToSin1 %s' % (p)\n\t\t\t\tpLog(s)\n\t\t\tpylab.plot(xfit,fitfunc(xfit),'-',linewidth=3)\n\t\t\tpylab.ylim([y.min(),y.max()])\n\t\t\tpylab.draw()\n\t\t\tpylab.savefig(gPlotFileName)\n\t\tfit_ok = True \n\telse :\n\t\tif debug:\n\t\t\tpLog('DEBUG:hfpyExp:fitToSin1 FIT FAILED! (smode=%d)' % ret[besti][3])\n\t\tpass\n\t\n\tyfit = np.array(fitfunc(xfit))\n\tret = {\"fit\":fps,\"sample_x\":xfit,\"sample_y\":yfit}\n\treturn ret
\n\t\t\n###############\n###############\n###############\nclass hfpySweep:\n\t\"\"\"\n\tHold and process data collected from a number of experiments.\n\t\"\"\"\n\tdef __init__(self, title='', xname='',datadir=None,debug=0):\n\t\tself.exps = [] #list of a bunch of experiments\n\t\tself.title = title #experiment title\n\t\tself.xname = xname #name of x-axis\n\t\tself.debug = debug\n\t\tself.datadir = datadir\n\t\tself.dataDirUniqueName = self.datadir[self.datadir.rfind('\\\\')+1:-1]\n\t\tif debug : pLog('DEBUG:hfpySweep:__init__')\n\t\t#Check and see if there is an existing plot of this sort.\n\t\t#If so reuse it. If not create it.\n\t\t#try:\n\t\t#\tself.gp\n\t\t#except NameError:\n\t\t#\tself.gp = None\n\t\t#if self.gp is None:\n\t\tself.gp = Gnuplot.Gnuplot()\n\t\tself.gp('set terminal windows position 1200,600')\n\t\tself.gp('set terminal windows size 600,500')\n\n\t\t#9/2010 JWB setup alternative to plotting using gnuplot\n\t\t#see hfpy/showplot.py... it just displays figures written to disk\n\t\tself.plotfn = gPlotFileName\n\n\tdef __del__(self):\n\t\tself.saveRawExpDataToDisk()\n\n\tdef __str__(self):\n\t\ts = ''\n\t\ts = s + 'hfpySweep data follows'\n\t\ts = s + 'title=' + self.title + ','\n\t\ts = s + 'xname=' + self.xname + ','\n\t\ts = s + 'datadir=' + self.datadir\n\t\ts = s + '\\n'\n\t\tfor e in self.exps:\n\t\t\ts = s + str(e) + '\\n'\n\t\treturn s\n\n\tdef saveToDisk(self):\n\t\tself.saveSweepToDisk()\n\n\tdef saveSweepToDisk(self):\n\t\t#save raw data to disk in case\n\t\t#when using pickle need access to the data structures so back them up too\n\t\tfpath0 = ghfpypath + '/hfpy.py'\n\t\tfpath1 = self.datadir + '\\\\'+self.dataDirUniqueName + '_hfpy.py'\n\t\tshutil.copy(fpath0,fpath1)\n\t\tfpath2 = self.datadir + '\\\\'+self.dataDirUniqueName + '_raw.pickle'\n\t\tfh = open(fpath2,'w')\n\t\tpickle.dump(self.exps,fh)\n\t\tfh.close()\n\t\t#to recover use the following\n\t\t#import pickle\n\t\t#fh = open(path_to_pickle,'r')\n\t\t#p = pickle.load(fh)\n\t\t#fh.close()\n\t\n\n\tdef appendExp(self, exp):\n\t\tif self.debug : pLog('DEBUG:hfpySweep:appendExp')\n\t\tself.exps.append(exp)\n\n\tdef plotMean(self,pmtNum=[0]):\n\t\tif self.debug : pLog('DEBUG:hfpySweep:plotMean')\n\t\tdataDirUniqueName = self.datadir[self.datadir.rfind('\\\\')+1:-1]\n\t\tplotTitle = self.title + ' ('+dataDirUniqueName+')'\n\t\t#setup plot\n\t\tif False: #plot using gnuplot\n\t\t\tself.gp.title(plotTitle)\n\t\t\txlab = 'set xlabel \\\"' + self.xname + '\\\"'\n\t\t\tself.gp(xlab)\n\t\t\tself.gp(\"\"\"set ylabel \"Mean Counts\" \"\"\")\n\t\t\tself.gp('set style data linespoints')\n\t\t\tg_m=[]\n\t\t\tfor exp in self.exps:\n\t\t\t\tg_m.append([exp.var,exp.mean(pmtNum)])\n\t\t\tself.gp.plot(g_m)\n\t\tif True: #plot using disk file\n\t\t\tx=[];y=[]\n\t\t\tfor exp in self.exps:\n\t\t\t\tx.append(exp.var)\n\t\t\t\ty.append(exp.mean(pmtNum))\n\t\t\tpylab.clf()\n\t\t\tpylab.plot(x,y)\n\t\t\tpylab.title(plotTitle)\n\t\t\tpylab.savefig(self.plotfn)\n\n\tdef plotHalfMax(self,pmtNum=[0]):\n\t\tif self.debug 
: pLog('DEBUG:hfpySweep:plotHalfMax')\n\t\t#setup plot\n\t\tdataDirUniqueName = self.datadir[self.datadir.rfind('\\\\')+1:-1]\n\t\tplotTitle = self.title + ' ('+dataDirUniqueName+')'\n\t\tself.gp.title(plotTitle)\n\t\txlab = 'set xlabel \\\"' + self.xname + '\\\"'\n\t\tself.gp(xlab)\n\t\tself.gp(\"\"\"set ylabel \"HalfMax\" \"\"\")\n\t\tself.gp('set style data linespoints')\n\t\tg_m=[]\n\t\tfor exp in self.exps:\n\t\t\tg_m.append([exp.var,exp.halfMax(pmtNum)])\n\t\tself.gp.plot(g_m)\n\n\tdef plotStdDev(self,pmtNum=[0]):\n\t\tif self.debug : pLog('DEBUG:hfpySweep:plotStdDev')\n\t\t#setup plot\n\t\tdataDirUniqueName = self.datadir[self.datadir.rfind('\\\\')+1:-1]\n\t\tplotTitle = self.title + ' ('+dataDirUniqueName+')'\n\t\tself.gp.title(plotTitle)\n\t\txlab = 'set xlabel \\\"' + self.xname + '\\\"'\n\t\tself.gp(xlab)\n\t\tself.gp(\"\"\"set ylabel \"Count StdDev\" \"\"\")\n\t\tself.gp('set style data linespoints')\n\t\tg_m=[]\n\t\tfor exp in self.exps:\n\t\t\tg_m.append([exp.var,exp.stdv(pmtNum)])\n\t\tself.gp.plot(g_m)\n\n\tdef plotPhtoa(self, pmtch=0, saveToDisk=False, f_com_est=-1):\n\t\t\"\"\"\n\t\tGenerate heat map for photon time of arrival. Requires that\n\t\tphtoaCalcHist == True for each of the constituent experiments\n\n\t\tpmtch is the pmtchannel whose histogram should be plotted\n\t\tcall with saveToDisk==True at end of sweep to save data to disk\n\t\t\"\"\"\n\t\tif self.debug : pLog('DEBUG:hfpySweep:plotPhtoa')\n\n\t\t##################\n\t\t###setup plot data\n\t\tplotTitle = self.title + ' ('+self.dataDirUniqueName+')'\n\t\tylab = \"f_tickle (kHz)\"\n\t\txlab = \"Photon Time of Arrival (us)\"\n\n\t\tx = self.exps[0].phtoaHistByPmtCh[pmtch][0][0:-1]\n\t\tx = numpy.array(x)\n\t\tx = x*16.0/1000.0 #time in us\n\t\t#pLog('x=%s\\n' % str(x))\n\t\ty=[]\n\t\thists = [] #collection of histograms to be plotted in heatmap\n\t\tfor exp in self.exps:\n\t\t\thists.append( exp.phtoaHistByPmtCh[pmtch][1] )\n\t\t\ty.append(exp.var*1000.0) #in kHz\n\t\ty=numpy.array(y)\n\t\ty=y/1000.0 #to keep things in kHz\n\t\t#pLog('y=%s\\n' % str(y))\n\n\t\t##########################################\n\t\t###simple 1-d plot of latest data with fit\n\t\tif False:\n\t\t\tlastexp = self.exps[-1]\n\t\t\ty=lastexp.phtoaHistByPmtCh[pmtch][1]\n\t\t\t#pLog('y = %s \\n' % str(y))\n\t\t\t#pylab.clf()\n\t\t\t#pylab.plot(x,y)\n\t\t\t#pylab.savefig(self.plotfn)\n\t\t\tfitToSin1(x,y,f_com=0.540,debug=False)\n\t\t\treturn\n\n\t\t#################################\n\t\t###simple 1-d plot of latest data\n\t\tif False:\n\t\t\tlastexp = self.exps[-1]\n\t\t\ty=lastexp.phtoaHistByPmtCh[pmtch][1]\n\t\t\tpLog('y = %s \\n' % str(y))\n\t\t\tpylab.clf()\n\t\t\tpylab.plot(x,y)\n\t\t\tpylab.savefig(self.plotfn)\n\t\t\treturn\n\n\t\t#################\n\t\t###fancy 2-d plot\n\t\tif False:\n\t\t\thistsn = numpy.array(hists)\n\t\t\thistsnT = histsn.transpose()\n\t\t\tpylab.clf()\n\n\t\t\t#determine plot aspect ratio so it fills full window\n\t\t\tpe = [float(x.min()),float(x.max()),\\\n\t\t\t\t float(y.min()),float(y.max())] #plot extent x0, x1, y0, y1\n\t\t\t#pLog('pe = %s\\n'%str(pe))\n\t\t\tif pe[3]!=pe[2]:\n\t\t\t\taspect = (pe[1]-pe[0])/(pe[3]-pe[2])\n\t\t\telse:\n\t\t\t\taspect = 1\n\t\t\t#pLog('aspect = %s\\n'%str(aspect))\n\n\t\t\tif aspect != 1: #don't plot first time\n\t\t\t\t#pylab.imshow(numpy.flipud(histsn), interpolation='bilinear', aspect=aspect,extent=pe)\n\t\t\t\tpylab.imshow(numpy.flipud(histsn), 
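\n\t\t\t\t#NOTE: minimal sketch of the extent/aspect idea used here: extent maps the image\n\t\t\t\t#onto data coordinates [x0,x1,y0,y1], and aspect=(x1-x0)/(y1-y0) stretches the heat\n\t\t\t\t#map to fill a roughly square axes box. With hypothetical arrays xs (us) and ys (kHz):\n\t\t\t\t#pe = [xs.min(), xs.max(), ys.min(), ys.max()]\n\t\t\t\t#pylab.imshow(numpy.flipud(hist2d), extent=pe, aspect=(pe[1]-pe[0])/(pe[3]-pe[2]))\n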
interpolation='nearest',aspect=aspect,extent=pe)\n\t\t\t\tpylab.grid()\n\t\t\t\tpylab.colorbar()\n\t\t\t\tpylab.title(plotTitle)\n\t\t\t\tpylab.xlabel(xlab)\n\t\t\t\tpylab.ylabel(ylab)\n\t\t\t\tpylab.savefig(self.plotfn)\n\n\t\t####################\n\t\t## super fancy plot\n\t\t#from expamples... http://matplotlib.sourceforge.net/plot_directive/mpl_toolkits/axes_grid/examples/scatter_hist.py\n\t\tif True:\n\t\t\thistsn = numpy.array(hists)\n\t\t\thistsnT = histsn.transpose()\n\t\t\tpylab.clf()\n\n\t\t\t#determine plot aspect ratio so it fills full window\n\t\t\tpe = [float(x.min()),float(x.max()),\\\n\t\t\t\t float(y.min()),float(y.max())] #plot extent x0, x1, y0, y1\n\t\t\t#pLog('pe = %s\\n'%str(pe))\n\t\t\tif pe[3]!=pe[2]:\n\t\t\t\taspect = (pe[1]-pe[0])/(pe[3]-pe[2])\n\t\t\telse:\n\t\t\t\taspect = 1\n\t\t\t#pLog('aspect = %s\\n'%str(aspect))\n\t\t\t#pLog('pe = %s\\n'%str(pe))\n\n\t\t\tif aspect != 1: #don't plot first time\n\t\t\t\tpylab.clf()\n\t\t\t\tfig = pyplot.figure(1, figsize=(5.5,5.5))\n\t\t\t\tfig.suptitle(plotTitle)\n\n\t\t\t\tax11 = pyplot.subplot(223)\n\t\t\t\tax11.set_xlabel(xlab)\n\t\t\t\tax11.set_ylabel(ylab)\n\n\t\t\t\t#pylab.setp( ax11.get_xticklabels(), visible=False)\n\n\t\t\t\t##divider = make_axes_locatable(ax11)\n\t\t\t\t#\n\t\t\t\t##ax21 = divider.append_axes(\"top\", 1.1, pad=0.1, \\\n\t\t\t\t##\tframeon=False)\n\t\t\t\tax21 = pyplot.subplot(221,frameon=True)\n\t\t\t\tif f_com_est <= 0: #don't bother with fit\n\t\t\t\t\tax21.plot(x,numpy.flipud(histsn)[1,:])\n\t\t\t\tif f_com_est > 0 : #then do fit\n\t\t\t\t\ty = numpy.flipud(histsn)[1,:]\n\t\t\t\t\trets = fitToSin1(x,y,f_com = f_com_est/1000.0,phi0=0,debug=False)\n\t\t\t\t\tsx = rets[\"sample_x\"]\n\t\t\t\t\tsy = rets[\"sample_y\"]\n\t\t\t\t\tax21.plot(x,numpy.flipud(histsn)[1,:])\n\t\t\t\t\tax21.plot(sx,sy)\n\t\t\t\t#pylab.setp( ax21.get_yticklabels(), visible=False)\n\t\t\t\tpylab.setp( ax21.get_xticklabels(), visible=False)\n\t\t\t\tax21.get_xaxis().set_ticks([])\n\t\t\t\t#ax21.get_yaxis().set_ticks([])\n\t\t\t\t\n\t\t\t\t#plot fits \n\t\t\t\t##ax12 = divider.append_axes(\"right\", 1.1, pad=0.1, \\\n\t\t\t\t##\tframeon=False)\n\t\t\t\tax12 = pyplot.subplot(224,frameon=True)\n\t\t\t\tif f_com_est <= 0: #don't bother with fit\n\t\t\t\t\tax12.plot(histsn.std(axis=1),y)\n\t\t\t\t\tax12.plot(histsn.mean(axis=1),y)\n\t\t\t\tif f_com_est > 0 : #then do fit\n\t\t\t\t\tpass\n\t\t\t\tpylab.setp( ax12.get_yticklabels(), visible=False)\n\t\t\t\tpylab.setp( ax12.get_xticklabels(), visible=False)\n\t\t\t\tax12.get_xaxis().set_ticks([])\n\t\t\t\tax12.get_yaxis().set_ticks([])\n\n\t\t\t\t#pylab.subplots_adjust(wspace=0,hspace=0)\n\n\t\t\t\tax11.imshow(numpy.flipud(histsn), interpolation='bilinear',\\\n\t\t\t\t\textent=pe,aspect=aspect)\n\n\t\t\t\tpylab.savefig(self.plotfn)\n\n\t\tif saveToDisk == True:\n\n\t\t\t#write png to disk\n\t\t\tpylab.clf()\n\t\t\thistsn = numpy.array(hists)\n\t\t\thistsnT = histsn.transpose()\n\n\t\t\t#determine plot aspect ratio so it fills full window\n\t\t\tpe = [float(x.min()),float(x.max()),\\\n\t\t\t\t float(y.min()),float(y.max())] #plot extent x0, x1, y0, y1\n\t\t\t#pLog('pe = %s\\n'%str(pe))\n\t\t\tif pe[3]!=pe[2]:\n\t\t\t\taspect = (pe[1]-pe[0])/(pe[3]-pe[2])\n\t\t\telse:\n\t\t\t\taspect = 1\n\t\t\tpylab.imshow(numpy.flipud(histsn), interpolation='bilinear',aspect=aspect,extent=pe)\n\t\t\tpylab.grid()\n\t\t\tpylab.colorbar()\n\t\t\tpylab.title(plotTitle)\n\t\t\tpylab.xlabel(xlab)\n\t\t\tpylab.ylabel(ylab)\n\t\t\tpylab.savefig(self.plotfn)\n\t\t\tfpath = self.datadir + 
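\n\t\t\t#NOTE: histsn is indexed [freq_row, time_bin], so the transpose histsnT used by the\n\t\t\t#csv export below satisfies histsnT[xi,yi] == histsn[yi,xi]; each csv line therefore\n\t\t\t#pairs one (arrival-time bin, tickle frequency) coordinate with one bin count.\n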
'\\\\'+self.dataDirUniqueName + '.png'\n\t\t\tpylab.savefig(fpath)\n\n\t\t\t#write plot data to disk\n\t\t\tfpath = self.datadir + '\\\\'+self.dataDirUniqueName + '.csv'\n\t\t\t#pLog('fpath = %s\\n' %fpath )\n\t\t\ts = '#hfpy:plotPhtoa data out -- ' + self.dataDirUniqueName + '\\n'\n\t\t\ts = s + '#photon arrival time (us), f_ticke (kHz), number of counts in hist \\n'\n\t\t\tfor xi,xdat in enumerate(x):\n\t\t\t\tfor yi,ydat in enumerate(y):\n\t\t\t\t\tst = '%f,%f,%f\\n' %(xdat,ydat,histsnT[xi,yi])\n\t\t\t\t\t#pLog('%i,%i s=%s\\n' % (xi,yi,st))\n\t\t\t\t\ts = s + st\n\t\t\tfh = open(fpath,'w')\n\t\t\tfh.write(s)\n\t\t\tfh.close()\n\n\tdef plotPhaseFlopContrast(self):\n\t\t\"\"\"\n\t\tExperiment where the signal is fit parameters fit_A and fit_B of the\n\t\tFlop fit type in the HFgui. It is used by the following experiments.\n\t\t1) ramseyBatch\n\t\t2) SEphaseFlopBatch\n\t\t\"\"\"\n\t\tif self.debug : pLog('DEBUG:hfpySweep:plotRamseyContrast')\n\t\t#setup plot\n\t\tdataDirUniqueName = self.datadir[self.datadir.rfind('\\\\')+1:-1]\n\t\tplotTitle = self.title + ' ('+dataDirUniqueName+')'\n\t\tself.gp.title(plotTitle)\n\t\tylab = 'set ylabel \\\"Counts\\\"'\n\t\tself.gp(ylab)\n\t\txlab = 'set xlabel \\\"' + self.xname + '\\\"'\n\t\tself.gp(xlab)\n\t\tself.gp('set style data points')\n\t\tddA=[]\n\t\tddB=[]\n\t\tddPhase=[]\n\t\ts=''\n\t\tfor exp in self.exps: #vary parameter exp.var for each experiment\n\t\t\tif exp.fit == None:\n\t\t\t\tddA.append([exp.var,0])\n\t\t\t\tddB.append([exp.var,0])\n\t\t\t\tddPhase.append([exp.var,0])\n\t\t\telse:\n\t\t\t\tddA.append([exp.var,exp.fit.A])\n\t\t\t\tddB.append([exp.var,exp.fit.B])\n\t\t\t\tddPhase.append([exp.var,(exp.fit.phi+3.14)/(2*3.14)*100.0])\n\t\tself.gp('set grid')\n\t\tself.gp('set pointsize 2.5')\n\t\tself.gp('set pt *')\n\t\tself.gp.plot(ddA,ddB,ddPhase)\n\n\t\t#write png to disk\n\t\tself.gp('set terminal png')\n\t\tfpath = self.datadir + '\\\\phaseFlopContrast_'+self.dataDirUniqueName + '.png'\n\t\tpLog('fpath=%s\\n' %fpath)\n\t\tself.gp('set output \\\"'+ fpath +'\\\"')\n\t\tself.gp.plot(ddA,ddB,ddPhase)\n\t\tself.gp('unset output')\n\t\tself.gp('set terminal windows')\n\n\t\t#write plot data to disk\n\t\tfpath = self.datadir + '\\\\phaseFlopContrast_'+self.dataDirUniqueName+'.csv'\n\t\tfh = open(fpath,'w')\n\t\tfh.write('#plotRamseyContrast data (' + self.dataDirUniqueName + ')\\n' )\n\t\tfh.write('fp_t (us), fit_A, fit_B, phi\\n')\n\t\tfor exp in self.exps:\n\t\t\tif exp.fit: #drop data points where the fit fails (is None)\n\t\t\t\ts = '%d,%f,%f,%f\\n' % (exp.var, exp.fit.A, exp.fit.B,exp.fit.phi/(2*3.14)*360)\n\t\t\t\tfh.write(s)\n\t\tfh.close()\n\n\tdef saveSubExpUniqueNames(self):\n\t\t#write plot data to disk\n\t\tfpath = self.datadir + '\\\\subExpUniqueNames_'+self.dataDirUniqueName+'.csv'\n\t\tfh = open(fpath,'w')\n\t\tfh.write('#subExpUniqueNames\\n' )\n\t\tfh.write('#exp.var, exp.dataDirName\\n')\n\t\tfor exp in self.exps:\n\t\t\ts = '%d, %s, %s\\n' % (exp.var, uniqueName(exp.dataDirName),exp.dataDirName)\n\t\t\tfh.write(s)\n\t\tfh.close()\n\n\tdef plotLC(self):\n\t\t\"\"\"\n\t\tasdfasdf\n\t\tasdf\n\t\tasdf\n\t\tExperiment where the signal is fit parameters fit_A and fit_B of the\n\t\tFlop fit type in the HFgui. 
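\n\t\tNote (sketch): plotPhaseFlopContrast above rescales the fitted phase onto a 0-100 axis\n\t\tvia (exp.fit.phi+3.14)/(2*3.14)*100.0; assuming phi lies in [-pi, pi], an exact\n\t\tequivalent is (phi + np.pi)/(2.0*np.pi)*100.0, and the csv column phi/(2*3.14)*360\n\t\texpresses the same phase in degrees.\n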
It is used by the following experiments.\n\t\t1) ramseyBatch\n\t\t2) SEphaseFlopBatch\n\t\t\"\"\"\n\t\tif self.debug : pLog('DEBUG:hfpySweep:plotRamseyContrast')\n\t\t#setup plot\n\t\tdataDirUniqueName = self.datadir[self.datadir.rfind('\\\\')+1:-1]\n\t\tplotTitle = self.title + ' ('+dataDirUniqueName+')'\n\t\tself.gp.title(plotTitle)\n\t\tylab = 'set ylabel \\\"Counts\\\"'\n\t\tself.gp(ylab)\n\t\txlab = 'set xlabel \\\"' + self.xname + '\\\"'\n\t\tself.gp(xlab)\n\t\tself.gp('set style data points')\n\t\tddA=[]\n\t\tddB=[]\n\t\tddPhase=[]\n\t\ts=''\n\t\tfor exp in self.exps: #vary parameter exp.var for each experiment\n\t\t\tif exp.fit == None:\n\t\t\t\tddA.append([exp.var,0])\n\t\t\t\tddB.append([exp.var,0])\n\t\t\telse:\n\t\t\t\tddA.append([exp.var,exp.fit.x0])\n\t\t\t\tddB.append([exp.var,exp.fit.w])\n\t\tself.gp('set grid')\n\t\tself.gp('set pointsize 2.5')\n\t\tself.gp('set pt *')\n\t\tself.gp.plot(ddA,ddB)\n\n\t\t#write png to disk\n\t\tself.gp('set terminal png')\n\t\tfpath = self.datadir\n\t\tfpath = fpath.replace('\\\\','\\\\\\\\')+'\\\\\\\\phaseFlopContrast_'+dataDirUniqueName+'.png'\n\t\tself.gp('set output \\\"'+ fpath +'\\\"')\n\t\tself.gp.plot(ddA,ddB)\n\t\tself.gp('unset output')\n\t\tself.gp('set terminal windows')\n\n\t\t#write plot data to disk\n\t\tfpath = self.datadir + '\\\\phaseFlopContrast_'+dataDirUniqueName+'.csv'\n\t\tfh = open(fpath,'w')\n\t\tfh.write('#plotRamseyContrast data (' + dataDirUniqueName + ')\\n' )\n\t\tfh.write('fp_t (us), fit_A, fit_B\\n')\n\t\tfor exp in self.exps:\n\t\t\tif exp.fit: #drop data points where the fit fails (is None)\n\t\t\t\ts = '%d,%f,%f\\n' % (exp.var, exp.fit.x0, exp.fit.w)\n\t\t\t\tfh.write(s)\n\t\tfh.close()\n\n###############################################################\n###############################################################\n########### FAKE RUNTIME DATASETS #############################\n###############################################################\n###############################################################\n#Here are some debug routines for these classes they use\n#mock data; to use this set the global variable debug\ndebug = False\nif debug:\n#######################################################\n### demonstrate photon time of arrival features\n#######################################################\n\n\t#mock data generated with 100 kHz TTL applied to both pmt inputs\n\t#NumExp = 3 ScanPoints = 1 NumScans = 1\n\t#Following is the detection related excerpt from the .dc file\n\t#\n\t#//pulse det10 20; //this pmtch doesn't work!\n\t# pulse det11 20;\n\t# pulse det12 20;\n\t# pulse det13 20;\n\t# pulse det14 20;\n\t# pulse det13 20;\n\t# pulse det15 20;\n\t# pulse det13 20;\n\t# pulse det16 20;\n\t# pulse det17 20;\n\t# pulse det20 20;\n\t# pulse det21 20;\n\t# pulse det22 20;\n\t# pulse det23 20;\n\t# pulse det24 20;\n\t# pulse det25 20;\n\t# pulse det26 20;\n\t# pulse det27 20;\n\n\t#here's a copy of the raw data generated\n\tphtoaSampleDataSetA = 
'8388593,124,749,8388594,124,749,16777203,124,749,16777204,124,749,16777203,124,749,16777205,124,749,16777203,124,749,16777206,124,749,16777207,124,749,16777208,124,749,16777209,125,749,16777210,124,749,16777211,124,749,16777212,124,749,16777213,124,749,16777214,124,749,16777215,124,749,8388593,493,1118,8388594,493,1118,16777203,493,1118,16777204,493,1118,16777203,493,1118,16777205,493,1118,16777203,493,1118,16777206,493,1118,16777207,493,1118,16777208,494,1118,16777209,494,1118,16777210,494,1118,16777211,494,1118,16777212,493,1119,16777213,494,1119,16777214,493,1118,16777215,494,1118,8388593,237,863,8388594,237,863,16777203,237,863,16777204,237,863,16777203,237,863,16777205,237,863,16777203,237,863,16777206,238,862,16777207,237,863,16777208,238,863,16777209,238,863,16777210,238,863,16777211,238,863,16777212,238,863,16777213,238,863,16777214,238,863,16777215,238,863'\n\trawDetectionSampleDataSetA = '8194,147458,155650,163842,155650,172034,155650,180226,188418,196610,204802,212994,221186,229378,237570,245762,253954,8194,147458,155650,163842,155650,172034,155650,180226,188418,196610,204802,212994,221186,229378,237570,245762,253954,8194,147458,155650,163842,155650,172034,155650,180226,188418,196610,204802,212994,221186,229378,237570,245762,253954'\n\n\t#here's a mockup of the data directory\n\tdatadirname = 'c:\\\\tmp\\2010-XX-XX--XX.XX.XX.XXX'\n\t#TODO create actual directory and set datadir name correctly\n\n\t#here's a mockup of the structure that gets returned by a call like\n\t# ret = run('expdir%expname')\n\tret = {\n\t\t'Hist0':0,'Hist1':0,'Hist2':0,'Hist3':0,\n\t\t'Hist4':0,'Hist5':0,'Hist6':0,'Hist7':0,\n\t\t'Hist8':0,'Hist9':0,'Hist10':0,'Hist11':0,\n\t\t'Hist12':0,'Hist13':0,'Hist14':0,'Hist15':0,\n\t\t'Fit':False,\n\t\t'chi2':-1,\n\t\t'ERROR':False,\n\t\t'OK':True,\n\t\t'RawDetection':rawDetectionSampleDataSetA,\n\t\t'PhotonTimeOfArrival':phtoaSampleDataSetA\n\t}\n\n\t#now, exercise the photon time of arrival features of the class\n\tptitle = 'Demo of hfpy\\'s photon arrival time features'\n\txname = 'hist bin number'\n\tsweep = hfpySweep(ptitle,xname=xname,datadir=datadirname,debug=1)\n\texp = hfpyExp(run_r=ret,var=0,fit=None,doPhtoa=True,\\\n\t\tphtoaPMT1isSync=True, phtoaCalcHist=True,debug=1,\\\n\t\tphtoaCalcHistNbins=100)\n\tsweep.appendExp(exp)\n\tsweep.appendExp(exp)\n\tsweep.appendExp(exp)\n\tsweep.appendExp(exp)\n\tsweep.appendExp(exp)\n\tsweep.appendExp(exp)\n\t#sweep.plotPhtoa(pmtch=3)\n" }, { "alpha_fraction": 0.5022531151771545, "alphanum_fraction": 0.5197145938873291, "avg_line_length": 29.096044540405273, "blob_id": "46284a2733823159d999fd091fca69c68c787066", "content_id": "0294670f2e0c9819e2a0bccd8000c9d16f6c3167", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5326, "license_type": "no_license", "max_line_length": 100, "num_lines": 177, "path": "/hp3561aPY.py", "repo_name": "justboh/python_penning", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 11 16:38:51 2014\n\n@author: Justin Bohnet\n\nhp3551aPY.py is a script designed to pull off the current MAGNITUDE trace \noff the hp signal analyzer and process the data for analysis and display \nin other programs.\n\nThis script requires the use of the Prologix GPIB to USB converter\nto handle GPIB communication protocol.\n\nInputs, given as argument to running the script:\n1: COM port\n2: GPIB address of hp35621a\n3: 'fileID'\n\nOutputs:\n1: raw binary and ASCII data files at the location of the script\n2: a processed data 
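\nExample invocation (sketch; the port, address and fileID values are placeholders, not\ntaken from this repo):\n    python hp3561aPY.py COM3 11 testrun\nwhich, if C:\\\\data\\\\ exists, writes testrun_data_<minute>.txt there.\n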
file 'fileID_data_XX' where XX is current time minutes \n this data file appears in the folder C:\\data\\. The script prompts you to\n create this folder if it does not exist\n\"\"\"\n\nimport os.path\nimport serial\nimport sys\n\nimport struct\nimport numpy as np\nfrom datetime import datetime as dt\n\nif __name__ == '__main__':\n\n if len( sys.argv ) != 4:\n print \"Usage: \", os.path.basename( sys.argv[0] ), \"<COM port> <GPIB address> <fileID>\"\n sys.exit(1)\n\n comport = sys.argv[1];\n addr = sys.argv[2];\n fileID = sys.argv[3];\n \n ser = serial.Serial()\n \n try:\n success = True\n \n ser = serial.Serial( '\\\\\\\\.\\\\'+sys.argv[1], 9600, timeout=0.5 )\n\n '''\n This block estabilishes GPIB with the Prologix controller\n all commands proceeded by ++ talk to the controller\n '''\n cmd = '++mode 1'\n print 'Sending:', cmd \n ser.write(cmd + '\\n')\n s = ser.read(256);\n if len(s) > 0:\n print s\n\n cmd = '++addr ' + addr\n print 'Sending:', cmd \n ser.write(cmd + '\\n')\n s = ser.read(256);\n if len(s) > 0:\n print s\n\n cmd = '++auto 1'\n print 'Sending:', cmd \n ser.write(cmd + '\\n')\n s = ser.read(256);\n if len(s) > 0:\n print s\n\n #send command to dump current trace data\n cmd = 'DSTB;'\n print 'Sending:', cmd \n ser.write(cmd + '\\n')\n \n #open a file to load data into -- always overwrite\n f = open(\"plot_data.bin\", \"wb\")\n \n #read data from serial\n while (1):\n s = ser.read(1028) #trace files always 1028 bytes \n if len(s) > 0:\n f.write(s)\n else:\n break\n \n f.close() \n \n #send command to dump current trace display info\n cmd = 'DDSA;'\n print 'Sending:', cmd \n ser.write(cmd + '\\n')\n\n f = open(\"plot_settings.txt\", \"wb\") #sent in ASCII\n \n while (1):\n s = ser.read(1028) \n if len(s) > 0:\n f.write(s)\n else:\n break\n\n ser.close()\n f.close() \n \n #now process the data\n f = open(\"plot_data.bin\", \"rb\")\n byte = f.read(1)\n word = \"\" #each word is 2 bytes\n data = np.zeros(0) #init data array\n while byte:\n if not(word):\n word = byte #store first byte if word is empty\n else:\n word = byte + word #order is important\n readint = struct.unpack('h', word) #converts the 2 bytes into an int\n data = np.append(data,readint)\n word = \"\" #clear word for next byte\n byte = f.read(1)\n f.close()\n \n '''\n here we assume that it is magnitude data. 
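\n    NOTE (sketch): the 'word = byte + word' reordering above implies the instrument sends\n    the high byte of each 16-bit sample first; prepending makes the pair little-endian for\n    struct.unpack('h', word). E.g. bytes received as '\\x27' then '\\x10' form '\\x10\\x27',\n    which unpacks to 10000 and scales to 0.005*10000 = 50.0. Note also that data[2:403]\n    keeps 401 values (indices 2..402), one more than the 400 stated below.\n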
Data TRACES always \n contain 400 data points\n '''\n magdata = 0.005*data[2:403] \n \n #process the screen data to get frequency info\n with open(\"plot_settings.txt\", \"r\") as f:\n line = f.readline()\n f.close()\n line = line[4:-1] #strip off meaningless characters at beginning\n\n aline_full = line.split(' ') #create array of info for access\n line = line.replace(' ','') #make line more readable for footer\n \n #remove empty elements\n aline = [x.strip() for x in aline_full if x.strip() != ''] \n aline = [x.replace(' ','') for x in aline] #remove spaces \n \n #if 'OVLD' is present, it messes up the elements for readout\n \n if aline[5] == 'OVLD': \n aline = np.delete(aline,5) \n line = line + ' OVLD' #note an overload condition for footer\n \n start = float(''.join([x for x in aline[-5] if x.isdigit()]))\n stop = float(''.join([x for x in aline[-3] if x.isdigit()]))\n numpts = magdata.size\n freq = np.linspace(start,stop,num=numpts)\n \n dataout = np.array([freq,magdata]).transpose()\n \n #get time for fileID\n minute = str(dt.now().minute)\n \n #note that BW gives the effective noise bandwidth ENBW used to convert\n #to a power spectral density if desired\n \n if os.path.exists(\"C:\\\\data\\\\\"):\n np.savetxt('C:\\\\data\\\\' + fileID +'_data_' + minute + '.txt', dataout, delimiter = ',', \n header = 'Freq, '+ aline[6], footer = line )\n else:\n print \"Need to create C:\\\\data\\\\\"\n \n except serial.SerialException, e:\n print e\n f.close()\n \n except KeyboardInterrupt, e:\n ser.close()\n f.close()" } ]
11